source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
DRB046-doall2-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Two-dimensional array computation:
Both loop levels carry an OpenMP construct here: the outer parallel for
declares private(j) explicitly, and the nested "parallel for simd"
privatizes its own loop iteration variable per the OpenMP loop-construct
rules, so neither i nor j is shared across iterations.
*/
#include <stdio.h>
#include <stdlib.h>
int a[100][100];
int main()
{
int i,j;
/* Initialize a[i][j] = i*200 + j. The outer worksharing loop privatizes
   j explicitly; the nested "parallel for simd" privatizes its own loop
   variable by the OpenMP loop-construct rules, so there is no race. */
#pragma omp parallel for private(j)
for (i=0;i<100;i++)
#pragma omp parallel for simd
for (j=0;j<100;j++)
a[i][j]= i * 200 + j;
/* Increment every element; same nested-parallel privatization pattern. */
#pragma omp parallel for private(j)
for (i=0;i<100;i++)
#pragma omp parallel for simd
for (j=0;j<100;j++)
a[i][j]=a[i][j]+1;
/* Print the array. NOTE(review): the ordered clauses plus "ordered simd"
   appear intended to keep the printf output in sequential order --
   confirm against the DataRaceBench reference version of this test. */
#pragma omp parallel for private(j) ordered
for (i=0;i<100;i++)
#pragma omp parallel for simd ordered
for (j=0;j<100;j++)
#pragma omp ordered simd
printf("%d", a[i][j]);
return 0;
}
|
triplet.c | /* Copyright (C) 2015 Atsushi Togo */
/* All rights reserved. */
/* These codes were originally parts of spglib, but only develped */
/* and used for phono3py. Therefore these were moved from spglib to */
/* phono3py. This file is part of phonopy. */
/* Redistribution and use in source and binary forms, with or without */
/* modification, are permitted provided that the following conditions */
/* are met: */
/* * Redistributions of source code must retain the above copyright */
/* notice, this list of conditions and the following disclaimer. */
/* * Redistributions in binary form must reproduce the above copyright */
/* notice, this list of conditions and the following disclaimer in */
/* the documentation and/or other materials provided with the */
/* distribution. */
/* * Neither the name of the phonopy project nor the names of its */
/* contributors may be used to endorse or promote products derived */
/* from this software without specific prior written permission. */
/* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */
/* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */
/* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */
/* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */
/* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */
/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */
/* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */
/* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */
/* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */
/* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */
/* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */
/* POSSIBILITY OF SUCH DAMAGE. */
#include "bzgrid.h"
#include "triplet.h"
#include "triplet_iw.h"
#include "triplet_grid.h"
/* Thin wrapper: delegate BZ-grid triplet generation for one grid point
 * to the kernel implementation (tpk_*) in triplet_grid.c.
 * Returns the number of triplets written into `triplets`. */
long tpl_get_BZ_triplets_at_q(long (*triplets)[3],
                              const long grid_point,
                              const ConstBZGrid *bzgrid,
                              const long *map_triplets)
{
  return tpk_get_BZ_triplets_at_q(triplets, grid_point, bzgrid, map_triplets);
}
/* Thin wrapper: compute the irreducible triplet mapping for one grid point
 * on a reciprocal mesh. Fills map_triplets and map_q, and returns the
 * number of irreducible triplets found by the kernel (tpk_*). */
long tpl_get_triplets_reciprocal_mesh_at_q(long *map_triplets,
                                           long *map_q,
                                           const long grid_point,
                                           const long mesh[3],
                                           const long is_time_reversal,
                                           const long num_rot,
                                           const long (*rec_rotations)[3][3],
                                           const long swappable)
{
  /* Note the kernel takes (rotations, num_rot) in the opposite order. */
  return tpk_get_ir_triplets_at_q(map_triplets,
                                  map_q,
                                  grid_point,
                                  mesh,
                                  is_time_reversal,
                                  rec_rotations,
                                  num_rot,
                                  swappable);
}
/* Compute tetrahedron-method integration weights for all triplets.
 * iw and iw_zero are filled per-triplet in contiguous blocks of
 * num_band0 * num_band1 * num_band2 values. Parallelized over triplets
 * when openmp_per_triplets is nonzero (and PHPYOPENMP is defined);
 * otherwise each tpi_* call may parallelize over bands internally. */
void tpl_get_integration_weight(double *iw,
                                char *iw_zero,
                                const double *frequency_points,
                                const long num_band0,
                                const long relative_grid_address[24][4][3],
                                const long (*triplets)[3],
                                const long num_triplets,
                                const ConstBZGrid *bzgrid,
                                const double *frequencies1,
                                const long num_band1,
                                const double *frequencies2,
                                const long num_band2,
                                const long tp_type,
                                const long openmp_per_triplets,
                                const long openmp_per_bands)
{
  long t;
  long signed_adrs[2][24][4][3];
  const long block = num_band0 * num_band1 * num_band2;

  /* Expand the 24 tetrahedra offsets into the +/- copies needed for tp_type. */
  tpl_set_relative_grid_address(signed_adrs, relative_grid_address, tp_type);

#ifdef PHPYOPENMP
#pragma omp parallel for if (openmp_per_triplets)
#endif
  for (t = 0; t < num_triplets; t++) {
    tpi_get_integration_weight(iw + t * block,
                               iw_zero + t * block,
                               frequency_points, /* f0 */
                               num_band0,
                               signed_adrs,
                               triplets[t],
                               num_triplets,
                               bzgrid,
                               frequencies1, /* f1 */
                               num_band1,
                               frequencies2, /* f2 */
                               num_band2,
                               tp_type,
                               openmp_per_bands);
  }
}
/* Compute smearing (Gaussian sigma) integration weights for all triplets.
 * iw and iw_zero are filled per-triplet in contiguous blocks of
 * num_band0 * num_band * num_band values. A non-positive sigma_cutoff
 * product simply propagates to the kernel as the cutoff value. */
void tpl_get_integration_weight_with_sigma(double *iw,
                                           char *iw_zero,
                                           const double sigma,
                                           const double sigma_cutoff,
                                           const double *frequency_points,
                                           const long num_band0,
                                           const long (*triplets)[3],
                                           const long num_triplets,
                                           const double *frequencies,
                                           const long num_band,
                                           const long tp_type)
{
  long t;
  const double cutoff = sigma * sigma_cutoff;
  const long block = num_band0 * num_band * num_band;
  /* Offset separating the two halves of the weight array in the kernel. */
  const long const_adrs_shift = num_triplets * block;

#ifdef PHPYOPENMP
#pragma omp parallel for
#endif
  for (t = 0; t < num_triplets; t++) {
    tpi_get_integration_weight_with_sigma(iw + t * block,
                                          iw_zero + t * block,
                                          sigma,
                                          cutoff,
                                          frequency_points,
                                          num_band0,
                                          triplets[t],
                                          const_adrs_shift,
                                          frequencies,
                                          num_band,
                                          tp_type,
                                          0);
  }
}
/* Return 1 if the triplet is a Normal process (the three grid addresses
 * sum to zero on every axis), 0 if it is Umklapp (any axis sum nonzero).
 * triplet holds three indices into bz_grid_addresses. */
long tpl_is_N(const long triplet[3], const long (*bz_grid_addresses)[3])
{
  long axis, t;

  for (axis = 0; axis < 3; axis++) {
    long total = 0;
    for (t = 0; t < 3; t++) { /* 1st, 2nd, 3rd member of the triplet */
      total += bz_grid_addresses[triplet[t]][axis];
    }
    if (total != 0) {
      return 0; /* Umklapp: momentum not conserved without a G vector */
    }
  }
  return 1;
}
/* Build the two signed copies of the 24 tetrahedra relative addresses.
 * Copy 0 is always the input as-is. Copy 1 is negated when tp_type is
 * 2 or 3 (q1+q2+q3=G: stepping q2 by +1 requires stepping q3 by -1 to
 * keep G fixed); for tp_type 4 (q+k_i-k_f=G) both copies are positive. */
void tpl_set_relative_grid_address(
    long tp_relative_grid_address[2][24][4][3],
    const long relative_grid_address[24][4][3],
    const long tp_type)
{
  long copy, j, k, l;

  for (copy = 0; copy < 2; copy++) {
    const long sign =
        (copy == 1 && (tp_type == 2 || tp_type == 3)) ? -1 : 1;
    for (j = 0; j < 24; j++) {
      for (k = 0; k < 4; k++) {
        for (l = 0; l < 3; l++) {
          tp_relative_grid_address[copy][j][k][l] =
              sign * relative_grid_address[j][k][l];
        }
      }
    }
  }
}
|
GB_binop__isgt_uint64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isgt_uint64)
// A.*B function (eWiseMult): GB (_AemultB_01__isgt_uint64)
// A.*B function (eWiseMult): GB (_AemultB_02__isgt_uint64)
// A.*B function (eWiseMult): GB (_AemultB_03__isgt_uint64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isgt_uint64)
// A*D function (colscale): GB (_AxD__isgt_uint64)
// D*A function (rowscale): GB (_DxB__isgt_uint64)
// C+=B function (dense accum): GB (_Cdense_accumB__isgt_uint64)
// C+=b function (dense accum): GB (_Cdense_accumb__isgt_uint64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isgt_uint64)
// C=scalar+B GB (_bind1st__isgt_uint64)
// C=scalar+B' GB (_bind1st_tran__isgt_uint64)
// C=A+scalar GB (_bind2nd__isgt_uint64)
// C=A'+scalar GB (_bind2nd_tran__isgt_uint64)
// C type: uint64_t
// A type: uint64_t
// B,b type: uint64_t
// BinaryOp: cij = (aij > bij)
#define GB_ATYPE \
uint64_t
#define GB_BTYPE \
uint64_t
#define GB_CTYPE \
uint64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint64_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint64_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x > y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISGT || GxB_NO_UINT64 || GxB_NO_ISGT_UINT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// ISGT is none of these, so this dense-accum kernel is compiled out
// and the generic path is used instead.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense (no accumulator).
GrB_Info GB (_Cdense_ewise3_noaccum__isgt_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
// operator disabled at compile time: caller falls back to the generic kernel
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into the dense matrix C,
// sliced by B_ek_slicing into B_ntasks tasks on B_nthreads threads.
GrB_Info GB (_Cdense_accumB__isgt_uint64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar (passed untyped via p_bwork) into dense C.
GrB_Info GB (_Cdense_accumb__isgt_uint64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint64_t
uint64_t bwork = (*((uint64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// unreachable (the block above already returned); generated boilerplate
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D.
GrB_Info GB (_AxD__isgt_uint64)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// only the values Cx are computed here; the template fills them in
uint64_t *restrict Cx = (uint64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.
GrB_Info GB (_DxB__isgt_uint64)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *restrict Cx = (uint64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B (result pattern is the set union of A and B).
GrB_Info GB (_AaddB__isgt_uint64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// per-matrix slicing workspace, released by GB_FREE_WORK below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B (result pattern is the set intersection).
GrB_Info GB (_AemultB_01__isgt_uint64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full.
// GB_BINOP_FLIP is 0 for ISGT, so only the non-flip branch is compiled.
GrB_Info GB (_AemultB_02__isgt_uint64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B where M is sparse/hyper and A, B are bitmap/full.
GrB_Info GB (_AemultB_03__isgt_uint64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is held as bitmap.
GrB_Info GB (_AemultB_bitmap__isgt_uint64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x > Bx [p]) for every entry present in B's bitmap;
// the scalar x is bound as the first operand of the ISGT op.
GrB_Info GB (_bind1st__isgt_uint64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped inputs/outputs
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t *Bx = (uint64_t *) Bx_input ;
    uint64_t x = (*((uint64_t *) x_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // apply the op only where the bitmap has an entry
        if (GBB (Bb, p))
        {
            uint64_t bij = GBX (Bx, p, false) ;
            Cx [p] = (x > bij) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] > y) for every entry present in A's bitmap;
// the scalar y is bound as the second operand of the ISGT op.
GrB_Info GB (_bind2nd__isgt_uint64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped inputs/outputs
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t *Ax = (uint64_t *) Ax_input ;
    uint64_t y = (*((uint64_t *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // apply the op only where the bitmap has an entry
        if (GBB (Ab, p))
        {
            uint64_t aij = GBX (Ax, p, false) ;
            Cx [p] = (aij > y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x > aij) ; \
}
// C = op (x, A'): transpose A and apply ISGT with x bound as the 1st operand.
GrB_Info GB (_bind1st_tran__isgt_uint64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t x = (*((const uint64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for code after this function (same type for ISGT_UINT64)
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij > y) ; \
}
// C = op (A', y): transpose A and apply ISGT with y bound as the 2nd operand.
GrB_Info GB (_bind2nd_tran__isgt_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t y = (*((const uint64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__plus_uint8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__plus_uint8)
// A.*B function (eWiseMult): GB (_AemultB_01__plus_uint8)
// A.*B function (eWiseMult): GB (_AemultB_02__plus_uint8)
// A.*B function (eWiseMult): GB (_AemultB_03__plus_uint8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__plus_uint8)
// A*D function (colscale): GB (_AxD__plus_uint8)
// D*A function (rowscale): GB (_DxB__plus_uint8)
// C+=B function (dense accum): GB (_Cdense_accumB__plus_uint8)
// C+=b function (dense accum): GB (_Cdense_accumb__plus_uint8)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__plus_uint8)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__plus_uint8)
// C=scalar+B GB (_bind1st__plus_uint8)
// C=scalar+B' GB (_bind1st_tran__plus_uint8)
// C=A+scalar GB (_bind2nd__plus_uint8)
// C=A'+scalar GB (_bind2nd_tran__plus_uint8)
// C type: uint8_t
// A type: uint8_t
// B,b type: uint8_t
// BinaryOp: cij = (aij + bij)
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint8_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x + y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_PLUS || GxB_NO_UINT8 || GxB_NO_PLUS_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where all three matrices are dense; PLUS qualifies for this kernel.
void GB (_Cdense_ewise3_accum__plus_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense (no accumulator).
GrB_Info GB (_Cdense_ewise3_noaccum__plus_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
// operator disabled at compile time: caller falls back to the generic kernel
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into the dense matrix C,
// sliced by B_ek_slicing into B_ntasks tasks on B_nthreads threads.
GrB_Info GB (_Cdense_accumB__plus_uint8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar (passed untyped via p_bwork) into dense C.
GrB_Info GB (_Cdense_accumb__plus_uint8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint8_t
uint8_t bwork = (*((uint8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// unreachable (the block above already returned); generated boilerplate
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D.
GrB_Info GB (_AxD__plus_uint8)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// only the values Cx are computed here; the meta file fills them in
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.
GrB_Info GB (_DxB__plus_uint8)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B (result pattern is the set union of A and B).
GrB_Info GB (_AaddB__plus_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// per-matrix slicing workspace, released by GB_FREE_WORK below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B (result pattern is the set intersection).
GrB_Info GB (_AemultB_01__plus_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full.
// GB_BINOP_FLIP is 0 for PLUS (commutative), so only the non-flip
// branch is compiled.
GrB_Info GB (_AemultB_02__plus_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B where M is sparse/hyper and A, B are bitmap/full.
GrB_Info GB (_AemultB_03__plus_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is held as bitmap.
GrB_Info GB (_AemultB_bitmap__plus_uint8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x + Bx [p]) for every entry present in B's bitmap;
// the scalar x is bound as the first operand of the PLUS op.
GrB_Info GB (_bind1st__plus_uint8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped inputs/outputs
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t *Bx = (uint8_t *) Bx_input ;
    uint8_t x = (*((uint8_t *) x_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // apply the op only where the bitmap has an entry
        if (GBB (Bb, p))
        {
            uint8_t bij = Bx [p] ;
            Cx [p] = (x + bij) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = Ax [p] + y for every entry present in A (bind the scalar as the
// second operand of PLUS).  Entries absent from the bitmap Ab are skipped.
GrB_Info GB (_bind2nd__plus_uint8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *Cz = (uint8_t *) Cx_output ;
const uint8_t *Az = (const uint8_t *) Ax_input ;
const uint8_t yval = *((const uint8_t *) y_input) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// only operate on entries that exist in A (GBB tests the bitmap)
if (GBB (Ab, p))
{
Cz [p] = (uint8_t) (Az [p] + yval) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP(pC,pA): Cx [pC] = x + Ax [pA], with the scalar x bound as the
// first operand.  Used by the transpose template included below.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = Ax [pA] ; \
Cx [pC] = (x + aij) ; \
}
// C = op (x, A'): transpose A and apply x + aij to each entry, via the
// included GB_unop_transpose.c template and the GB_CAST_OP macro above.
GrB_Info GB (_bind1st_tran__plus_uint8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t x = (*((const uint8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// re-establish GB_ATYPE for any generated code following this kernel
// (preprocessor directives take effect regardless of the return above)
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP(pC,pA): Cx [pC] = Ax [pA] + y, with the scalar y bound as the
// second operand.  Used by the transpose template included below.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = Ax [pA] ; \
Cx [pC] = (aij + y) ; \
}
// C = op (A', y): transpose A and apply aij + y to each entry, via the
// included GB_unop_transpose.c template and the GB_CAST_OP macro above.
GrB_Info GB (_bind2nd_tran__plus_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
// this kernel was compiled out; caller must fall back to a generic method
return (GrB_NO_VALUE) ;
#else
uint8_t y = (*((const uint8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
c_print_results.c | /*****************************************************************/
/****** C _ P R I N T _ R E S U L T S ******/
/*****************************************************************/
#include <stdlib.h>
#include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#endif
/*
 * Print the standard NPB results banner: problem size, timing, Mop/s,
 * verification status, and the build configuration strings.  Under OpenMP
 * the used/available thread counts are queried and a warning is printed
 * when they differ.  Output-only; no value is returned.
 */
void c_print_results( char *name,
char class,
int n1,
int n2,
int n3,
int niter,
double t,
double mops,
char *optype,
int passed_verification,
char *npbversion,
char *compiletime,
char *cc,
char *clink,
char *c_lib,
char *c_inc,
char *cflags,
char *clinkflags )
{
int threads_used = 1;
int threads_avail = 1;
/* figure out number of threads used */
#ifdef _OPENMP
threads_avail = omp_get_max_threads();
#pragma omp parallel shared(threads_used)
{
#pragma omp master
threads_used = omp_get_num_threads();
}
#endif
printf( "\n\n %s Benchmark Completed\n", name );
printf( " Class = %c\n", class );
/* n3 == 0 signals a 1-D/2-D problem size (as in IS) */
if( n3 == 0 ) {
long nn = n1;
if ( n2 != 0 ) nn *= n2;
printf( " Size = %12ld\n", nn ); /* as in IS */
}
else
printf( " Size = %4dx%4dx%4d\n", n1,n2,n3 );
printf( " Iterations = %12d\n", niter );
printf( " Time in seconds = %12.2f\n", t );
printf( " Total threads = %12d\n", threads_used);
printf( " Avail threads = %12d\n", threads_avail);
if (threads_used != threads_avail)
printf( " Warning: Threads used differ from threads available\n");
printf( " Mop/s total = %12.2f\n", mops );
printf( " Mop/s/thread = %12.2f\n",
mops/(double)threads_used );
printf( " Operation type = %24s\n", optype);
{
const char *verdict = ( passed_verification < 0 ) ? "NOT PERFORMED"
: passed_verification ? "SUCCESSFUL"
: "UNSUCCESSFUL";
printf( " Verification = %s\n", verdict );
}
printf( " Version = %12s\n", npbversion );
printf( " Compile date = %12s\n", compiletime );
printf( "\n Compile options:\n" );
printf( " CC = %s\n", cc );
printf( " CLINK = %s\n", clink );
printf( " C_LIB = %s\n", c_lib );
printf( " C_INC = %s\n", c_inc );
printf( " CFLAGS = %s\n", cflags );
printf( " CLINKFLAGS = %s\n", clinkflags );
printf( "\n\n" );
printf( " Please send all errors/feedbacks to:\n\n" );
printf( " NPB Development Team\n" );
printf( " npb@nas.nasa.gov\n\n" );
}
|
NAS_UA.c | //---------------------------------------------------------------------
// program UA
//---------------------------------------------------------------------
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>
#if !defined(CLASS_W) && !defined(CLASS_S) && !defined(CLASS_A) && !defined(CLASS_B) && !defined(CLASS_C) && !defined(CLASS_D) && !defined(CLASS_E)
# define CLASS_W
#endif
//----------
// Class S:
//----------
#ifdef CLASS_S
# define LELT 250
# define LMOR 11600
# define REFINE_MAX 4
# define FRE_DEFAULT 5
# define NITER_DEFAULT 50
# define NMXH_DEFAULT 10
# define CLASS_DEFAULT 'S'
# define ALPHA_DEFAULT 0.040e0
#endif
//----------
// Class W:
//----------
#ifdef CLASS_W
# define LELT 700
# define LMOR 26700
# define REFINE_MAX 5
# define FRE_DEFAULT 5
# define NITER_DEFAULT 100
# define NMXH_DEFAULT 10
# define CLASS_DEFAULT 'W'
# define ALPHA_DEFAULT 0.060e0
#endif
//----------
// Class A:
//----------
#ifdef CLASS_A
# define LELT 2400
# define LMOR 92700
# define REFINE_MAX 6
# define FRE_DEFAULT 5
# define NITER_DEFAULT 200
# define NMXH_DEFAULT 10
# define CLASS_DEFAULT 'A'
# define ALPHA_DEFAULT 0.076e0
#endif
//----------
// Class B:
//----------
#ifdef CLASS_B
# define LELT 8800
# define LMOR 334600
# define REFINE_MAX 7
# define FRE_DEFAULT 5
# define NITER_DEFAULT 200
# define NMXH_DEFAULT 10
# define CLASS_DEFAULT 'B'
# define ALPHA_DEFAULT 0.076e0
#endif
//----------
// Class C:
//----------
#ifdef CLASS_C
# define LELT 33500
# define LMOR 1262100
# define REFINE_MAX 8
# define FRE_DEFAULT 5
# define NITER_DEFAULT 200
# define NMXH_DEFAULT 10
# define CLASS_DEFAULT 'C'
# define ALPHA_DEFAULT 0.067e0
#endif
//----------
// Class D:
//----------
#ifdef CLASS_D
# define LELT 515000
# define LMOR 19500000
# define REFINE_MAX 10
# define FRE_DEFAULT 5
# define NITER_DEFAULT 250
# define NMXH_DEFAULT 10
# define CLASS_DEFAULT 'D'
# define ALPHA_DEFAULT 0.046e0
#endif
typedef struct
{
double real;
double imag;
} dcomplex;
#define min(x,y) ((x) < (y) ? (x) : (y))
#define max(x,y) ((x) > (y) ? (x) : (y))
// Array dimensions
#define LX1 5
#define LNJE 2
#define NSIDES 6
#define NXYZ (LX1*LX1*LX1)
/* common /usrdati/ */
int fre, niter, nmxh;
/* common /usrdatr/ */
double alpha, dlmin, dtime;
/* common /dimn/ */
int nelt, ntot, nmor, nvertex;
/* common /bench1/ */
double x0, _y0, z0, time;
#define VELX 3.0
#define VELY 3.0
#define VELZ 3.0
#define VISC 0.005
#define X00 (3.0/7.0)
#define Y00 (2.0/7.0)
#define Z00 (2.0/7.0)
// double arrays associated with collocation points
/* common /colldp/ */
double ta1 [LELT][LX1][LX1][LX1];
double ta2 [LELT][LX1][LX1][LX1];
double trhs [LELT][LX1][LX1][LX1];
double t [LELT][LX1][LX1][LX1];
double tmult [LELT][LX1][LX1][LX1];
double dpcelm[LELT][LX1][LX1][LX1];
double pdiff [LELT][LX1][LX1][LX1];
double pdiffp[LELT][LX1][LX1][LX1];
// double arrays associated with mortar points
/* common /mortdp/ */
double umor [LMOR];
double mormult[LMOR];
double tmort [LMOR];
double tmmor [LMOR];
double rmor [LMOR];
double dpcmor [LMOR];
double pmorx [LMOR];
double ppmor [LMOR];
// integer arrays associated with element faces
/* common/facein/ */
int idmo [LELT][NSIDES][LNJE][LNJE][LX1][LX1];
int idel [LELT][NSIDES][LX1][LX1];
int sje [LELT][NSIDES][2][2];
int sje_new [LELT][NSIDES][2][2];
int ijel [LELT][NSIDES][2];
int ijel_new[LELT][NSIDES][2];
int cbc [LELT][NSIDES]; /**/
int cbc_new [LELT][NSIDES]; /**/
// integer array associated with vertices
/* common /vin/ */
int vassign[LELT][8];
int emo [8 * LELT][8][2];
int nemo [8 * LELT];
// integer array associated with element edges
/* common /edgein/ */
int diagn[LELT][12][2];
// integer arrays associated with elements
/* common /eltin/ */
int tree [LELT];
int treenew [LELT];
int mt_to_id [LELT];
int mt_to_id_old[LELT];
int id_to_mt [LELT];
int newc [LELT]; /**/
int newi [LELT]; /**/
int newe [LELT]; /**/
int ref_front_id[LELT]; /**/
int ich [LELT]; /**/
int size_e [LELT];
int front [LELT];
int action [LELT];
// int arrays associated with vertices
/* common /vlg/ */
int ifpcmor[8 * LELT];
// int arrays associated with edge
/* common /edgelg/ */
int eassign [LELT][12];
int ncon_edge[LELT][12];
int if_1_edge[LELT][12];
// int arrays associated with elements
/* common /facelg/ */
int skip [LELT];
int ifcoa [LELT];
int ifcoa_id[LELT];
// int arrays associated with element faces
/* common /masonl/ */
int fassign[LELT][NSIDES];
int edgevis[LELT][NSIDES][4];
// small arrays
/* common /transr/ */
double qbnew[2][LX1][LX1 - 2];
double bqnew[2][LX1 - 2][LX1 - 2];
/* common /pcr/ */
double pcmor_nc1[REFINE_MAX][2][2][LX1][LX1];
double pcmor_nc2[REFINE_MAX][2][2][LX1][LX1];
double pcmor_nc0[REFINE_MAX][2][2][LX1][LX1];
double pcmor_c [REFINE_MAX][LX1][LX1];
double tcpre [LX1][LX1];
double pcmor_cor[REFINE_MAX][8];
// gauss-labotto and gauss points
/* common /gauss/ */
double zgm1[LX1];
// weights
/* common /wxyz/ */
double wxm1[LX1];
double w3m1[LX1][LX1][LX1];
// coordinate of element vertices
/* common /coord/ */
double xc[LELT][8];
double yc[LELT][8];
double zc[LELT][8];
double xc_new[LELT][8];
double yc_new[LELT][8];
double zc_new[LELT][8];
// dr/dx, dx/dr and Jacobian
/* common /giso/ */
double jacm1_s[REFINE_MAX][LX1][LX1][LX1];
double rxm1_s[REFINE_MAX][LX1][LX1][LX1];
double xrm1_s[REFINE_MAX][LX1][LX1][LX1];
// mass matrices (diagonal)
/* common /mass/ */
double bm1_s[REFINE_MAX][LX1][LX1][LX1];
// dertivative matrices d/dr
/* common /dxyz/ */
double dxm1[LX1][LX1];
double dxtm1[LX1][LX1];
double wdtdr[LX1][LX1];
// interpolation operators
/* common /ixyz/ */
double ixm31 [LX1 * 2 - 1][LX1];
double ixtm31[LX1][LX1 * 2 - 1];
double ixmc1 [LX1][LX1];
double ixtmc1[LX1][LX1];
double ixmc2 [LX1][LX1];
double ixtmc2[LX1][LX1];
double map2 [LX1];
double map4 [LX1];
// collocation location within an element
/* common /xfracs/ */
double xfrac[LX1];
// used in laplacian operator
/* common /gmfact/ */
double g1m1_s[REFINE_MAX][LX1][LX1][LX1];
double g4m1_s[REFINE_MAX][LX1][LX1][LX1];
double g5m1_s[REFINE_MAX][LX1][LX1][LX1];
double g6m1_s[REFINE_MAX][LX1][LX1][LX1];
// We store some tables of useful topoint constants
// These constants are intialized in a block data 'top_constants'
/* common /top_consts/ */
int f_e_ef[6][4];
int e_c[8][3];
int local_corner[6][8];
int cal_nnb[8][3];
int oplc[4];
int cal_iijj[4][2];
int cal_intempx[6][4];
int c_f[6][4];
int le_arr[3][2][4];
int jjface[6];
int e_face2[6][4];
int op[4];
int localedgenumber[12][6];
int edgenumber[6][4];
int f_c[8][3];
int e1v1[6][6];
int e2v1[6][6];
int e1v2[6][6];
int e2v2[6][6];
int children[6][4];
int iijj[4][2];
int v_end[2];
int face_l1[3];
int face_l2[3];
int face_ld[3];
// Timer parameters
/* common /timing/ */
#define t_total 1
#define t_init 2
#define t_convect 3
#define t_transfb_c 4
#define t_diffusion 5
#define t_transf 6
#define t_transfb 7
#define t_adaptation 8
#define t_transf2 9
#define t_add2 10
#define t_last 10
#define btest(i,p) (i & (1 << p))
void do_coarsen(int *if_coarsen, int *icoarsen, int neltold);
void do_refine(int *ifmortar, int *irefine);
int ifcor(int n1, int n2, int i, int iface);
int icheck(int ie, int n);
void find_coarsen(int *if_coarsen, int neltold);
void find_refine(int *if_refine);
void check_refine(int *ifrepeat);
int iftouch(int iel);
void remap(double y[LX1][LX1][LX1], double y1[7][LX1][LX1][LX1],
double x[LX1][LX1][LX1]);
void merging(int iela[8]);
void remap2(int iela[8], int ielnew);
void remapz(double x1[LX1][LX1][LX1], double x2[LX1][LX1][LX1],
double y[LX1][LX1][LX1]);
void remapy(double x1[LX1][LX1][LX1], double x2[LX1][LX1][LX1],
double y[LX1][LX1][LX1]);
void remapx(double x1[LX1][LX1][LX1], double x2[LX1][LX1][LX1],
double y[LX1][LX1][LX1]);
void convect(int ifmortar);
void diffusion(int ifmortar);
void laplacian(double r[LX1][LX1][LX1], double u[LX1][LX1][LX1], int sizei);
void adaptation(int *ifmortar, int step);
void move();
void mortar();
int ifsame(int iel, int i, int ntemp, int j);
void setuppc();
void setpcmo_pre();
void setpcmo();
void reciprocal(double a[], int n);
void r_init(double a[], int n, double _cnst);
void nr_init(int a[], int n, int _cnst);
void l_init(int a[], int n, int _cnst);
void ncopy(int a[], int b[], int n);
void copy(double a[], double b[], int n);
void adds2m1(double a[], double b[], double c1, int n);
void adds1m1(double a[], double b[], double c1, int n);
void col2(double a[], double b[], int n);
void nrzero(int na[], int n);
void add2(double a[], double b[], int n);
double calc_norm();
void parallel_add(int frontier[]);
void dssum();
void facev(double a[LX1][LX1][LX1], int iface, double val);
void transf(double tmor[], double tx[]);
void transfb(double tmor[], double tx[]);
void transfb_cor_e(int n, double *tmor, double tx[LX1][LX1][LX1]);
void transfb_cor_f(int n, double *tmor, double tx[LX1][LX1][LX1]);
void transf_nc(double tmor[LX1][LX1], double tx[LX1][LX1]);
void transfb_nc0(double tmor[LX1][LX1], double tx[LX1][LX1][LX1]);
void transfb_nc2(double tmor[LX1][LX1], double tx[LX1][LX1]);
void transfb_nc1(double tmor[LX1][LX1], double tx[LX1][LX1]);
void transfb_c(double tx[]);
void transfb_c_2(double tx[]);
void verify(char *Class, int *verified);
void create_initial_grid();
void coef();
void geom1();
void setdef();
void prepwork();
void top_constants();
void get_emo(int ie, int n, int ng);
void mor_assign(int mor_v[3], int *count);
void mor_edge(int ie, int face, int iel, int mor_v[3]);
void edgecopy_s(int face, int iel);
void mor_s_e(int n, int face, int iel, int mor_s_v[2][4]);
void mor_s_e_nn(int n, int face, int iel, int mor_s_v[4], int nn);
void mortar_vertex(int i, int iel, int count);
void mor_ne(int mor_v[3], int nn, int edge, int face,
int edge2, int face2, int ntemp, int iel);
void pc_corner(int imor);
void com_dpc(int iside, int iel, int enumber, int n, int isize);
void print_results(char *name, char class, int n1, int n2, int n3, int niter,
double t, double mops, char *optype, int verified);
double start[64], elapsed[64];
double elapsed_time( void );
void timer_clear( int n );
void timer_start( int n );
void timer_stop( int n );
double timer_read( int n );
void wtime(double *t);
int main(int argc, char *argv[])
{
int step, ie, iside, i, j, k;
double mflops, tmax, nelt_tot = 0.0;
char Class;
int ifmortar = 0, verified;
double t2, trecs[t_last + 1];
char *t_names[t_last + 1];
//---------------------------------------------------------------------
// Read input file (if it exists), else take
// defaults from parameters
//---------------------------------------------------------------------
FILE *fp;
printf("\n\n NAS Parallel Benchmarks (NPB3.3-SER-C) - UA Benchmark\n\n");
if ((fp = fopen("inputua.data", "r")) != NULL)
{
int result;
printf(" Reading from input file inputua.data\n");
result = fscanf(fp, "%d", &fre);
while (fgetc(fp) != '\n');
result = fscanf(fp, "%d", &niter);
while (fgetc(fp) != '\n');
result = fscanf(fp, "%d", &nmxh);
while (fgetc(fp) != '\n');
result = fscanf(fp, "%lf", &alpha);
Class = 'U';
fclose(fp);
}
else
{
printf(" No input file inputua.data. Using compiled defaults\n");
fre = FRE_DEFAULT;
niter = NITER_DEFAULT;
nmxh = NMXH_DEFAULT;
alpha = ALPHA_DEFAULT;
Class = CLASS_DEFAULT;
}
dlmin = pow(0.5, REFINE_MAX);
dtime = 0.04 * dlmin;
printf(" Levels of refinement: %8d\n", REFINE_MAX);
printf(" Adaptation frequency: %8d\n", fre);
printf(" Time steps: %8d dt: %15.6E\n", niter, dtime);
printf(" CG iterations: %8d\n", nmxh);
printf(" Heat source radius: %8.4f\n\n", alpha);
top_constants();
for (i = 1; i <= t_last; i++)
{
timer_clear(i);
}
// set up initial mesh (single element) and solution (all zero)
create_initial_grid();
r_init((double *)ta1, ntot, 0.0);
nr_init((int *)sje, 4 * 6 * nelt, -1);
// compute tables of coefficients and weights
coef();
geom1();
// compute the discrete laplacian operators
setdef();
// prepare for the preconditioner
setpcmo_pre();
// refine initial mesh and do some preliminary work
time = 0.0;
mortar();
prepwork();
adaptation(&ifmortar, 0);
timer_clear(1);
time = 0.0;
for (step = 0; step <= niter; step++)
{
if (step == 1)
{
// reset the solution and start the timer, keep track of total no elms
r_init((double *)ta1, ntot, 0.0);
time = 0.0;
nelt_tot = 0.0;
for (i = 1; i <= t_last; i++)
{
if (i != t_init) timer_clear(i);
}
timer_start(1);
}
// advance the convection step
convect(ifmortar);
// prepare the intital guess for cg
transf(tmort, (double *)ta1);
// compute residual for diffusion term based on intital guess
// compute the left hand side of equation, lapacian t
for (ie = 0; ie < nelt; ie++)
{
laplacian(ta2[ie], ta1[ie], size_e[ie]);
}
// compute the residual
#pragma omp parallel for default(shared) private(ie, k, j, i) firstprivate(nelt, ta2)
for (ie = 0; ie < nelt; ie++)
{
for (k = 0; k < LX1; k++)
{
for (j = 0; j < LX1; j++)
{
for (i = 0; i < LX1; i++)
{
trhs[ie][k][j][i] = trhs[ie][k][j][i] - ta2[ie][k][j][i];
}
}
}
}
// get the residual on mortar
transfb(rmor, (double *)trhs);
// apply boundary condition: zero out the residual on domain boundaries
// apply boundary conidtion to trhs
#pragma omp parallel for default(shared) private(ie, iside) firstprivate(nelt, cbc)
for (ie = 0; ie < nelt; ie++)
{
for (iside = 0; iside < NSIDES; iside++)
{
if (cbc[ie][iside] == 0)
{
facev(trhs[ie], iside, 0.0);
}
}
}
// apply boundary condition to rmor
col2(rmor, tmmor, nmor);
// call the conjugate gradient iterative solver
diffusion(ifmortar);
// add convection and diffusion
add2((double *)ta1, (double *)t, ntot);
// perform mesh adaptation
time = time + dtime;
if ((step != 0) && (step / fre * fre == step))
{
if (step != niter)
{
adaptation(&ifmortar, step);
}
}
else
{
ifmortar = 0;
}
nelt_tot = nelt_tot + (double)(nelt);
}
timer_stop(1);
tmax = timer_read(1);
verify(&Class, &verified);
// compute millions of collocation points advanced per second.
// diffusion: nmxh advancements, convection: 1 advancement
mflops = nelt_tot * (double)(LX1 * LX1 * LX1 * (nmxh + 1)) / (tmax * 1.e6);
print_results("UA", Class, REFINE_MAX, 0, 0, niter,
tmax, mflops, " coll. point advanced",
verified);
int exitValue = verified ? 0 : 1;
return exitValue;
}
//-----------------------------------------------------------
// For 3-D mesh adaptation (refinement+ coarsening)
//-----------------------------------------------------------
// Adapt the mesh to the moving heat source: repeatedly refine elements
// overlapping the source, then repeatedly coarsen elements that no longer
// overlap it, then reorder elements along the Morton curve.
// On return, *ifmortar is 1 iff the grid changed (mortar indices must be
// regenerated) and 0 otherwise.
// Fix vs. original: the coarsening while(1) loop had no exit path when
// find_coarsen reported nothing to coarsen, spinning forever; an
// `else break` matching the refine loop above was added.
void adaptation(int *ifmortar, int step)
{
int if_coarsen, if_refine, ifrepeat;
int iel, miel, irefine, icoarsen, neltold;
*ifmortar = 0;
// compute heat source center(x0,y0,z0)
x0 = X00 + VELX * time;
_y0 = Y00 + VELY * time;
z0 = Z00 + VELZ * time;
// Search elements to be refined. Check with restrictions. Perform
// refinement repeatedly until all desired refinements are done.
// ich[iel]=0 no grid change on element iel
// ich[iel]=2 iel is marked to be coarsened
// ich[iel]=4 iel is marked to be refined
// irefine records how many elements got refined
irefine = 0;
// check whether elements need to be refined because they have overlap
// with the heat source
while (1)
{
find_refine(&if_refine);
if (if_refine)
{
ifrepeat = 1;
while (ifrepeat)
{
// Check with restriction, unmark elements that cannot be refined.
//Elements preventing desired refinement will be marked to be refined.
check_refine(&ifrepeat);
}
// perform refinement
do_refine(ifmortar, &irefine);
}
else
{
break;
}
}
// Search for elements to be coarsened. Check with restrictions,
// Perform coarsening repeatedly until all possible coarsening
// is done.
// icoarsen records how many elements got coarsened
icoarsen = 0;
// skip[iel]=1 indicates an element no longer exists (because it
// got merged)
l_init(skip, nelt, 0);
neltold = nelt;
// Check whether elements need to be coarsened because they don't have
// overlap with the heat source. Only elements that don't have a larger
// size neighbor can be marked to be coarsened
while (1)
{
find_coarsen(&if_coarsen, neltold);
if (if_coarsen)
{
// Perform coarsening, however subject to restriction. Only possible
// coarsening will be performed. if_coarsen=1 indicates that
// actual coarsening happened
do_coarsen(&if_coarsen, &icoarsen, neltold);
if (if_coarsen)
{
// ifmortar=1 indicates the grid changed, i.e. the mortar points
// indices need to be regenerated on the new grid.
*ifmortar = 1;
}
else
{
break;
}
}
else
{
// BUG FIX: nothing left to coarsen -- exit instead of looping forever
break;
}
}
printf("Step %4d: elements refined, merged, total:%6d %6d %6d\n",
step, irefine, icoarsen, nelt);
// mt_to_id[miel] takes as argument the morton index and returns the actual
// element index
// id_to_mt(iel) takes as argument the actual element index and returns the
// morton index
for (miel = 0; miel < nelt; miel++)
{
iel = mt_to_id[miel];
id_to_mt[iel] = miel;
}
// Reorder the elements in the order of the morton curve. After the move
// subroutine the element indices are the same as the morton indices
move();
// if the grid changed, regenerate mortar indices and update variables
// associated to grid.
if (*ifmortar)
{
mortar();
prepwork();
}
}
//---------------------------------------------------------------
// Coarsening procedure:
// 1) check with restrictions
// 2) perform coarsening
//---------------------------------------------------------------
// Coarsening procedure.  Finds sets of eight sibling elements all marked
// ich==2 whose coarsening is legal (icheck), merges each set into its parent,
// and rebuilds the morton-index mapping mt_to_id accordingly.
// Outputs: *if_coarsen = 1 iff at least one merge happened;
//          *icoarsen accumulates the number of elements removed by merging.
// neltold is the element count before this adaptation pass (used to size
// per-element flag arrays).
void do_coarsen(int *if_coarsen, int *icoarsen, int neltold)
{
int test, test1, test2, test3;
int iel, ntp[8], ic, parent, mielnew, miel;
int i, index, num_coarsen;
*if_coarsen = 0;
// If an element has been merged, it will be skipped afterwards
// skip[iel]=1 for elements that will be skipped.
// ifcoa_id[iel]=1 indicates that element iel will be coarsened
// ifcoa[miel]=1 refers to element miel(mortar index) will be
// coarsened
ncopy(mt_to_id_old, mt_to_id, nelt);
nr_init(mt_to_id, nelt, -1);
l_init(ifcoa_id, neltold, 0);
// Check whether the potential coarsening will make neighbor,
// and neighbor's neighbor....break grid restriction
for (miel = 0; miel < nelt; miel++)
{
ifcoa[miel] = 0;
front[miel] = 0;
iel = mt_to_id_old[miel];
// if an element is marked to be coarsened
if (ich[iel] == 2)
{
// If the current element is the "first" child (front-left-
// bottom) of its parent (tree[iel] mod 8 equals 0), then
// find all its neighbors. Check whether they are from the same
// parent.
ic = tree[iel];
if (!btest(ic, 0) && !btest(ic, 1) && !btest(ic, 2))
{
// gather the eight children via the neighbor table sje
ntp[0] = iel;
ntp[1] = sje[iel][0][0][0];
ntp[2] = sje[iel][2][0][0];
ntp[3] = sje[ntp[2]][0][0][0];
ntp[4] = sje[iel][4][0][0];
ntp[5] = sje[ntp[4]][0][0][0];
ntp[6] = sje[ntp[4]][2][0][0];
ntp[7] = sje[ntp[6]][0][0][0];
parent = tree[iel] >> 3;
test = 0;
test1 = 1;
// test1: all eight candidates share the same parent
for (i = 0; i < 8; i++)
{
if ((tree[ntp[i]] >> 3) != parent) test1 = 0;
}
// check whether all child elements are marked to be coarsened
if (test1)
{
test2 = 1;
for (i = 0; i < 8; i++)
{
if (ich[ntp[i]] != 2) test2 = 0;
}
// check whether all child elements can be coarsened or not.
if (test2)
{
test3 = 1;
for (i = 0; i < 8; i++)
{
if (!icheck(ntp[i], i)) test3 = 0;
}
if (test3) test = 1;
}
}
// if the eight child elements are eligible to be coarsened
// mark the first children ifcoa[miel]=1
// mark them all ifcoa_id[]=1
// front[miel] will be used to calculate (potentially in parallel)
// how many elements with seuqnece numbers less than
// miel will be coarsened.
// skip[] marks that an element will no longer exist after merge.
if (test)
{
ifcoa[miel] = 1;
for (i = 0; i < 8; i++)
{
ifcoa_id[ntp[i]] = 1;
}
front[miel] = 1;
for (i = 0; i < 7; i++)
{
skip[ntp[i + 1]] = 1;
}
*if_coarsen = 1;
}
}
}
}
// compute front[iel], how many elements will be coarsened before iel
// (including iel)
parallel_add(front);
// num_coarsen is the total number of elements that will be coarsened
num_coarsen = front[nelt - 1];
// action[i] records the morton index of the i'th element (if it is an
// element's front-left-bottom-child) to be coarsened.
// create array mt_to_id to convert actual element index to morton index
for (miel = 0; miel < nelt; miel++)
{
iel = mt_to_id_old[miel];
if (!skip[iel])
{
if (ifcoa[miel])
{
action[front[miel] - 1] = miel;
// each earlier merge removes 7 elements, shifting this one forward
mielnew = miel - (front[miel] - 1) * 7;
}
else
{
mielnew = miel - front[miel] * 7;
}
mt_to_id[mielnew] = iel;
}
}
// perform the coarsening procedure (potentially in parallel)
for (index = 0; index < num_coarsen; index++)
{
miel = action[index];
iel = mt_to_id_old[miel];
// find eight child elements to be coarsened
ntp[0] = iel;
ntp[1] = sje[iel][0][0][0];
ntp[2] = sje[iel][2][0][0];
ntp[3] = sje[ntp[2]][0][0][0];
ntp[4] = sje[iel][4][0][0];
ntp[5] = sje[ntp[4]][0][0][0];
ntp[6] = sje[ntp[4]][2][0][0];
ntp[7] = sje[ntp[6]][0][0][0];
// merge them to be the parent
merging(ntp);
}
// each merge replaced 8 elements by 1, so the count drops by 7 per merge
nelt = nelt - num_coarsen * 7;
*icoarsen = *icoarsen + num_coarsen * 8;
}
//-------------------------------------------------------
// Refinement procedure
//--------------------------------------------------------
// Refinement procedure.  Cuts every element marked ich==4 into eight
// children (indices iel and nelt..nelt+6), updates vertex coordinates,
// the octree array tree[], neighbor tables (sje/cbc/ijel), and maps the
// parent's solution onto the children via remap().
// Outputs: *ifmortar = 1 iff any element was refined;
//          *irefine accumulates the number of refined elements;
//          nelt and ntot are updated for the new element count.
void do_refine(int *ifmortar, int *irefine)
{
double xctemp[8], yctemp[8], zctemp[8], xleft, xright;
double yleft, yright, zleft, zright, ta1temp[LX1][LX1][LX1];
double xhalf, yhalf, zhalf;
int iel, i, j, jface;
int ntemp, ndir, facedir, k, le[4], ne[4], mielnew;
int miel, num_refine, index, treetemp;
int ijeltemp[6][2], sjetemp[6][2][2], n1, n2, nelttemp;
int cb, cbctemp[6];
// initialize
ncopy(mt_to_id_old, mt_to_id, nelt);
nr_init(mt_to_id, nelt, -1);
nr_init(action, nelt, -1);
// front[miel]=1 iff the element at morton index miel is marked to refine
#pragma omp parallel for default(shared) private(miel) firstprivate(nelt, mt_to_id_old, ich)
for (miel = 0; miel < nelt; miel++)
{
if (ich[mt_to_id_old[miel]] != 4)
{
front[miel] = 0;
}
else
{
front[miel] = 1;
}
}
// front[iel] records how many elements with sequence numbers less than
// or equal to iel will be refined
parallel_add(front);
// num_refine is the total number of elements that will be refined
num_refine = front[nelt - 1];
// action[i] records the morton index of the i'th element to be refined
for (miel = 0; miel < nelt; miel++)
{
iel = mt_to_id_old[miel];
if (ich[iel] == 4)
{
action[front[miel] - 1] = miel;
}
}
// Compute array mt_to_id to convert the element index to morton index.
// ref_front_id[iel] records how many elements with index less than
// iel (actual element index, not morton index), will be refined.
for (miel = 0; miel < nelt; miel++)
{
iel = mt_to_id_old[miel];
if (ich[iel] == 4)
{
ntemp = (front[miel] - 1) * 7;
mielnew = miel + ntemp;
}
else
{
ntemp = front[miel] * 7;
mielnew = miel + ntemp;
}
mt_to_id[mielnew] = iel;
ref_front_id[iel] = nelt + ntemp;
}
// Perform refinement (potentially in parallel):
// - Cut an element into eight children.
// - Assign them element index as iel, nelt+1,...., nelt+7.
// - Update neighboring information.
nelttemp = nelt;
if (num_refine > 0)
{
*ifmortar = 1;
}
for (index = 0; index < num_refine; index++)
{
// miel is old morton index and mielnew is new morton index after refinement.
miel = action[index];
mielnew = miel + (front[miel] - 1) * 7;
iel = mt_to_id_old[miel];
// nelt here is the base index for this element's seven new children
nelt = nelttemp + (front[miel] - 1) * 7;
// save iel's information in a temporary array
treetemp = tree[iel];
copy(xctemp, xc[iel], 8);
copy(yctemp, yc[iel], 8);
copy(zctemp, zc[iel], 8);
ncopy(cbctemp, cbc[iel], 6);
ncopy((int *)ijeltemp, ijel[iel][0], 12);
ncopy((int *)sjetemp, sje[iel][0][0], 24);
copy((double *)ta1temp, ta1[iel][0][0], NXYZ);
// zero out iel here
tree[iel] = 0;
nr_init(cbc[iel], 6, 0);
nr_init(sje[iel][0][0], 24, -1);
nr_init(ijel[iel][0], 12, -1);
r_init(ta1[iel][0][0], NXYZ, 0.0);
// initialize new child elements:iel and nelt+1~nelt+7
for (j = 0; j < 7; j++)
{
mt_to_id[mielnew + j + 1] = nelt + j;
tree[nelt + j] = 0;
nr_init(cbc[nelt + j], 6, 0);
nr_init(sje[nelt + j][0][0], 24, -1);
nr_init(ijel[nelt + j][0], 12, -1);
r_init(ta1[nelt + j][0][0], NXYZ, 0.0);
}
// update the tree[]: children get parent's path shifted left 3 bits
// plus their own child number
ntemp = treetemp << 3;
tree[iel] = ntemp;
for (i = 0; i < 7; i++)
{
tree[nelt + i] = ntemp + ((i + 1) % 8);
}
// update the children's vertices' coordinates
xhalf = xctemp[0] + (xctemp[1] - xctemp[0]) / 2.0;
xleft = xctemp[0];
xright = xctemp[1];
yhalf = yctemp[0] + (yctemp[2] - yctemp[0]) / 2.0;
yleft = yctemp[0];
yright = yctemp[2];
zhalf = zctemp[0] + (zctemp[4] - zctemp[0]) / 2.0;
zleft = zctemp[0];
zright = zctemp[4];
for (j = 0; j < 7; j += 2)
{
for (i = 0; i < 7; i += 2)
{
xc[nelt + j][i] = xhalf;
xc[nelt + j][i + 1] = xright;
}
}
for (j = 1; j < 6; j += 2)
{
for (i = 0; i < 7; i += 2)
{
xc[nelt + j][i] = xleft;
xc[nelt + j][i + 1] = xhalf;
}
}
for (i = 0; i < 7; i += 2)
{
xc[iel][i] = xleft;
xc[iel][i + 1] = xhalf;
}
for (i = 0; i < 2; i++)
{
yc[nelt + 0][i] = yleft;
yc[nelt + 3][i] = yleft;
yc[nelt + 4][i] = yleft;
yc[nelt + 0][i + 4] = yleft;
yc[nelt + 3][i + 4] = yleft;
yc[nelt + 4][i + 4] = yleft;
}
for (i = 2; i < 4; i++)
{
yc[nelt + 0][i] = yhalf;
yc[nelt + 3][i] = yhalf;
yc[nelt + 4][i] = yhalf;
yc[nelt + 0][i + 4] = yhalf;
yc[nelt + 3][i + 4] = yhalf;
yc[nelt + 4][i + 4] = yhalf;
}
for (j = 1; j < 3; j++)
{
for (i = 0; i < 2; i++)
{
yc[nelt + j][i] = yhalf;
yc[nelt + j + 4][i] = yhalf;
yc[nelt + j][i + 4] = yhalf;
yc[nelt + j + 4][i + 4] = yhalf;
}
for (i = 2; i < 4; i++)
{
yc[nelt + j][i] = yright;
yc[nelt + j + 4][i] = yright;
yc[nelt + j][i + 4] = yright;
yc[nelt + j + 4][i + 4] = yright;
}
}
for (i = 0; i < 2; i++)
{
yc[iel][i] = yleft;
yc[iel][i + 4] = yleft;
}
for (i = 2; i < 4; i++)
{
yc[iel][i] = yhalf;
yc[iel][i + 4] = yhalf;
}
for (j = 0; j < 3; j++)
{
for (i = 0; i < 4; i++)
{
zc[nelt + j][i] = zleft;
zc[nelt + j][i + 4] = zhalf;
}
}
for (j = 3; j < 7; j++)
{
for (i = 0; i < 4; i++)
{
zc[nelt + j][i] = zhalf;
zc[nelt + j][i + 4] = zright;
}
}
for (i = 0; i < 4; i++)
{
zc[iel][i] = zleft;
zc[iel][i + 4] = zhalf;
}
// update the children's neighbor information
// ndir refers to the x,y,z directions, respectively.
// facedir refers to the orientation of the face in each direction,
// e.g. ndir=0, facedir=0 refers to face 1,
// and ndir =0, facedir=1 refers to face 2.
for (ndir = 0; ndir < 3; ndir++)
{
for (facedir = 0; facedir <= 1; facedir++)
{
i = 2 * ndir + facedir;
jface = jjface[i];
cb = cbctemp[i];
// find the new element indices of the four children on each
// face of the parent element
for (k = 0; k < 4; k++)
{
le[k] = le_arr[ndir][facedir][k] + nelt;
ne[k] = le_arr[ndir][1 - facedir][k] + nelt;
}
if (facedir == 0)
{
le[0] = iel;
}
else
{
ne[0] = iel;
}
// update neighbor information of the four child elements on each
// face of the parent element
for (k = 0; k < 4; k++)
{
cbc[le[k]][i] = 2;
sje[le[k]][i][0][0] = ne[k];
ijel[le[k]][i][0] = 0;
ijel[le[k]][i][1] = 0;
}
// if the face type of the parent element is type 2
if (cb == 2 )
{
ntemp = sjetemp[i][0][0];
// if the neighbor ntemp is not marked to be refined
if (ich[ntemp] != 4)
{
cbc[ntemp][jface] = 3;
ijel[ntemp][jface][0] = 0;
ijel[ntemp][jface][1] = 0;
for (k = 0; k < 4; k++)
{
cbc[ne[k]][i] = 1;
sje[ne[k]][i][0][0] = ntemp;
if (k == 0)
{
ijel[ne[k]][i][0] = 0;
ijel[ne[k]][i][1] = 0;
sje[ntemp][jface][0][0] = ne[k];
}
else if (k == 1)
{
ijel[ne[k]][i][0] = 0;
ijel[ne[k]][i][1] = 1;
sje[ntemp][jface][1][0] = ne[k];
}
else if (k == 2)
{
ijel[ne[k]][i][0] = 1;
ijel[ne[k]][i][1] = 0;
sje[ntemp][jface][0][1] = ne[k];
}
else if (k == 3)
{
ijel[ne[k]][i][0] = 1;
ijel[ne[k]][i][1] = 1;
sje[ntemp][jface][1][1] = ne[k];
}
}
// if the neighbor ntemp is also marked to be refined
}
else
{
// ref_front_id[ntemp] gives the base index of ntemp's children
n1 = ref_front_id[ntemp];
for (k = 0; k < 4; k++)
{
cbc[ne[k]][i] = 2;
n2 = n1 + le_arr[ndir][facedir][k];
if (n2 == n1 + 7) n2 = ntemp;
sje[ne[k]][i][0][0] = n2;
ijel[ne[k]][i][0] = 0;
}
}
// if the face type of the parent element is type 3
}
else if (cb == 3)
{
for (k = 0; k < 4; k++)
{
cbc[ne[k]][i] = 2;
if (k == 0)
{
ntemp = sjetemp[i][0][0];
}
else if (k == 1)
{
ntemp = sjetemp[i][1][0];
}
else if (k == 2)
{
ntemp = sjetemp[i][0][1];
}
else if (k == 3)
{
ntemp = sjetemp[i][1][1];
}
ijel[ne[k]][i][0] = 0;
ijel[ne[k]][i][1] = 0;
sje[ne[k]][i][0][0] = ntemp;
cbc[ntemp][jface] = 2;
sje[ntemp][jface][0][0] = ne[k];
ijel[ntemp][jface][0] = 0;
ijel[ntemp][jface][1] = 0;
}
// if the face type of the parent element is type 0
}
else if (cb == 0)
{
for (k = 0; k < 4; k++)
{
cbc[ne[k]][i] = cb;
}
}
}
}
// map solution from parent element to children
remap(ta1[iel], &ta1[ref_front_id[iel]], ta1temp);
}
// each refinement replaced 1 element by 8, so the count grows by 7 each
nelt = nelttemp + num_refine * 7;
*irefine = *irefine + num_refine;
ntot = nelt * LX1 * LX1 * LX1;
}
//-----------------------------------------------------------
// returns whether element n1's face i and element n2's
// jjface[iface] have intersections, i.e. whether n1 and
// n2 are neighbored by an edge.
//-----------------------------------------------------------
int ifcor(int n1, int n2, int i, int iface)
{
  // Two elements neighbor by an edge iff either pair of corresponding
  // edge vertices coincides.  Short-circuit exactly as the table lookup
  // allows: check the first vertex pair before touching the second.
  if (ifsame(n1, e1v1[i][iface], n2, e2v1[i][iface]))
  {
    return 1;
  }
  return ifsame(n1, e1v2[i][iface], n2, e2v2[i][iface]) ? 1 : 0;
}
//-----------------------------------------------------------
// Check whether element ie's three faces (sharing vertex n)
// are nonconforming. This will prevent it from being coarsened.
// Also check ie's neighbors on those three faces, whether ie's
// neighbors by only an edge have a size smaller than ie's,
// which also prevents ie from being coarsened.
//-----------------------------------------------------------
int icheck(int ie, int n)
{
  // The three faces of element ie sharing vertex n.
  int f1, f2, f3;
  // Neighbor elements across those faces (0 means "no neighbor recorded").
  int nb1, nb2, nb3;
  // Face types of the neighbors' adjacent faces; 0 when no neighbor.
  int cb2_1 = 0, cb3_1 = 0;
  int cb1_2 = 0, cb3_2 = 0;
  int cb1_3 = 0, cb2_3 = 0;

  f1 = f_c[n][0];
  f2 = f_c[n][1];
  f3 = f_c[n][2];

  // A nonconforming (type 3) face on any of the three faces at vertex n
  // already prevents coarsening.
  if (cbc[ie][f1] == 3 || cbc[ie][f2] == 3 || cbc[ie][f3] == 3)
  {
    return 0;
  }

  nb1 = sje[ie][f1][0][0];
  nb2 = sje[ie][f2][0][0];
  nb3 = sje[ie][f3][0][0];
  if (nb1 != 0)
  {
    cb2_1 = cbc[nb1][f2];
    cb3_1 = cbc[nb1][f3];
  }
  if (nb2 != 0)
  {
    cb3_2 = cbc[nb2][f3];
    cb1_2 = cbc[nb2][f1];
  }
  if (nb3 != 0)
  {
    cb1_3 = cbc[nb3][f1];
    cb2_3 = cbc[nb3][f2];
  }

  // If any same-size face neighbor has a nonconforming face in one of
  // the other two directions, ie has an edge neighbor smaller than
  // itself and must not be coarsened.
  if ((cbc[ie][f1] == 2 && (cb2_1 == 3 || cb3_1 == 3)) ||
      (cbc[ie][f2] == 2 && (cb3_2 == 3 || cb1_2 == 3)) ||
      (cbc[ie][f3] == 2 && (cb1_3 == 3 || cb2_3 == 3)))
  {
    return 0;
  }
  return 1;
}
//-----------------------------------------------------------
// Search elements to be coarsened. Check with restrictions.
// This subroutine only checks the element itself, not its
// neighbors.
//-----------------------------------------------------------
// Search elements eligible for coarsening: an element qualifies when it
// is active (not skipped), does not overlap the heat source, and has no
// smaller face neighbor (no type-3 face).  Qualifying elements get
// ich[iel] = 2 and *if_coarsen is raised.
// Fix: the shared flag *if_coarsen was stored by multiple threads with a
// plain write, which is a data race under the OpenMP/C11 memory model
// even though every thread stores the same value.  The store is now an
// atomic write.
void find_coarsen(int *if_coarsen, int neltold)
{
  int iftemp;
  int iel, i;
  *if_coarsen = 0;
  #pragma omp parallel for default(shared) private(iel, i, iftemp) firstprivate(neltold, skip, cbc)
  for (iel = 0; iel < neltold; iel++)
  {
    if (!skip[iel])
    {
      ich[iel] = 0;
      if (!iftouch(iel))
      {
        iftemp = 0;
        for (i = 0; i < NSIDES; i++)
        {
          // if iel has a larger size than its face neighbors, it
          // can not be coarsened
          if (cbc[iel][i] == 3)
          {
            iftemp = 1;
          }
        }
        if (!iftemp)
        {
          // concurrent same-value stores must still be synchronized
          #pragma omp atomic write
          *if_coarsen = 1;
          ich[iel] = 2;
        }
      }
    }
  }
}
//-----------------------------------------------------------
// search elements to be refined based on whether they
// have overlap with the heat source
//-----------------------------------------------------------
// Search elements to be refined: an element qualifies when it overlaps
// the heat source and its x-extent still exceeds the minimum allowed
// size dlmin.  Qualifying elements get ich[iel] = 4 and *if_refine is
// raised.
// Fix: the shared flag *if_refine was stored by multiple threads with a
// plain write — a data race under the OpenMP/C11 memory model even
// though all threads store the same value.  The store is now atomic.
void find_refine(int *if_refine)
{
  int iel;
  *if_refine = 0;
  #pragma omp parallel for default(shared) private(iel) firstprivate(nelt, dlmin, xc)
  for (iel = 0; iel < nelt; iel++)
  {
    ich[iel] = 0;
    if (iftouch(iel))
    {
      if ((xc[iel][1] - xc[iel][0]) > dlmin)
      {
        // concurrent same-value stores must still be synchronized
        #pragma omp atomic write
        *if_refine = 1;
        ich[iel] = 4;
      }
    }
  }
}
//-----------------------------------------------------------------
// Check whether the potential refinement will violate the
// restriction. If so, mark the neighbor and unmark the
// original element, and set ifrepeat 1. i.e. this procedure
// needs to be repeated until no further check is needed
//-----------------------------------------------------------------
void check_refine(int *ifrepeat)
{
int iel, iface, ntemp, nntemp, i, jface;
// *ifrepeat is raised whenever marking propagates to another element,
// meaning the caller must run this check again until a fixed point.
*ifrepeat = 0;
// serial sweep: iterations both read and write ich[] of other elements,
// so the loop order matters and it must not be parallelized as-is
for (iel = 0; iel < nelt; iel++)
{
// if iel is marked to be refined
if (ich[iel] == 4)
{
// check its six faces
for (i = 0; i < NSIDES; i++)
{
jface = jjface[i];
ntemp = sje[iel][i][0][0];
// if one face neighbor is larger in size than iel
if (cbc[iel][i] == 1)
{
// unmark iel
ich[iel] = 0;
// the large size neighbor ntemp is marked to be refined
if (ich[ntemp] != 4)
{
*ifrepeat = 1;
ich[ntemp] = 4;
}
// check iel's neighbor, neighbored by an edge on face i, which
// must be a face neighbor of ntemp
for (iface = 0; iface < NSIDES; iface++)
{
if (iface != i && iface != jface)
{
//if edge neighbors are larger than iel, mark them to be refined
if (cbc[ntemp][iface] == 2)
{
nntemp = sje[ntemp][iface][0][0];
// ifcor is to make sure the edge neighbor exist
if (ich[nntemp] != 4 && ifcor(iel, nntemp, i, iface))
{
// NOTE(review): *ifrepeat is not raised here although a new
// element is marked — presumably nntemp needs no re-check;
// confirm this is intended
ich[nntemp] = 4;
}
}
}
}
//if face neighbor are of the same size of iel, check edge neighbors
}
else if (cbc[iel][i] == 2)
{
for (iface = 0; iface < NSIDES; iface++)
{
if (iface != i && iface != jface)
{
// edge neighbor larger than iel: mark it, unmark iel, repeat
if (cbc[ntemp][iface] == 1)
{
nntemp = sje[ntemp][iface][0][0];
ich[nntemp] = 4;
ich[iel] = 0;
*ifrepeat = 1;
}
}
}
}
}
}
}
}
//-----------------------------------------------------------------
// check whether element iel has overlap with the heat source
//-----------------------------------------------------------------
int iftouch(int iel)
{
  double dis, dis1, dis2, dis3, alpha2;

  alpha2 = alpha * alpha;

  // Per-axis distance from the source center (x0,_y0,z0) to element
  // iel's bounding box: zero when the coordinate falls inside the
  // element's extent, otherwise the gap to the nearer box face.
  dis1 = (x0 < xc[iel][0]) ? (xc[iel][0] - x0)
       : (x0 > xc[iel][1]) ? (x0 - xc[iel][1])
       : 0.0;
  dis2 = (_y0 < yc[iel][0]) ? (yc[iel][0] - _y0)
       : (_y0 > yc[iel][2]) ? (_y0 - yc[iel][2])
       : 0.0;
  dis3 = (z0 < zc[iel][0]) ? (zc[iel][0] - z0)
       : (z0 > zc[iel][4]) ? (z0 - zc[iel][4])
       : 0.0;

  // squared distance from the source center to the box; the element
  // overlaps the source when it is within radius alpha
  dis = dis1 * dis1 + dis2 * dis2 + dis3 * dis3;
  return (dis < alpha2) ? 1 : 0;
}
//-----------------------------------------------------------------
// After a refinement, map the solution from the parent (x) to
// the eight children. y is the solution on the first child
// (front-bottom-left) and y1 is the solution on the next 7
// children.
//-----------------------------------------------------------------
void remap(double y[LX1][LX1][LX1], double y1[7][LX1][LX1][LX1],
double x[LX1][LX1][LX1])
{
// Tensor-product interpolation in three stages:
//   yone[0/1] = x interpolated onto the two child halves in x
//   ytwo[0..3] = yone further interpolated onto the child halves in y
//   final loop adds the z direction, filling y (first child) and the
//   seven slots of y1.
// ixmc1/ixmc2 and ixtmc1/ixtmc2 are the per-half interpolation matrices
// (and their transposed counterparts) set up elsewhere.
double yone[2][LX1][LX1][LX1], ytwo[4][LX1][LX1][LX1];
int i, iz, ii, jj, kk;
// zero all accumulators before the interpolation sums
r_init((double *)y, LX1 * LX1 * LX1, 0.0);
r_init((double *)y1, LX1 * LX1 * LX1 * 7, 0.0);
r_init((double *)yone, LX1 * LX1 * LX1 * 2, 0.0);
r_init((double *)ytwo, LX1 * LX1 * LX1 * 4, 0.0);
// stage 1 (x direction) and stage 2 (y direction), plane by plane
for (i = 0; i < LX1; i++)
{
for (kk = 0; kk < LX1; kk++)
{
for (jj = 0; jj < LX1; jj++)
{
for (ii = 0; ii < LX1; ii++)
{
yone[0][i][jj][ii] = yone[0][i][jj][ii] + ixmc1[kk][ii] * x[i][jj][kk];
yone[1][i][jj][ii] = yone[1][i][jj][ii] + ixmc2[kk][ii] * x[i][jj][kk];
}
}
}
for (kk = 0; kk < LX1; kk++)
{
for (jj = 0; jj < LX1; jj++)
{
for (ii = 0; ii < LX1; ii++)
{
ytwo[0][jj][i][ii] = ytwo[0][jj][i][ii] +
yone[0][i][kk][ii] * ixtmc1[jj][kk];
ytwo[1][jj][i][ii] = ytwo[1][jj][i][ii] +
yone[0][i][kk][ii] * ixtmc2[jj][kk];
ytwo[2][jj][i][ii] = ytwo[2][jj][i][ii] +
yone[1][i][kk][ii] * ixtmc1[jj][kk];
ytwo[3][jj][i][ii] = ytwo[3][jj][i][ii] +
yone[1][i][kk][ii] * ixtmc2[jj][kk];
}
}
}
}
// stage 3 (z direction): distribute onto the eight children
for (iz = 0; iz < LX1; iz++)
{
for (kk = 0; kk < LX1; kk++)
{
for (jj = 0; jj < LX1; jj++)
{
for (ii = 0; ii < LX1; ii++)
{
y[jj][iz][ii] = y[jj][iz][ii] +
ytwo[0][iz][kk][ii] * ixtmc1[jj][kk];
y1[0][jj][iz][ii] = y1[0][jj][iz][ii] +
ytwo[2][iz][kk][ii] * ixtmc1[jj][kk];
y1[1][jj][iz][ii] = y1[1][jj][iz][ii] +
ytwo[1][iz][kk][ii] * ixtmc1[jj][kk];
y1[2][jj][iz][ii] = y1[2][jj][iz][ii] +
ytwo[3][iz][kk][ii] * ixtmc1[jj][kk];
y1[3][jj][iz][ii] = y1[3][jj][iz][ii] +
ytwo[0][iz][kk][ii] * ixtmc2[jj][kk];
y1[4][jj][iz][ii] = y1[4][jj][iz][ii] +
ytwo[2][iz][kk][ii] * ixtmc2[jj][kk];
y1[5][jj][iz][ii] = y1[5][jj][iz][ii] +
ytwo[1][iz][kk][ii] * ixtmc2[jj][kk];
y1[6][jj][iz][ii] = y1[6][jj][iz][ii] +
ytwo[3][iz][kk][ii] * ixtmc2[jj][kk];
}
}
}
}
}
//-----------------------------------------------------------------------
// This subroutine is to merge the eight child elements and map
// the solution from eight children to the merged element.
// iela array records the eight elements to be merged.
//-----------------------------------------------------------------------
void merging(int iela[8])
{
double x1, x2, y1, y2, z1, z2;
int ielnew, i, ntemp, jface, ii, cb, ntempa[4], ielold, ntema[4];
// the merged element reuses the index of the first child; popping three
// bits off its tree code moves it one level up in the octree
ielnew = iela[0];
tree[ielnew] = tree[ielnew] >> 3;
// element vertices: the merged box spans from the min corner of child 0
// to the max corners of children 1 (x), 2 (y) and 4 (z)
x1 = xc[iela[0]][0];
x2 = xc[iela[1]][1];
y1 = yc[iela[0]][0];
y2 = yc[iela[2]][2];
z1 = zc[iela[0]][0];
z2 = zc[iela[4]][4];
// assign the eight vertex coordinates of the merged element
for (i = 0; i < 7; i += 2)
{
xc[ielnew][i] = x1;
}
for (i = 1; i < 8; i += 2)
{
xc[ielnew][i] = x2;
}
for (i = 0; i < 2; i++)
{
yc[ielnew][i] = y1;
yc[ielnew][i + 4] = y1;
}
for (i = 2; i < 4; i++)
{
yc[ielnew][i] = y2;
yc[ielnew][i + 4] = y2;
}
for (i = 0; i < 4; i++)
{
zc[ielnew][i] = z1;
}
for (i = 4; i < 8; i++)
{
zc[ielnew][i] = z2;
}
// update neighboring information
for (i = 0; i < NSIDES; i++)
{
jface = jjface[i];
// ielold: the child that carried face i of the parent;
// ntempa: the four children touching face i
ielold = iela[children[i][0]];
for (ii = 0; ii < 4; ii++)
{
ntempa[ii] = iela[children[i][ii]];
}
cb = cbc[ielold][i];
if (cb == 2)
{
// if the neighbor elements also will be coarsened
if (ifcoa_id[sje[ielold][i][0][0]])
{
if (i == 1 || i == 3 || i == 5)
{
// for the "high" faces the neighbor's merged index is reached
// through two hops of the sje table
ntemp = sje[sje[ntempa[0]][i][0][0]][i][0][0];
}
else
{
ntemp = sje[ntempa[0]][i][0][0];
}
sje[ielnew][i][0][0] = ntemp;
ijel[ielnew][i][0] = 0;
ijel[ielnew][i][1] = 0;
cbc[ielnew][i] = 2;
// if the neighbor elements will not be coarsened
}
else
{
// face i becomes nonconforming (type 3): the merged element sees
// four smaller neighbors, each of which sees it as type 1
for (ii = 0; ii < 4; ii++)
{
ntema[ii] = sje[ntempa[ii]][i][0][0];
cbc[ntema[ii]][jface] = 1;
sje[ntema[ii]][jface][0][0] = ielnew;
ijel[ntema[ii]][jface][0] = iijj[ii][0];
ijel[ntema[ii]][jface][1] = iijj[ii][1];
sje[ielnew][i][iijj[ii][1]][iijj[ii][0]] = ntema[ii];
ijel[ielnew][i][0] = 0;
ijel[ielnew][i][1] = 0;
}
cbc[ielnew][i] = 3;
}
}
else if (cb == 1)
{
// the neighbor was larger than the children; after merging both are
// the same size, so both faces become conforming (type 2)
ntemp = sje[ielold][i][0][0];
cbc[ntemp][jface] = 2;
ijel[ntemp][jface][0] = 0;
ijel[ntemp][jface][1] = 0;
sje[ntemp][jface][0][0] = ielnew;
sje[ntemp][jface][1][0] = -1;
sje[ntemp][jface][0][1] = -1;
sje[ntemp][jface][1][1] = -1;
cbc[ielnew][i] = 2;
ijel[ielnew][i][0] = 0;
ijel[ielnew][i][1] = 0;
sje[ielnew][i][0][0] = ntemp;
}
else if (cb == 0)
{
// boundary face: no neighbor entries
cbc[ielnew][i] = 0;
sje[ielnew][i][0][0] = -1;
sje[ielnew][i][1][0] = -1;
sje[ielnew][i][0][1] = -1;
sje[ielnew][i][1][1] = -1;
}
}
// map solution from children to the merged element
remap2(iela, ielnew);
}
//-----------------------------------------------------------------
// Map the solution from the children to the parent.
// iela array records the eight elements to be merged.
// ielnew is the element index of the merged element.
//-----------------------------------------------------------------
void remap2(int iela[8], int ielnew)
{
  // Collapse the eight children pairwise, one direction at a time:
  // four x-merges, then two y-merges, then one z-merge that writes the
  // parent's solution in place.
  double xpair0[LX1][LX1][LX1], xpair1[LX1][LX1][LX1];
  double xpair2[LX1][LX1][LX1], xpair3[LX1][LX1][LX1];
  double ypair0[LX1][LX1][LX1], ypair1[LX1][LX1][LX1];

  remapx(ta1[iela[0]], ta1[iela[1]], xpair0);
  remapx(ta1[iela[2]], ta1[iela[3]], xpair1);
  remapx(ta1[iela[4]], ta1[iela[5]], xpair2);
  remapx(ta1[iela[6]], ta1[iela[7]], xpair3);
  remapy(xpair0, xpair1, ypair0);
  remapy(xpair2, xpair3, ypair1);
  remapz(ypair0, ypair1, ta1[ielnew]);
}
//-----------------------------------------------------------------
// z direction mapping after the merge.
// Map solution from x1 & x2 to y.
//-----------------------------------------------------------------
void remapz(double x1[LX1][LX1][LX1], double x2[LX1][LX1][LX1],
double y[LX1][LX1][LX1])
{
  int col, row, p;
  double acc;

  // Merge two children stacked in z into one parent along the z index
  // (outermost array dimension); x1 is the low child, x2 the high one.
  for (row = 0; row < LX1; row++)
  {
    for (col = 0; col < LX1; col++)
    {
      // plane 0: bottom face of the low child, copied directly
      y[0][row][col] = x1[0][row][col];
      // plane 1: map2-weighted combination of the low child's planes
      acc = 0.0;
      for (p = 0; p < LX1; p++)
      {
        acc = acc + map2[p] * x1[p][row][col];
      }
      y[1][row][col] = acc;
      // plane 2: the children's interface, i.e. the low child's top face
      y[2][row][col] = x1[LX1 - 1][row][col];
      // plane 3: map4-weighted combination of the high child's planes
      acc = 0.0;
      for (p = 0; p < LX1; p++)
      {
        acc = acc + map4[p] * x2[p][row][col];
      }
      y[3][row][col] = acc;
      // top plane: top face of the high child
      y[LX1 - 1][row][col] = x2[LX1 - 1][row][col];
    }
  }
}
//-----------------------------------------------------------------
// y direction mapping after the merge.
// Map solution from x1 & x2 to y.
//-----------------------------------------------------------------
void remapy(double x1[LX1][LX1][LX1], double x2[LX1][LX1][LX1],
double y[LX1][LX1][LX1])
{
  int col, plane, p;
  double acc;

  // Merge two children adjacent in y into one parent along the y index
  // (middle array dimension); x1 is the low child, x2 the high one.
  for (plane = 0; plane < LX1; plane++)
  {
    for (col = 0; col < LX1; col++)
    {
      // row 0: low face of the low child, copied directly
      y[plane][0][col] = x1[plane][0][col];
      // row 1: map2-weighted combination of the low child's rows
      acc = 0.0;
      for (p = 0; p < LX1; p++)
      {
        acc = acc + map2[p] * x1[plane][p][col];
      }
      y[plane][1][col] = acc;
      // row 2: the children's interface, i.e. the low child's high face
      y[plane][2][col] = x1[plane][LX1 - 1][col];
      // row 3: map4-weighted combination of the high child's rows
      acc = 0.0;
      for (p = 0; p < LX1; p++)
      {
        acc = acc + map4[p] * x2[plane][p][col];
      }
      y[plane][3][col] = acc;
      // last row: high face of the high child
      y[plane][LX1 - 1][col] = x2[plane][LX1 - 1][col];
    }
  }
}
//-----------------------------------------------------------------
// x direction mapping after the merge.
// Map solution from x1 & x2 to y.
//-----------------------------------------------------------------
void remapx(double x1[LX1][LX1][LX1], double x2[LX1][LX1][LX1],
double y[LX1][LX1][LX1])
{
  int row, plane, p;
  double acc;

  // Merge two children adjacent in x into one parent along the x index
  // (innermost array dimension); x1 is the low child, x2 the high one.
  for (plane = 0; plane < LX1; plane++)
  {
    for (row = 0; row < LX1; row++)
    {
      // column 0: low face of the low child, copied directly
      y[plane][row][0] = x1[plane][row][0];
      // column 1: map2-weighted combination of the low child's columns
      acc = 0.0;
      for (p = 0; p < LX1; p++)
      {
        acc = acc + map2[p] * x1[plane][row][p];
      }
      y[plane][row][1] = acc;
      // column 2: the children's interface, i.e. the low child's high face
      y[plane][row][2] = x1[plane][row][LX1 - 1];
      // column 3: map4-weighted combination of the high child's columns
      acc = 0.0;
      for (p = 0; p < LX1; p++)
      {
        acc = acc + map4[p] * x2[plane][row][p];
      }
      y[plane][row][3] = acc;
      // last column: high face of the high child
      y[plane][row][LX1 - 1] = x2[plane][row][LX1 - 1];
    }
  }
}
//---------------------------------------------------------
// Advance the convection term using 4th order RK
// 1.ta1 is solution from last time step
// 2.the heat source is considered part of d/dx
// 3.trhs is right hand side for the diffusion equation
// 4.tmor is solution on mortar points, which will be used
// as the initial guess when advancing the diffusion term
//---------------------------------------------------------
void convect(int ifmortar)
{
// Classic RK4 on the convection operator per element:
//   rk1 = f(t, u), rk2 = f(t+dt/2, u + dt/2*rk1),
//   rk3 = f(t+dt/2, u + dt/2*rk2), rk4 = f(t+dt, u + dt*rk3),
// with f = -(V . grad u) + heat source.  The combined increment also
// feeds trhs, the right-hand side of the subsequent diffusion solve.
double alpha2, tempa[LX1][LX1][LX1], rdtime, pidivalpha;
double dtx1, dtx2, dtx3, src, rk1[LX1][LX1][LX1];
double rk2[LX1][LX1][LX1], rk3[LX1][LX1][LX1], rk4[LX1][LX1][LX1];
double temp[LX1][LX1][LX1], subtime[3], xx0[3], yy0[3], zz0[3];
double dtime2, r2, sum, xloc[LX1], yloc[LX1], zloc[LX1];
int k, iel, i, j, iside, isize, substep, ip;
const double sixth = 1.0 / 6.0;
pidivalpha = acos(-1.0) / alpha;
alpha2 = alpha * alpha;
dtime2 = dtime / 2.0;
rdtime = 1.0 / dtime;
// sub-step times for the three RK evaluation points: t, t+dt/2, t+dt
subtime[0] = time;
subtime[1] = time + dtime2;
subtime[2] = time + dtime;
// position of the (moving) heat source center at each sub-step time
for (substep = 0; substep < 3; substep++)
{
xx0[substep] = X00 + VELX * subtime[substep];
yy0[substep] = Y00 + VELY * subtime[substep];
zz0[substep] = Z00 + VELZ * subtime[substep];
}
#pragma omp parallel for default(shared) private(iel, i, j, k, ip, iside, isize, r2, src, sum, dtx1, dtx2, dtx3) firstprivate(nelt, alpha2, pidivalpha, dtime2, dtime, sixth, rdtime, size_e, xc, xfrac, yc, zc, xx0, yy0, zz0, dxm1, xrm1_s, cbc, bm1_s, xloc, yloc, zloc, rk1, temp, rk2, tempa, rk3, rk4)
for (iel = 0; iel < nelt; iel++)
{
isize = size_e[iel];
/*
xloc[i] is the location of i'th collocation in x direction in an element.
yloc[i] is the location of j'th collocation in y direction in an element.
zloc[i] is the location of k'th collocation in z direction in an element.
*/
for (i = 0; i < LX1; i++)
{
xloc[i] = xfrac[i] * (xc[iel][1] - xc[iel][0]) + xc[iel][0];
}
for (j = 0; j < LX1; j++)
{
yloc[j] = xfrac[j] * (yc[iel][3] - yc[iel][0]) + yc[iel][0];
}
for (k = 0; k < LX1; k++)
{
zloc[k] = xfrac[k] * (zc[iel][4] - zc[iel][0]) + zc[iel][0];
}
// RK stage 1: evaluate f at time t on the current solution ta1
for (k = 0; k < LX1; k++)
{
for (j = 0; j < LX1; j++)
{
for (i = 0; i < LX1; i++)
{
// source term: raised-cosine bump of radius alpha around the source
r2 = pow(xloc[i] - xx0[0], 2.0) + pow(yloc[j] - yy0[0], 2.0) +
pow(zloc[k] - zz0[0], 2.0);
if (r2 <= alpha2)
{
src = cos(sqrt(r2) * pidivalpha) + 1.0;
}
else
{
src = 0.0;
}
// convection: derivative in each direction via the dxm1 matrix,
// scaled by the element's inverse metric xrm1_s
sum = 0.0;
for (ip = 0; ip < LX1; ip++)
{
sum = sum + dxm1[ip][i] * ta1[iel][k][j][ip];
}
dtx1 = -VELX * sum * xrm1_s[isize][k][j][i];
sum = 0.0;
for (ip = 0; ip < LX1; ip++)
{
sum = sum + dxm1[ip][j] * ta1[iel][k][ip][i];
}
dtx2 = -VELY * sum * xrm1_s[isize][k][j][i];
sum = 0.0;
for (ip = 0; ip < LX1; ip++)
{
sum = sum + dxm1[ip][k] * ta1[iel][ip][j][i];
}
dtx3 = -VELZ * sum * xrm1_s[isize][k][j][i];
rk1[k][j][i] = dtx1 + dtx2 + dtx3 + src;
temp[k][j][i] = ta1[iel][k][j][i] + dtime2 * rk1[k][j][i];
}
}
}
// RK stage 2: evaluate f at t+dt/2 on temp = u + dt/2 * rk1
for (k = 0; k < LX1; k++)
{
for (j = 0; j < LX1; j++)
{
for (i = 0; i < LX1; i++)
{
r2 = pow(xloc[i] - xx0[1], 2.0) + pow(yloc[j] - yy0[1], 2.0) +
pow(zloc[k] - zz0[1], 2.0);
if (r2 <= alpha2)
{
src = cos(sqrt(r2) * pidivalpha) + 1.0;
}
else
{
src = 0.0;
}
sum = 0.0;
for (ip = 0; ip < LX1; ip++)
{
sum = sum + dxm1[ip][i] * temp[k][j][ip];
}
dtx1 = -VELX * sum * xrm1_s[isize][k][j][i];
sum = 0.0;
for (ip = 0; ip < LX1; ip++)
{
sum = sum + dxm1[ip][j] * temp[k][ip][i];
}
dtx2 = -VELY * sum * xrm1_s[isize][k][j][i];
sum = 0.0;
for (ip = 0; ip < LX1; ip++)
{
sum = sum + dxm1[ip][k] * temp[ip][j][i];
}
dtx3 = -VELZ * sum * xrm1_s[isize][k][j][i];
rk2[k][j][i] = dtx1 + dtx2 + dtx3 + src;
tempa[k][j][i] = ta1[iel][k][j][i] + dtime2 * rk2[k][j][i];
}
}
}
// RK stage 3: also at t+dt/2 (hence xx0[1] again), on tempa = u + dt/2*rk2
for (k = 0; k < LX1; k++)
{
for (j = 0; j < LX1; j++)
{
for (i = 0; i < LX1; i++)
{
r2 = pow(xloc[i] - xx0[1], 2.0) + pow(yloc[j] - yy0[1], 2.0) +
pow(zloc[k] - zz0[1], 2.0);
if (r2 <= alpha2)
{
src = cos(sqrt(r2) * pidivalpha) + 1.0;
}
else
{
src = 0.0;
}
sum = 0.0;
for (ip = 0; ip < LX1; ip++)
{
sum = sum + dxm1[ip][i] * tempa[k][j][ip];
}
dtx1 = -VELX * sum * xrm1_s[isize][k][j][i];
sum = 0.0;
for (ip = 0; ip < LX1; ip++)
{
sum = sum + dxm1[ip][j] * tempa[k][ip][i];
}
dtx2 = -VELY * sum * xrm1_s[isize][k][j][i];
sum = 0.0;
for (ip = 0; ip < LX1; ip++)
{
sum = sum + dxm1[ip][k] * tempa[ip][j][i];
}
dtx3 = -VELZ * sum * xrm1_s[isize][k][j][i];
rk3[k][j][i] = dtx1 + dtx2 + dtx3 + src;
temp[k][j][i] = ta1[iel][k][j][i] + dtime * rk3[k][j][i];
}
}
}
// RK stage 4: evaluate f at t+dt on temp = u + dt * rk3, then combine
for (k = 0; k < LX1; k++)
{
for (j = 0; j < LX1; j++)
{
for (i = 0; i < LX1; i++)
{
r2 = pow(xloc[i] - xx0[2], 2.0) + pow(yloc[j] - yy0[2], 2.0) +
pow(zloc[k] - zz0[2], 2.0);
if (r2 <= alpha2)
{
src = cos(sqrt(r2) * pidivalpha) + 1.0;
}
else
{
src = 0.0;
}
sum = 0.0;
for (ip = 0; ip < LX1; ip++)
{
sum = sum + dxm1[ip][i] * temp[k][j][ip];
}
dtx1 = -VELX * sum * xrm1_s[isize][k][j][i];
sum = 0.0;
for (ip = 0; ip < LX1; ip++)
{
sum = sum + dxm1[ip][j] * temp[k][ip][i];
}
dtx2 = -VELY * sum * xrm1_s[isize][k][j][i];
sum = 0.0;
for (ip = 0; ip < LX1; ip++)
{
sum = sum + dxm1[ip][k] * temp[ip][j][i];
}
dtx3 = -VELZ * sum * xrm1_s[isize][k][j][i];
rk4[k][j][i] = dtx1 + dtx2 + dtx3 + src;
// tempa now holds the RK4 time derivative (k1 + 2k2 + 2k3 + k4)/6
tempa[k][j][i] = sixth * (rk1[k][j][i] + 2.0 *
rk2[k][j][i] + 2.0 * rk3[k][j][i] + rk4[k][j][i]);
}
}
}
// apply boundary condition
for (iside = 0; iside < NSIDES; iside++)
{
if (cbc[iel][iside] == 0)
{
facev(tempa, iside, 0.0);
}
}
// build the diffusion right-hand side and advance the solution
for (k = 0; k < LX1; k++)
{
for (j = 0; j < LX1; j++)
{
for (i = 0; i < LX1; i++)
{
trhs[iel][k][j][i] = bm1_s[isize][k][j][i] * (ta1[iel][k][j][i] * rdtime +
tempa[k][j][i]);
ta1[iel][k][j][i] = ta1[iel][k][j][i] + tempa[k][j][i] * dtime;
}
}
}
}
// get mortar for initial guess for CG
if (ifmortar)
{
transfb_c_2((double *)ta1);
}
else
{
transfb_c((double *)ta1);
}
// average contributions on shared mortar points by their multiplicity
#pragma omp parallel for default(shared) private(i) firstprivate(nmor, mormult)
for (i = 0; i < nmor; i++)
{
tmort[i] = tmort[i] / mormult[i];
}
}
//---------------------------------------------------------------------
// advance the diffusion term using CG iterations
//---------------------------------------------------------------------
void diffusion(int ifmortar)
{
// Advance the diffusion term with nmxh iterations of preconditioned
// conjugate gradient; element interiors and mortar points are treated
// as one combined unknown vector throughout.
double rho_aux, rho1, rho2, beta, cona;
int iter, ie, im, iside, i, j, k;
// set up diagonal preconditioner
if (ifmortar)
{
setuppc();
setpcmo();
}
// arrays t and umor are accumulators of (am pm) in the CG algorithm
// (see the specification)
r_init((double *)t, ntot, 0.0);
r_init((double *)umor, nmor, 0.0);
// calculate initial am (see specification) in CG algorithm
// trhs and rmor are combined to generate r0 in CG algorithm.
// pdiff and pmorx are combined to generate q0 in the CG algorithm.
// rho1 is (qm,rm) in the CG algorithm.
rho1 = 0.0;
#pragma omp parallel for default(shared) private(ie, k, j, i) firstprivate(nelt, dpcelm, trhs, tmult) reduction(+ : rho1)
for (ie = 0; ie < nelt; ie++)
{
for (k = 0; k < LX1; k++)
{
for (j = 0; j < LX1; j++)
{
for (i = 0; i < LX1; i++)
{
pdiff[ie][k][j][i] = dpcelm[ie][k][j][i] * trhs[ie][k][j][i];
rho1 = rho1 + trhs[ie][k][j][i] * pdiff[ie][k][j][i] *
tmult[ie][k][j][i];
}
}
}
}
// mortar contribution to the initial inner product
#pragma omp parallel for default(shared) private(im) firstprivate(nmor, dpcmor, rmor) reduction(+ : rho1)
for (im = 0; im < nmor; im++)
{
pmorx[im] = dpcmor[im] * rmor[im];
rho1 = rho1 + rmor[im] * pmorx[im];
}
//.................................................................
// commence conjugate gradient iteration
//.................................................................
for (iter = 1; iter <= nmxh; iter++)
{
if (iter > 1)
{
rho_aux = 0.0;
// pdiffp and ppmor are combined to generate q_m+1 in the specification
// rho_aux is (q_m+1,r_m+1)
#pragma omp parallel for default(shared) private(ie, k, j, i) firstprivate(nelt, dpcelm, trhs, tmult) reduction(+ : rho_aux)
for (ie = 0; ie < nelt; ie++)
{
for (k = 0; k < LX1; k++)
{
for (j = 0; j < LX1; j++)
{
for (i = 0; i < LX1; i++)
{
pdiffp[ie][k][j][i] = dpcelm[ie][k][j][i] * trhs[ie][k][j][i];
rho_aux = rho_aux + trhs[ie][k][j][i] * pdiffp[ie][k][j][i] *
tmult[ie][k][j][i];
}
}
}
}
#pragma omp parallel for default(shared) private(im) firstprivate(nmor, dpcmor, rmor) reduction(+ : rho_aux)
for (im = 0; im < nmor; im++)
{
ppmor[im] = dpcmor[im] * rmor[im];
rho_aux = rho_aux + rmor[im] * ppmor[im];
}
// compute bm (beta) in the specification
rho2 = rho1;
rho1 = rho_aux;
beta = rho1 / rho2;
// update p_m+1 in the specification
adds1m1((double *)pdiff, (double *)pdiffp, beta, ntot);
adds1m1(pmorx, ppmor, beta, nmor);
}
// compute matrix vector product: (theta pm) in the specification
transf(pmorx, (double *)pdiff);
// compute pdiffp which is (A theta pm) in the specification
// NOTE(review): serial loop; laplacian writes only pdiffp[ie] so it
// looks parallelizable — confirm before changing
for (ie = 0; ie < nelt; ie++)
{
laplacian(pdiffp[ie], pdiff[ie], size_e[ie]);
}
// compute ppmor which will be used to compute (thetaT A theta pm)
// in the specification
transfb(ppmor, (double *)pdiffp);
// apply boundary condition
#pragma omp parallel for default(shared) private(ie, iside) firstprivate(nelt, cbc)
for (ie = 0; ie < nelt; ie++)
{
for (iside = 0; iside < NSIDES; iside++)
{
if (cbc[ie][iside] == 0)
{
facev(pdiffp[ie], iside, 0.0);
}
}
}
// compute cona which is (pm,theta T A theta pm)
cona = 0.0;
#pragma omp parallel for default(shared) private(ie, k, j, i) firstprivate(nelt, pdiff, pdiffp, tmult) reduction(+ : cona)
for (ie = 0; ie < nelt; ie++)
{
for (k = 0; k < LX1; k++)
{
for (j = 0; j < LX1; j++)
{
for (i = 0; i < LX1; i++)
{
cona = cona + pdiff[ie][k][j][i] *
pdiffp[ie][k][j][i] * tmult[ie][k][j][i];
}
}
}
}
#pragma omp parallel for default(shared) private(im) firstprivate(nmor, tmmor, pmorx) reduction(+ : cona)
for (im = 0; im < nmor; im++)
{
ppmor[im] = ppmor[im] * tmmor[im];
cona = cona + pmorx[im] * ppmor[im];
}
// compute am
cona = rho1 / cona;
// compute (am pm)
adds2m1((double *)t, (double *)pdiff, cona, ntot);
adds2m1(umor, pmorx, cona, nmor);
// compute r_m+1
adds2m1((double *)trhs, (double *)pdiffp, -cona, ntot);
adds2m1(rmor, ppmor, -cona, nmor);
}
// scatter the mortar solution back onto the element interiors
transf(umor, (double *)t);
}
//------------------------------------------------------------------
// compute r = visc*[A]x +[B]x on a given element.
//------------------------------------------------------------------
void laplacian(double r[LX1][LX1][LX1], double u[LX1][LX1][LX1], int sizei)
{
// Apply the discrete Helmholtz operator on one element:
//   r = VISC * [A] u + [B]/dt * u
// via the tensor-product factorization: three 1-D applications of the
// stiffness-related matrix wdtdr (one per direction, accumulated into
// tm1, tm2 and r), then a pointwise combination with the geometric
// factors g4m1_s/g5m1_s/g6m1_s and the mass matrix bm1_s for the
// element's size class sizei.
double rdtime;
int i, j, k, iz;
double tm1[LX1][LX1][LX1], tm2[LX1][LX1][LX1];
rdtime = 1.0 / dtime;
// x-direction derivative-like term
r_init((double *)tm1, NXYZ, 0.0);
for (iz = 0; iz < LX1; iz++)
{
for (k = 0; k < LX1; k++)
{
for (j = 0; j < LX1; j++)
{
for (i = 0; i < LX1; i++)
{
tm1[iz][j][i] = tm1[iz][j][i] + wdtdr[k][i] * u[iz][j][k];
}
}
}
}
// y-direction derivative-like term
r_init((double *)tm2, NXYZ, 0.0);
for (iz = 0; iz < LX1; iz++)
{
for (k = 0; k < LX1; k++)
{
for (j = 0; j < LX1; j++)
{
for (i = 0; i < LX1; i++)
{
tm2[iz][j][i] = tm2[iz][j][i] + u[iz][k][i] * wdtdr[j][k];
}
}
}
}
// z-direction derivative-like term, accumulated directly into r
r_init((double *)r, NXYZ, 0.0);
for (k = 0; k < LX1; k++)
{
for (iz = 0; iz < LX1; iz++)
{
for (j = 0; j < LX1; j++)
{
for (i = 0; i < LX1; i++)
{
r[iz][j][i] = r[iz][j][i] + u[k][j][i] * wdtdr[iz][k];
}
}
}
}
// collocate with remaining weights and sum to complete factorization.
for (k = 0; k < LX1; k++)
{
for (j = 0; j < LX1; j++)
{
for (i = 0; i < LX1; i++)
{
r[k][j][i] = VISC * (tm1[k][j][i] * g4m1_s[sizei][k][j][i] +
tm2[k][j][i] * g5m1_s[sizei][k][j][i] +
r[k][j][i] * g6m1_s[sizei][k][j][i]) +
bm1_s[sizei][k][j][i] * rdtime * u[k][j][i];
}
}
}
}
//-----------------------------------------------------------------
// generate mortar point index number
//-----------------------------------------------------------------
void mortar()
{
int count, iel, jface, ntemp, i, ii, jj, ntemp1;
int iii, jjj, face2, ne, ie, edge_g, ie2;
int mor_v[3], cb, cb1, cb2, cb3, cb4, cb5, cb6;
int space, sumcb, ij1, ij2, n1, n2, n3, n4, n5;
n1 = LX1 * LX1 * 6 * 4 * nelt;
nr_init((int *)idmo, n1, -1);
n2 = 8 * nelt;
nr_init(nemo, n2, -1);
nr_init((int *)vassign, n2, -1);
n3 = 2 * 64 * nelt;
nr_init((int *)emo, n3, -1);
n4 = 12 * nelt;
l_init((int *)if_1_edge, n4, 0);
n5 = 2 * 12 * nelt;
nr_init((int *)diagn, n5, -1) ;
// Mortar points indices are generated in two steps: first generate
// them for all element vertices (corner points), then for conforming
// edge and conforming face interiors. Each time a new mortar index
// is generated for a mortar point, it is broadcast to all elements
// sharing this mortar point.
// VERTICES
count = -1;
// assign mortar point indices to element vertices
#pragma omp parallel for default(shared) private(iel, cb, cb1, cb2, sumcb, ij1, ij2, ntemp, ntemp1) firstprivate(nelt, cbc, ijel, sje)
for (iel = 0; iel < nelt; iel++)
{
// first calculate how many new mortar indices will be generated for
// each element.
// For each element, at least one vertex (vertex 7) will be new mortar
// point. All possible new mortar points will be on face 1,3 or 5. By
// checking the type of these three faces, we are able to tell
// how many new mortar vertex points will be generated in each element.
cb = cbc[iel][5];
cb1 = cbc[iel][3];
cb2 = cbc[iel][1];
// For different combinations of the type of these three faces,
// we group them into 27 configurations.
// For different face types we assign the following integers:
// 1 for type 2 or 3
// 2 for type 0
// 5 for type 1
// By summing these integers for faces 1,3 and 5, sumcb will have
// 10 different numbers indicating 10 different combinations.
sumcb = 0;
if (cb == 2 || cb == 3)
{
sumcb = sumcb + 1;
}
else if (cb == 0)
{
sumcb = sumcb + 2;
}
else if (cb == 1)
{
sumcb = sumcb + 5;
}
if (cb1 == 2 || cb1 == 3)
{
sumcb = sumcb + 1;
}
else if (cb1 == 0)
{
sumcb = sumcb + 2;
}
else if (cb1 == 1)
{
sumcb = sumcb + 5;
}
if (cb2 == 2 || cb2 == 3)
{
sumcb = sumcb + 1;
}
else if (cb2 == 0)
{
sumcb = sumcb + 2;
}
else if (cb2 == 1)
{
sumcb = sumcb + 5;
}
// compute newc[iel]
// newc[iel] records how many new mortar indices will be generated
// for element iel
// vassign[iel][i] records the element vertex of the i'th new mortar
// vertex point for element iel. e.g. vassign[iel][1]=8 means
// the 2nd new mortar vertex point generated on element
// iel is iel's 8th vertex.
if (sumcb == 3)
{
// the three face types for face 1,3, and 5 are 2 2 2
newc[iel] = 1;
vassign[iel][0] = 7;
}
else if (sumcb == 4)
{
// the three face types for face 1,3 and 5 are 2 2 0 (not
// necessarily in this order)
newc[iel] = 2;
if (cb == 0)
{
vassign[iel][0] = 3;
}
else if (cb1 == 0)
{
vassign[iel][0] = 5;
}
else if (cb2 == 0)
{
vassign[iel][0] = 6;
}
vassign[iel][1] = 7;
}
else if (sumcb == 7)
{
// the three face types for face 1,3 and 5 are 2 2 1 (not
// necessarily in this order)
if (cb == 1)
{
ij1 = ijel[iel][5][0];
ij2 = ijel[iel][5][1];
if (ij1 == 0 && ij2 == 0)
{
newc[iel] = 2;
vassign[iel][0] = 3;
vassign[iel][1] = 7;
}
else if (ij1 == 0 && ij2 == 1)
{
ntemp = sje[iel][5][0][0];
if (cbc[ntemp][0] == 3 && sje[ntemp][0][0][0] < iel)
{
newc[iel] = 1;
vassign[iel][0] = 7;
}
else
{
newc[iel] = 2;
vassign[iel][0] = 3;
vassign[iel][1] = 7;
}
}
else if (ij1 == 1 && ij2 == 0)
{
ntemp = sje[iel][5][0][0];
if (cbc[ntemp][2] == 3 && sje[ntemp][2][0][0] < iel)
{
newc[iel] = 1;
vassign[iel][0] = 7;
}
else
{
newc[iel] = 2;
vassign[iel][0] = 3;
vassign[iel][1] = 7;
}
}
else
{
newc[iel] = 1;
vassign[iel][0] = 7;
}
}
else if (cb1 == 1)
{
ij1 = ijel[iel][3][0];
ij2 = ijel[iel][3][1];
if (ij1 == 0 && ij2 == 0)
{
newc[iel] = 2;
vassign[iel][0] = 5;
vassign[iel][1] = 7;
}
else if (ij1 == 0 && ij2 == 1)
{
ntemp = sje[iel][3][0][0];
if (cbc[ntemp][0] == 3 && sje[ntemp][0][0][0] < iel)
{
newc[iel] = 1;
vassign[iel][0] = 7;
}
else
{
newc[iel] = 2;
vassign[iel][0] = 5;
vassign[iel][1] = 7;
}
}
else if (ij1 == 1 && ij2 == 0)
{
ntemp = sje[iel][3][0][0];
if (cbc[ntemp][4] == 3 && sje[ntemp][4][0][0] < iel)
{
newc[iel] = 1;
vassign[iel][0] = 7;
}
else
{
newc[iel] = 2;
vassign[iel][0] = 5;
vassign[iel][1] = 7;
}
}
else
{
newc[iel] = 1;
vassign[iel][0] = 7;
}
}
else if (cb2 == 1)
{
ij1 = ijel[iel][1][0];
ij2 = ijel[iel][1][1];
if (ij1 == 0 && ij2 == 0)
{
newc[iel] = 2;
vassign[iel][0] = 6;
vassign[iel][1] = 7;
}
else if (ij1 == 0 && ij2 == 1)
{
ntemp = sje[iel][1][0][0];
if (cbc[ntemp][2] == 3 && sje[ntemp][2][0][0] < iel)
{
newc[iel] = 1;
vassign[iel][0] = 7;
}
else
{
newc[iel] = 2;
vassign[iel][0] = 6;
vassign[iel][1] = 7;
}
}
else if (ij1 == 1 && ij2 == 0)
{
ntemp = sje[iel][1][0][0];
if (cbc[ntemp][4] == 3 && sje[ntemp][4][0][0] < iel)
{
newc[iel] = 1;
vassign[iel][0] = 7;
}
else
{
newc[iel] = 2;
vassign[iel][0] = 6;
vassign[iel][1] = 7;
}
}
else
{
newc[iel] = 1;
vassign[iel][0] = 7;
}
}
}
else if (sumcb == 5)
{
// the three face types for face 1,3 and 5 are 2/3 0 0 (not
// necessarily in this order)
newc[iel] = 4;
if (cb == 2 || cb == 3)
{
vassign[iel][0] = 4;
vassign[iel][1] = 5;
vassign[iel][2] = 6;
vassign[iel][3] = 7;
}
else if (cb1 == 2 || cb1 == 3)
{
vassign[iel][0] = 2;
vassign[iel][1] = 3;
vassign[iel][2] = 6;
vassign[iel][3] = 7;
}
else if (cb2 == 2 || cb2 == 3)
{
vassign[iel][0] = 1;
vassign[iel][1] = 3;
vassign[iel][2] = 5;
vassign[iel][3] = 7;
}
}
else if (sumcb == 8)
{
// the three face types for face 1,3 and 5 are 2 0 1 (not
// necessarily in this order)
// if face 2 of type 1
if (cb == 1)
{
if (cb1 == 2 || cb1 == 3)
{
ij1 = ijel[iel][5][0];
if (ij1 == 0)
{
newc[iel] = 4;
vassign[iel][0] = 2;
vassign[iel][1] = 3;
vassign[iel][2] = 6;
vassign[iel][3] = 7;
}
else
{
ntemp = sje[iel][5][0][0];
if (cbc[ntemp][2] == 3 && sje[ntemp][2][0][0] < iel)
{
newc[iel] = 2;
vassign[iel][0] = 6;
vassign[iel][1] = 7;
}
else
{
newc[iel] = 3;
vassign[iel][0] = 3;
vassign[iel][1] = 6;
vassign[iel][2] = 7;
}
}
}
else if (cb2 == 2 || cb2 == 3)
{
if (ijel[iel][5][1] == 0)
{
newc[iel] = 4;
vassign[iel][0] = 1;
vassign[iel][1] = 3;
vassign[iel][2] = 5;
vassign[iel][3] = 7;
}
else
{
ntemp = sje[iel][5][0][0];
if (cbc[ntemp][0] == 3 && sje[ntemp][0][0][0] < iel)
{
newc[iel] = 2;
vassign[iel][0] = 5;
vassign[iel][1] = 7;
}
else
{
newc[iel] = 3;
vassign[iel][0] = 3;
vassign[iel][1] = 5;
vassign[iel][2] = 7;
}
}
}
// if face 4 of type 1
}
else if (cb1 == 1)
{
if (cb == 2 || cb == 3)
{
ij1 = ijel[iel][3][0];
ij2 = ijel[iel][3][1];
if (ij1 == 0 && ij2 == 0)
{
ntemp = sje[iel][3][0][0];
if (cbc[ntemp][1] == 3 && sje[ntemp][1][0][0] < iel)
{
newc[iel] = 3;
vassign[iel][0] = 5;
vassign[iel][1] = 6;
vassign[iel][2] = 7;
}
else
{
newc[iel] = 4;
vassign[iel][0] = 4;
vassign[iel][1] = 5;
vassign[iel][2] = 6;
vassign[iel][3] = 7;
}
}
else if (ij1 == 0 && ij2 == 1)
{
ntemp = sje[iel][3][0][0];
if (cbc[ntemp][0] == 3 && sje[ntemp][0][0][0] < iel)
{
newc[iel] = 3;
vassign[iel][0] = 4;
vassign[iel][1] = 6;
vassign[iel][2] = 7;
}
else
{
newc[iel] = 4;
vassign[iel][0] = 4;
vassign[iel][1] = 5;
vassign[iel][2] = 6;
vassign[iel][3] = 7;
}
}
else if (ij1 == 1 && ij2 == 0)
{
ntemp = sje[iel][3][0][0];
if (cbc[ntemp][4] == 3 && sje[ntemp][4][0][0] < iel)
{
newc[iel] = 2;
vassign[iel][0] = 6;
vassign[iel][1] = 7;
}
else
{
newc[iel] = 3;
vassign[iel][0] = 5;
vassign[iel][1] = 6;
vassign[iel][2] = 7;
}
}
else if (ij1 == 1 && ij2 == 1)
{
ntemp = sje[iel][3][0][0];
if (cbc[ntemp][4] == 3 && sje[ntemp][4][0][0] < iel)
{
newc[iel] = 2;
vassign[iel][0] = 6;
vassign[iel][1] = 7;
}
else
{
newc[iel] = 3;
vassign[iel][0] = 4;
vassign[iel][1] = 6;
vassign[iel][2] = 7;
}
}
}
else
{
if (ijel[iel][3][1] == 0)
{
newc[iel] = 4;
vassign[iel][0] = 1;
vassign[iel][1] = 3;
vassign[iel][2] = 5;
vassign[iel][3] = 7;
}
else
{
ntemp = sje[iel][3][0][0];
if (cbc[ntemp][0] == 3 && sje[ntemp][0][0][0] < iel)
{
newc[iel] = 2;
vassign[iel][0] = 3;
vassign[iel][1] = 7;
}
else
{
newc[iel] = 3;
vassign[iel][0] = 3;
vassign[iel][1] = 5;
vassign[iel][2] = 7;
}
}
}
// if face 6 of type 1
}
else if (cb2 == 1)
{
if (cb == 2 || cb == 3)
{
if (ijel[iel][1][0] == 0)
{
newc[iel] = 4;
vassign[iel][0] = 4;
vassign[iel][1] = 5;
vassign[iel][2] = 6;
vassign[iel][3] = 7;
}
else
{
ntemp = sje[iel][1][0][0];
if (cbc[ntemp][4] == 3 && sje[ntemp][4][0][0] < iel)
{
newc[iel] = 2;
vassign[iel][0] = 5;
vassign[iel][1] = 7;
}
else
{
newc[iel] = 3;
vassign[iel][0] = 5;
vassign[iel][1] = 6;
vassign[iel][2] = 7;
}
}
}
else
{
if (ijel[iel][1][1] == 0)
{
newc[iel] = 4;
vassign[iel][0] = 2;
vassign[iel][1] = 3;
vassign[iel][2] = 6;
vassign[iel][3] = 7;
}
else
{
ntemp = sje[iel][1][0][0];
if (cbc[ntemp][2] == 3 && sje[ntemp][2][0][0] < iel)
{
newc[iel] = 2;
vassign[iel][0] = 3;
vassign[iel][1] = 7;
}
else
{
newc[iel] = 3;
vassign[iel][0] = 3;
vassign[iel][1] = 6;
vassign[iel][2] = 7;
}
}
}
}
}
else if (sumcb == 11)
{
// the three face type for face 2,4 and 6 are 2 1 1(not
// necessarily in this order)
if (cb == 2 || cb == 3)
{
if (ijel[iel][3][0] == 0)
{
ntemp = sje[iel][3][0][0];
if (cbc[ntemp][1] == 3 && sje[ntemp][1][0][0] < iel)
{
newc[iel] = 3;
vassign[iel][0] = 5;
vassign[iel][1] = 6;
vassign[iel][2] = 7;
}
else
{
newc[iel] = 4;
vassign[iel][0] = 4;
vassign[iel][1] = 5;
vassign[iel][2] = 6;
vassign[iel][3] = 7;
}
// if ijel[iel][3][0]=1
}
else
{
ntemp = sje[iel][1][0][0];
if (cbc[ntemp][4] == 3 && sje[ntemp][4][0][0] < iel)
{
ntemp1 = sje[iel][3][0][0];
if (cbc[ntemp1][4] == 3 && sje[ntemp1][4][0][0] < iel)
{
newc[iel] = 1;
vassign[iel][0] = 7;
}
else
{
newc[iel] = 2;
vassign[iel][0] = 5;
vassign[iel][1] = 7;
}
}
else
{
ntemp1 = sje[iel][3][0][0];
if (cbc[ntemp1][4] == 3 && sje[ntemp1][4][0][0] < iel)
{
newc[iel] = 2;
vassign[iel][0] = 6;
vassign[iel][1] = 7;
}
else
{
newc[iel] = 3;
vassign[iel][0] = 5;
vassign[iel][1] = 6;
vassign[iel][2] = 7;
}
}
}
}
else if (cb1 == 2 || cb1 == 3)
{
if (ijel[iel][1][1] == 0)
{
ntemp = sje[iel][1][0][0];
if (cbc[ntemp][5] == 3 && sje[ntemp][5][0][0] < iel)
{
newc[iel] = 3;
vassign[iel][0] = 3;
vassign[iel][1] = 6;
vassign[iel][2] = 7;
}
else
{
newc[iel] = 4;
vassign[iel][0] = 2;
vassign[iel][1] = 3;
vassign[iel][2] = 6;
vassign[iel][3] = 7;
}
// if ijel[iel][1][1]=1
}
else
{
ntemp = sje[iel][1][0][0];
if (cbc[ntemp][2] == 3 && sje[ntemp][2][0][0] < iel)
{
ntemp1 = sje[iel][5][0][0];
if (cbc[ntemp1][2] == 3 && sje[ntemp1][2][0][0] < iel)
{
newc[iel] = 1;
vassign[iel][0] = 7;
}
else
{
newc[iel] = 2;
vassign[iel][0] = 3;
vassign[iel][1] = 7;
}
}
else
{
ntemp1 = sje[iel][5][0][0];
if (cbc[ntemp1][2] == 3 && sje[ntemp1][2][0][0] < iel)
{
newc[iel] = 2;
vassign[iel][0] = 6;
vassign[iel][1] = 7;
}
else
{
newc[iel] = 3;
vassign[iel][0] = 3;
vassign[iel][1] = 6;
vassign[iel][2] = 7;
}
}
}
}
else if (cb2 == 2 || cb2 == 3)
{
if (ijel[iel][5][1] == 0)
{
ntemp = sje[iel][3][0][0];
if (cbc[ntemp][5] == 3 && sje[ntemp][5][0][0] < iel)
{
newc[iel] = 3;
vassign[iel][0] = 3;
vassign[iel][1] = 5;
vassign[iel][2] = 7;
}
else
{
newc[iel] = 4;
vassign[iel][0] = 1;
vassign[iel][1] = 3;
vassign[iel][2] = 5;
vassign[iel][3] = 7;
}
// if ijel[iel][5][1]=1
}
else
{
ntemp = sje[iel][3][0][0];
if (cbc[ntemp][0] == 3 && sje[ntemp][0][0][0] < iel)
{
ntemp1 = sje[iel][5][0][0];
if (cbc[ntemp1][0] == 3 && sje[ntemp1][0][0][0] < iel)
{
newc[iel] = 1;
vassign[iel][0] = 7;
}
else
{
newc[iel] = 2;
vassign[iel][0] = 3;
vassign[iel][1] = 7;
}
}
else
{
ntemp1 = sje[iel][5][0][0];
if (cbc[ntemp1][0] == 3 && sje[ntemp1][0][0][0] < iel)
{
newc[iel] = 2;
vassign[iel][0] = 5;
vassign[iel][1] = 7;
}
else
{
newc[iel] = 3;
vassign[iel][0] = 3;
vassign[iel][1] = 5;
vassign[iel][2] = 7;
}
}
}
}
}
else if (sumcb == 6)
{
// the three face type for face 1,3 and 5 are 0 0 0(not
// necessarily in this order)
newc[iel] = 8;
vassign[iel][0] = 0;
vassign[iel][1] = 1;
vassign[iel][2] = 2;
vassign[iel][3] = 3;
vassign[iel][4] = 4;
vassign[iel][5] = 5;
vassign[iel][6] = 6;
vassign[iel][7] = 7;
}
else if (sumcb == 9)
{
// the three face type for face 1,3 and 5 are 0 0 1(not
// necessarily in this order)
newc[iel] = 7;
vassign[iel][0] = 1;
vassign[iel][1] = 2;
vassign[iel][2] = 3;
vassign[iel][3] = 4;
vassign[iel][4] = 5;
vassign[iel][5] = 6;
vassign[iel][6] = 7;
}
else if (sumcb == 12)
{
// the three face type for face 1,3 and 5 are 0 1 1(not
// necessarily in this order)
if (cb == 0)
{
ntemp = sje[iel][1][0][0];
if (cbc[ntemp][3] == 3 && sje[ntemp][3][0][0] < iel)
{
newc[iel] = 6;
vassign[iel][0] = 1;
vassign[iel][1] = 2;
vassign[iel][2] = 3;
vassign[iel][3] = 5;
vassign[iel][4] = 6;
vassign[iel][5] = 7;
}
else
{
newc[iel] = 7;
vassign[iel][0] = 1;
vassign[iel][1] = 2;
vassign[iel][2] = 3;
vassign[iel][3] = 4;
vassign[iel][4] = 5;
vassign[iel][5] = 6;
vassign[iel][6] = 7;
}
}
else if (cb1 == 0)
{
newc[iel] = 7;
vassign[iel][0] = 1;
vassign[iel][1] = 2;
vassign[iel][2] = 3;
vassign[iel][3] = 4;
vassign[iel][4] = 5;
vassign[iel][5] = 6;
vassign[iel][6] = 7;
}
else if (cb2 == 0)
{
ntemp = sje[iel][3][0][0];
if (cbc[ntemp][5] == 3 && sje[ntemp][5][0][0] < iel)
{
newc[iel] = 6;
vassign[iel][0] = 2;
vassign[iel][1] = 3;
vassign[iel][2] = 4;
vassign[iel][3] = 5;
vassign[iel][4] = 6;
vassign[iel][5] = 7;
}
else
{
newc[iel] = 7;
vassign[iel][0] = 1;
vassign[iel][1] = 2;
vassign[iel][2] = 3;
vassign[iel][3] = 4;
vassign[iel][4] = 5;
vassign[iel][5] = 6;
vassign[iel][6] = 7;
}
}
}
else if (sumcb == 15)
{
// the three face type for face 1,3 and 5 are 1 1 1(not
// necessarily in this order)
ntemp = sje[iel][3][0][0];
ntemp1 = sje[iel][1][0][0];
if (cbc[ntemp][5] == 3 && sje[ntemp][5][0][0] < iel)
{
if (cbc[ntemp][1] == 3 && sje[ntemp][1][0][0] < iel)
{
if (cbc[ntemp1][5] == 3 && sje[ntemp1][5][0][0] < iel)
{
newc[iel] = 4;
vassign[iel][0] = 3;
vassign[iel][1] = 5;
vassign[iel][2] = 6;
vassign[iel][3] = 7;
}
else
{
newc[iel] = 5;
vassign[iel][0] = 2;
vassign[iel][1] = 3;
vassign[iel][2] = 5;
vassign[iel][3] = 6;
vassign[iel][4] = 7;
}
}
else
{
if (cbc[ntemp1][5] == 3 && sje[ntemp1][5][0][0] < iel)
{
newc[iel] = 5;
vassign[iel][0] = 3;
vassign[iel][1] = 4;
vassign[iel][2] = 5;
vassign[iel][3] = 6;
vassign[iel][4] = 7;
}
else
{
newc[iel] = 6;
vassign[iel][0] = 2;
vassign[iel][1] = 3;
vassign[iel][2] = 4;
vassign[iel][3] = 5;
vassign[iel][4] = 6;
vassign[iel][5] = 7;
}
}
}
else
{
if (cbc[ntemp][1] == 3 && sje[ntemp][1][0][0] < iel)
{
if (cbc[ntemp1][5] == 3 && sje[ntemp1][5][0][0] < iel)
{
newc[iel] = 5;
vassign[iel][0] = 1;
vassign[iel][1] = 3;
vassign[iel][2] = 5;
vassign[iel][3] = 6;
vassign[iel][4] = 7;
}
else
{
newc[iel] = 6;
vassign[iel][0] = 1;
vassign[iel][1] = 2;
vassign[iel][2] = 3;
vassign[iel][3] = 5;
vassign[iel][4] = 6;
vassign[iel][5] = 7;
}
}
else
{
if (cbc[ntemp1][5] == 3 && sje[ntemp1][5][0][0] < iel)
{
newc[iel] = 6;
vassign[iel][0] = 1;
vassign[iel][1] = 3;
vassign[iel][2] = 4;
vassign[iel][3] = 5;
vassign[iel][4] = 6;
vassign[iel][5] = 7;
}
else
{
newc[iel] = 7;
vassign[iel][0] = 1;
vassign[iel][1] = 2;
vassign[iel][2] = 3;
vassign[iel][3] = 4;
vassign[iel][4] = 5;
vassign[iel][5] = 6;
vassign[iel][6] = 7;
}
}
}
}
}
// end computing how many new mortar vertex points will be generated
// on each element.
// Compute (potentially in parallel) front[iel], which records how many
// new mortar point indices are to be generated from element 0 to iel.
// front[iel]=newc[0]+newc[1]+...+newc[iel]
ncopy(front, newc, nelt);
parallel_add(front);
// On each element, generate new mortar point indices and assign them
// to all elements sharing this mortar point. Note, if a mortar point
// is shared by several elements, the mortar point index of it will only
// be generated on the element with the lowest element index.
for (iel = 0; iel < nelt; iel++)
{
// compute the starting vertex mortar point index in element iel
front[iel] = front[iel] - newc[iel];
for (i = 0; i < newc[iel]; i++)
{
// count is the new mortar index number, which will be assigned
// to a vertex of iel and broadcast to all other elements sharing
// this vertex point.
count = front[iel] + i;
mortar_vertex(vassign[iel][i], iel, count);
}
}
// nvertex records how many mortar indices are for element vertices.
// It is used in the computation of the preconditioner.
nvertex = count + 1;
// CONFORMING EDGE AND FACE INTERIOR
// find out how many new mortar point indices will be assigned to all
// conforming edges and all conforming face interiors on each element
// eassign[iel][i]=1 indicates that the i'th edge on iel will
// generate new mortar points.
// ncon_edge[iel][i]=1 indicates that the i'th edge on iel is
// nonconforming
n1 = 12 * nelt;
l_init((int *)ncon_edge, n1, 0);
l_init((int *)eassign, n1, 0);
// fassign[iel][i]=1 indicates that the i'th face of iel will
// generate new mortar points
n2 = 6 * nelt;
l_init((int *)fassign, n2, 0);
// newe records how many new edges are to be assigned
// diagn[iel][n][0] records the element index of neighbor element of iel,
// that shares edge n of iel
// diagn[iel][n][1] records the neighbor element diagn[iel][n][0] shares
// which part of edge n of iel. diagn[iel][n][1]=0 refers to left
// or bottom half of the edge n, diagn[iel][n][1]=1 refers
// to the right or top part of edge n.
// if_1_edge[iel][n]=1 indicates that the size of iel is smaller than
// that of its neighbor connected, neighbored by edge n only
for (iel = 0; iel < nelt; iel++)
{
newc[iel] = 0;
newe[iel] = 0;
newi[iel] = 0;
cb1 = cbc[iel][0];
cb2 = cbc[iel][1];
cb3 = cbc[iel][2];
cb4 = cbc[iel][3];
cb5 = cbc[iel][4];
cb6 = cbc[iel][5];
// on face 6
if (cb6 == 0)
{
if (cb4 == 0 || cb4 == 1)
{
// if face 6 is of type 0 and face 4 is of type 0 or type 1, the edge
// shared by face 4 and 6 (edge 10) will generate new mortar point
// indices.
newe[iel] = newe[iel] + 1;
eassign[iel][10] = 1;
}
if (cb1 != 3)
{
// if face 1 is of type 3, the edge shared by face 6 and 1 (edge 0)
// will generate new mortar points indices.
newe[iel] = newe[iel] + 1;
eassign[iel][0] = 1;
}
if (cb3 != 3)
{
newe[iel] = newe[iel] + 1;
eassign[iel][8] = 1;
}
if (cb2 == 0 || cb2 == 1)
{
newe[iel] = newe[iel] + 1;
eassign[iel][4] = 1;
}
}
else if (cb6 == 1)
{
if (cb4 == 0)
{
newe[iel] = newe[iel] + 1;
eassign[iel][10] = 1;
}
else if (cb4 == 1)
{
// If face 6 and face 4 both are of type 1, ntemp is the neighbor
// element on face 4.
ntemp = sje[iel][3][0][0];
// if ntemp's face 6 is not noncoforming or the neighbor element
// of ntemp on face 6 has an element index larger than iel, the
// edge shared by face 6 and 4 (edge 10) will generate new mortar
// point indices.
if (cbc[ntemp][5] != 3 || sje[ntemp][5][0][0] > iel)
{
newe[iel] = newe[iel] + 1;
eassign[iel][10] = 1;
// if the face 6 of ntemp is of type 2
if (cbc[ntemp][5] == 2)
{
// The neighbor element of iel, neighbored by edge 10, is
// sje[ntemp][5][0][0] (the neighbor element of ntemp on ntemp's
// face 6).
diagn[iel][10][0] = sje[ntemp][5][0][0];
// The neighbor element of iel, neighbored by edge 10 shares
// the ijel[iel][5][1] part of edge 10 of iel
diagn[iel][10][1] = ijel[iel][5][1];
// edge 9 of element sje[ntemp][5][0][0] (the neighbor element of
// ntemp on ntemp's face 6) is a nonconforming edge
ncon_edge[sje[ntemp][5][0][0]][9] = 1;
// if_1_edge[iel][n]=1 indicates that iel is of a smaller
//size than its neighbor element, neighbored by edge n of iel only.
if_1_edge[iel][10] = 1;
}
if (cbc[ntemp][5] == 3 && sje[ntemp][5][0][0] > iel)
{
diagn[iel][10][0] = sje[ntemp][5][ijel[iel][5][1]][1];
}
}
}
if (cb1 == 0)
{
newe[iel] = newe[iel] + 1;
eassign[iel][0] = 1;
}
else if (cb1 == 1)
{
ntemp = sje[iel][0][0][0];
if (cbc[ntemp][5] != 3 || sje[ntemp][5][0][0] > iel)
{
newe[iel] = newe[iel] + 1;
eassign[iel][0] = 1;
if (cbc[ntemp][5] == 2)
{
diagn[iel][0][0] = sje[ntemp][5][0][0];
diagn[iel][0][1] = ijel[iel][5][0];
ncon_edge[sje[ntemp][5][0][0]][6] = 1;
if_1_edge[iel][0] = 1;
}
if (cbc[ntemp][5] == 3 && sje[ntemp][5][0][0] > iel)
{
diagn[iel][0][0] = sje[ntemp][5][0][ijel[iel][5][0]];
}
}
}
else if (cb1 == 2)
{
if (ijel[iel][5][1] == 1)
{
ntemp = sje[iel][0][0][0];
if (cbc[ntemp][5] == 1)
{
newe[iel] = newe[iel] + 1;
eassign[iel][0] = 1;
// if cbc[ntemp][5]=2
}
else
{
if (sje[ntemp][5][0][0] > iel)
{
newe[iel] = newe[iel] + 1;
eassign[iel][0] = 1;
diagn[iel][0][0] = sje[ntemp][5][0][0];
}
}
}
else
{
newe[iel] = newe[iel] + 1;
eassign[iel][0] = 1;
}
}
if (cb3 == 0)
{
newe[iel] = newe[iel] + 1;
eassign[iel][8] = 1;
}
else if (cb3 == 1)
{
ntemp = sje[iel][2][0][0];
if (cbc[ntemp][5] != 3 || sje[ntemp][5][0][0] > iel)
{
newe[iel] = newe[iel] + 1;
eassign[iel][8] = 1;
if (cbc[ntemp][5] == 2)
{
diagn[iel][8][0] = sje[ntemp][5][0][0];
diagn[iel][8][1] = ijel[iel][5][1];
ncon_edge[sje[ntemp][5][0][0]][11] = 1;
if_1_edge[iel][8] = 1;
}
if (cbc[ntemp][5] == 3 && sje[ntemp][5][0][0] > iel)
{
diagn[iel][8][0] = sje[ntemp][5][ijel[iel][5][1]][1];
}
}
}
else if (cb3 == 2)
{
if (ijel[iel][5][0] == 1)
{
ntemp = sje[iel][2][0][0];
if (cbc[ntemp][5] == 1)
{
newe[iel] = newe[iel] + 1;
eassign[iel][8] = 1;
// if cbc[ntemp][5]=2
}
else
{
if (sje[ntemp][5][0][0] > iel)
{
newe[iel] = newe[iel] + 1;
eassign[iel][8] = 1;
diagn[iel][8][0] = sje[ntemp][5][0][0];
}
}
}
else
{
newe[iel] = newe[iel] + 1;
eassign[iel][8] = 1;
}
}
if (cb2 == 0)
{
newe[iel] = newe[iel] + 1;
eassign[iel][4] = 1;
}
else if (cb2 == 1)
{
ntemp = sje[iel][1][0][0];
if (cbc[ntemp][5] != 3 || sje[ntemp][5][0][0] > iel)
{
newe[iel] = newe[iel] + 1;
eassign[iel][4] = 1;
if (cbc[ntemp][5] == 2)
{
diagn[iel][4][0] = sje[ntemp][5][0][0];
diagn[iel][4][1] = ijel[iel][5][0];
ncon_edge[sje[ntemp][5][0][0]][2] = 1;
if_1_edge[iel][4] = 1;
}
if (cbc[ntemp][5] == 3 && sje[ntemp][5][0][0] > iel)
{
diagn[iel][8][0] = sje[ntemp][5][ijel[iel][5][1]][1];
}
}
}
}
    // on face 4
if (cb4 == 0)
{
if (cb1 != 3)
{
newe[iel] = newe[iel] + 1;
eassign[iel][3] = 1;
}
if (cb5 != 3)
{
newe[iel] = newe[iel] + 1;
eassign[iel][11] = 1;
}
if (cb2 == 0 || cb2 == 1)
{
newe[iel] = newe[iel] + 1;
eassign[iel][7] = 1;
}
}
else if (cb4 == 1)
{
if (cb1 == 2)
{
if (ijel[iel][3][1] == 0)
{
newe[iel] = newe[iel] + 1;
eassign[iel][3] = 1;
}
else
{
ntemp = sje[iel][3][0][0];
if (cbc[ntemp][0] != 3 || sje[ntemp][0][0][0] > iel)
{
newe[iel] = newe[iel] + 1;
eassign[iel][3] = 1;
if (cbc[ntemp][0] == 3 && sje[ntemp][0][0][0] > iel)
{
diagn[iel][3][0] = sje[ntemp][0][1][ijel[iel][3][0]];
}
}
}
}
else if (cb1 == 0)
{
newe[iel] = newe[iel] + 1;
eassign[iel][3] = 1;
}
else if (cb1 == 1)
{
ntemp = sje[iel][3][0][0];
if (cbc[ntemp][0] != 3 || sje[ntemp][0][0][0] > iel)
{
newe[iel] = newe[iel] + 1;
eassign[iel][3] = 1;
if (cbc[ntemp][0] == 2)
{
diagn[iel][3][0] = sje[ntemp][0][0][0];
diagn[iel][3][1] = ijel[iel][3][0];
ncon_edge[sje[ntemp][0][0][0]][5] = 1;
if_1_edge[iel][3] = 1;
}
if (cbc[ntemp][0] == 3 && sje[ntemp][0][0][0] > iel)
{
diagn[iel][3][0] = sje[ntemp][0][1][ijel[iel][3][0]];
}
}
}
if (cb5 == 2)
{
if (ijel[iel][3][0] == 0)
{
newe[iel] = newe[iel] + 1;
eassign[iel][11] = 1;
}
else
{
ntemp = sje[iel][3][0][0];
if (cbc[ntemp][4] != 3 || sje[ntemp][4][0][0] > iel)
{
newe[iel] = newe[iel] + 1;
eassign[iel][11] = 1;
if (cbc[ntemp][4] == 3 && sje[ntemp][4][0][0] > iel)
{
diagn[iel][11][0] = sje[ntemp][4][ijel[iel][3][1]][1];
}
}
}
}
else if (cb5 == 0)
{
newe[iel] = newe[iel] + 1;
eassign[iel][11] = 1;
}
else if (cb5 == 1)
{
ntemp = sje[iel][3][0][0];
if (cbc[ntemp][4] != 3 || sje[ntemp][4][0][0] > iel)
{
newe[iel] = newe[iel] + 1;
eassign[iel][11] = 1;
if (cbc[ntemp][4] == 2)
{
diagn[iel][11][0] = sje[ntemp][4][0][0];
diagn[iel][11][1] = ijel[iel][3][1];
ncon_edge[sje[ntemp][4][0][0]][8] = 1;
if_1_edge[iel][11] = 1;
}
if (cbc[ntemp][4] == 3 && sje[ntemp][4][0][0] > iel)
{
diagn[iel][11][0] = sje[ntemp][4][ijel[iel][3][1]][1];
}
}
}
if (cb2 == 0)
{
newe[iel] = newe[iel] + 1;
eassign[iel][7] = 1;
}
else if (cb2 == 1)
{
ntemp = sje[iel][3][0][0];
if (cbc[ntemp][1] != 3 || sje[ntemp][1][0][0] > iel)
{
newe[iel] = newe[iel] + 1;
eassign[iel][7] = 1;
if (cbc[ntemp][1] == 2)
{
diagn[iel][7][0] = sje[ntemp][1][0][0];
diagn[iel][7][1] = ijel[iel][3][0];
ncon_edge[sje[ntemp][1][0][0]][1] = 1;
if_1_edge[iel][7] = 1;
}
if (cbc[ntemp][1] == 3 && sje[ntemp][1][0][0] > iel)
{
diagn[iel][7][0] = sje[ntemp][2][1][ijel[iel][3][0]];
}
}
}
}
// on face 2
if (cb2 == 0)
{
if (cb3 != 3)
{
newe[iel] = newe[iel] + 1;
eassign[iel][5] = 1;
}
if (cb5 != 3)
{
newe[iel] = newe[iel] + 1;
eassign[iel][6] = 1;
}
}
else if (cb2 == 1)
{
if (cb3 == 2)
{
if (ijel[iel][1][1] == 0)
{
newe[iel] = newe[iel] + 1;
eassign[iel][5] = 1;
}
else
{
ntemp = sje[iel][1][0][0];
if (cbc[ntemp][2] != 3 || sje[ntemp][2][0][0] > iel)
{
newe[iel] = newe[iel] + 1;
eassign[iel][5] = 1;
if (cbc[ntemp][2] == 3 && sje[ntemp][2][0][0] > iel)
{
diagn[iel][5][0] = sje[ntemp][2][1][ijel[iel][1][0]];
}
}
}
}
else if (cb3 == 0)
{
newe[iel] = newe[iel] + 1;
eassign[iel][5] = 1;
}
else if (cb3 == 1)
{
ntemp = sje[iel][1][0][0];
if (cbc[ntemp][2] != 3 || sje[ntemp][2][0][0] > iel)
{
newe[iel] = newe[iel] + 1;
eassign[iel][5] = 1;
if (cbc[ntemp][2] == 2)
{
diagn[iel][5][0] = sje[ntemp][2][0][0];
diagn[iel][5][1] = ijel[iel][1][0];
ncon_edge[sje[ntemp][2][0][0]][3] = 1;
if_1_edge[iel][5] = 1;
}
if (cbc[ntemp][2] == 3 && sje[ntemp][2][0][0] > iel)
{
diagn[iel][5][0] = sje[ntemp][2][1][ijel[iel][3][0]];
}
}
}
if (cb5 == 2)
{
if (ijel[iel][1][0] == 0)
{
newe[iel] = newe[iel] + 1;
eassign[iel][6] = 1;
}
else
{
ntemp = sje[iel][1][0][0];
if (cbc[ntemp][4] != 3 || sje[ntemp][4][0][0] > iel)
{
newe[iel] = newe[iel] + 1;
eassign[iel][6] = 1;
if (cbc[ntemp][4] == 3 && sje[ntemp][4][0][0] > iel)
{
diagn[iel][6][0] = sje[ntemp][4][1][ijel[iel][1][1]];
}
}
}
}
else if (cb5 == 0)
{
newe[iel] = newe[iel] + 1;
eassign[iel][6] = 1;
}
else if (cb5 == 1)
{
ntemp = sje[iel][1][0][0];
if (cbc[ntemp][4] != 3 || sje[ntemp][4][0][0] > iel)
{
newe[iel] = newe[iel] + 1;
eassign[iel][6] = 1;
if (cbc[ntemp][4] == 2)
{
diagn[iel][6][0] = sje[ntemp][4][0][0];
diagn[iel][6][1] = ijel[iel][1][1];
ncon_edge[sje[ntemp][4][0][0]][0] = 1;
if_1_edge[iel][6] = 1;
}
if (cbc[ntemp][4] == 3 && sje[ntemp][4][0][0] > iel)
{
diagn[iel][6][0] = sje[ntemp][4][ijel[iel][3][1]][1];
}
}
}
}
// on face 1
if (cb1 == 1)
{
newe[iel] = newe[iel] + 2;
eassign[iel][1] = 1;
if (cb3 == 1)
{
ntemp = sje[iel][0][0][0];
if (cbc[ntemp][2] == 2)
{
diagn[iel][1][0] = sje[ntemp][2][0][0];
diagn[iel][1][1] = ijel[iel][0][0];
ncon_edge[sje[ntemp][2][0][0]][7] = 1;
if_1_edge[iel][1] = 1;
}
else if (cbc[ntemp][2] == 3)
{
diagn[iel][1][0] = sje[ntemp][2][0][ijel[iel][0][0]];
}
}
else if (cb3 == 2)
{
ntemp = sje[iel][2][0][0];
if (ijel[iel][0][1] == 1)
{
if (cbc[ntemp][0] == 2)
{
diagn[iel][1][0] = sje[ntemp][0][0][0];
}
}
}
eassign[iel][2] = 1;
if (cb5 == 1)
{
ntemp = sje[iel][0][0][0];
if (cbc[ntemp][4] == 2)
{
diagn[iel][2][0] = sje[ntemp][4][0][0];
diagn[iel][2][1] = ijel[iel][0][1];
ncon_edge[sje[ntemp][4][0][0]][4] = 1;
if_1_edge[iel][2] = 1;
}
else if (cbc[ntemp][4] == 3)
{
diagn[iel][2][0] = sje[ntemp][4][0][ijel[iel][0][1]];
}
}
else if (cb5 == 2)
{
ntemp = sje[iel][4][0][0];
if (ijel[iel][0][0] == 1)
{
if (cbc[ntemp][0] == 2)
{
diagn[iel][2][0] = sje[ntemp][0][0][0];
}
}
}
}
else if (cb1 == 2)
{
if (cb3 == 2)
{
ntemp = sje[iel][0][0][0];
if (cbc[ntemp][2] != 3)
{
newe[iel] = newe[iel] + 1;
eassign[iel][1] = 1;
if (cbc[ntemp][2] == 2)
{
diagn[iel][1][0] = sje[ntemp][2][0][0];
}
}
}
else if (cb3 == 0 || cb3 == 1)
{
newe[iel] = newe[iel] + 1;
eassign[iel][1] = 1;
if (cb3 == 1)
{
ntemp = sje[iel][0][0][0];
if (cbc[ntemp][2] == 2)
{
diagn[iel][1][0] = sje[ntemp][2][0][0];
}
}
}
if (cb5 == 2)
{
ntemp = sje[iel][0][0][0];
if (cbc[ntemp][4] != 3)
{
newe[iel] = newe[iel] + 1;
eassign[iel][2] = 1;
if (cbc[ntemp][4] == 2)
{
diagn[iel][2][0] = sje[ntemp][4][0][0];
}
}
}
else if (cb5 == 0 || cb5 == 1)
{
newe[iel] = newe[iel] + 1;
eassign[iel][2] = 1;
if (cb5 == 1)
{
ntemp = sje[iel][0][0][0];
if (cbc[ntemp][4] == 2)
{
diagn[iel][2][0] = sje[ntemp][4][0][0];
}
}
}
}
else if (cb1 == 0)
{
if (cb3 != 3)
{
newe[iel] = newe[iel] + 1;
eassign[iel][1] = 1;
}
if (cb5 != 3)
{
newe[iel] = newe[iel] + 1;
eassign[iel][2] = 1;
}
}
// on face 3
if (cb3 == 1)
{
newe[iel] = newe[iel] + 1;
eassign[iel][9] = 1;
if (cb5 == 1)
{
ntemp = sje[iel][2][0][0];
if (cbc[ntemp][4] == 2)
{
diagn[iel][9][0] = sje[ntemp][4][0][0];
diagn[iel][9][1] = ijel[iel][2][1];
ncon_edge[sje[ntemp][4][0][0]][10] = 1;
if_1_edge[iel][9] = 1;
}
}
if (ijel[iel][2][0] == 1)
{
ntemp = sje[iel][2][0][0];
if (cbc[ntemp][4] == 3)
{
diagn[iel][9][0] = sje[ntemp][4][ijel[iel][2][1]][0];
}
}
}
else if (cb3 == 2)
{
if (cb5 == 2)
{
ntemp = sje[iel][2][0][0];
if (cbc[ntemp][4] != 3)
{
newe[iel] = newe[iel] + 1;
eassign[iel][9] = 1;
if (cbc[ntemp][4] == 2)
{
diagn[iel][9][0] = sje[ntemp][4][0][0];
}
}
}
else if (cb5 == 0 || cb5 == 1)
{
newe[iel] = newe[iel] + 1;
eassign[iel][9] = 1;
if (cb5 == 1)
{
ntemp = sje[iel][2][0][0];
if (cbc[ntemp][4] == 2)
{
diagn[iel][9][0] = sje[ntemp][4][0][0];
}
}
}
}
else if (cb3 == 0)
{
if (cb5 != 3)
{
newe[iel] = newe[iel] + 1;
eassign[iel][9] = 1;
}
}
// CONFORMING FACE INTERIOR
// find how many new mortar point indices will be assigned
// to face interiors on all faces on each element
// newi record how many new face interior points will be assigned
// on face 6
if (cb6 == 1 || cb6 == 0)
{
newi[iel] = newi[iel] + 9;
fassign[iel][5] = 1;
}
// on face 4
if (cb4 == 1 || cb4 == 0)
{
newi[iel] = newi[iel] + 9;
fassign[iel][3] = 1;
}
// on face 2
if (cb2 == 1 || cb2 == 0)
{
newi[iel] = newi[iel] + 9;
fassign[iel][1] = 1;
}
// on face 1
if (cb1 != 3)
{
newi[iel] = newi[iel] + 9;
fassign[iel][0] = 1;
}
// on face 3
if (cb3 != 3)
{
newi[iel] = newi[iel] + 9;
fassign[iel][2] = 1;
}
// on face 5
if (cb5 != 3)
{
newi[iel] = newi[iel] + 9;
fassign[iel][4] = 1;
}
// newc is the total number of new mortar point indices
// to be assigned to each element.
newc[iel] = newe[iel] * 3 + newi[iel];
}
// Compute (potentially in parallel) front[iel], which records how
// many new mortar point indices are to be assigned (to conforming
// edges and conforming face interiors) from element 0 to iel.
// front[iel]=newc[0]+newc[1]+...+newc[iel]
ncopy(front, newc, nelt);
parallel_add(front);
// nmor is the total number or mortar points
nmor = nvertex + front[nelt - 1];
// Generate (potentially in parallel) new mortar point indices on
// each conforming element face. On each face, first visit all
// conforming edges, and then the face interior.
for (iel = 0; iel < nelt; iel++)
{
front[iel] = front[iel] - newc[iel];
count = nvertex + front[iel];
for (i = 0; i < 6; i++)
{
cb1 = cbc[iel][i];
if (i < 2)
{
ne = 4;
space = 1;
}
else if (i < 4)
{
ne = 3;
space = 2;
          // i loops over faces. Only 4 faces need to be examined for edge visit.
// On face 1, edge 0,1,2 and 3 will be visited. On face 2, edge 4,5,6
// and 7 will be visited. On face 3, edge 8 and 9 will be visited and
// on face 4, edge 10 and 11 will be visited. The 12 edges can be
// covered by four faces, there is no need to visit edges on face
// 5 and 6. So ne is set to be 0.
// However, i still needs to loop over 4 and 5, since the interiors
// of face 5 and 6 still need to be visited.
}
else
{
ne = 0;
space = 1;
}
for (ie = 0; ie < ne; ie += space)
{
edge_g = edgenumber[i][ie];
if (eassign[iel][edge_g])
{
// generate the new mortar points index, mor_v
mor_assign(mor_v, &count);
// assign mor_v to local edge ie of face i on element iel
mor_edge(ie, i, iel, mor_v);
// Since this edge is shared by another face of element
// iel, assign mor_v to the corresponding edge on the other
// face also.
// find the other face
face2 = f_e_ef[i][ie];
// find the local edge index of this edge on the other face
ie2 = localedgenumber[edge_g][face2];
// asssign mor_v to local edge ie2 of face face2 on element iel
mor_edge(ie2, face2, iel, mor_v);
// There are some neighbor elements also sharing this edge. Assign
// mor_v to neighbor element, neighbored by face i.
if (cbc[iel][i] == 2)
{
ntemp = sje[iel][i][0][0];
mor_edge(ie, jjface[i], ntemp, mor_v);
mor_edge(op[ie2], face2, ntemp, mor_v);
}
// assign mor_v to neighbor element neighbored by face face2
if (cbc[iel][face2] == 2)
{
ntemp = sje[iel][face2][0][0];
mor_edge(ie2, jjface[face2], ntemp, mor_v);
mor_edge(op[ie], i, ntemp, mor_v);
}
// assign mor_v to neighbor element sharing this edge
// if the neighbor is of the same size of iel
if (!if_1_edge[iel][edgenumber[i][ie]])
{
if (diagn[iel][edgenumber[i][ie]][0] != -1)
{
ntemp = diagn[iel][edgenumber[i][ie]][0];
mor_edge(op[ie2], jjface[face2], ntemp, mor_v);
mor_edge(op[ie], jjface[i], ntemp, mor_v);
}
// if the neighbor has a size larger than iel's
}
else
{
if (diagn[iel][edgenumber[i][ie]][0] != -1)
{
ntemp = diagn[iel][edgenumber[i][ie]][0];
mor_ne(mor_v, diagn[iel][edgenumber[i][ie]][1],
ie, i, ie2, face2, iel, ntemp);
}
}
}
}
if (fassign[iel][i])
{
// generate new mortar points index in face interior.
// if face i is of type 2 or iel doesn't have a neighbor element,
// assign new mortar point indices to interior mortar points
// of face i of iel.
cb = cbc[iel][i];
if (cb == 1 || cb == 0)
{
for (jj = 1; jj < LX1 - 1; jj++)
{
for (ii = 1; ii < LX1 - 1; ii++)
{
idmo[iel][i][0][0][jj][ii] = count;
count = count + 1;
}
}
// if face i is of type 2, assign new mortar point indices
// to iel as well as to the neighboring element on face i
}
else if (cb == 2)
{
if (idmo[iel][i][0][0][1][1] == -1)
{
ntemp = sje[iel][i][0][0];
jface = jjface[i];
for (jj = 1; jj < LX1 - 1; jj++)
{
for (ii = 1; ii < LX1 - 1; ii++)
{
idmo[iel][i][0][0][jj][ii] = count;
idmo[ntemp][jface][0][0][jj][ii] = count;
count = count + 1;
}
}
}
}
}
}
}
// for edges on nonconforming faces, copy the mortar points indices
// from neighbors.
for (iel = 0; iel < nelt; iel++)
{
for (i = 0; i < 6; i++)
{
cb = cbc[iel][i];
if (cb == 3)
{
// edges
edgecopy_s(i, iel);
}
// face interior
jface = jjface[i];
if (cb == 3)
{
for (iii = 0; iii < 2; iii++)
{
for (jjj = 0; jjj < 2; jjj++)
{
ntemp = sje[iel][i][jjj][iii];
for (jj = 0; jj < LX1; jj++)
{
for (ii = 0; ii < LX1; ii++)
{
idmo[iel][i][jjj][iii][jj][ii] =
idmo[ntemp][jface][0][0][jj][ii];
}
}
idmo[iel][i][jjj][iii][0][0] = idmo[ntemp][jface][0][0][0][0];
idmo[iel][i][jjj][iii][0][LX1 - 1] = idmo[ntemp][jface][1][0][0][LX1 - 1];
idmo[iel][i][jjj][iii][LX1 - 1][0] = idmo[ntemp][jface][0][1][LX1 - 1][0];
idmo[iel][i][jjj][iii][LX1 - 1][LX1 - 1] =
idmo[ntemp][jface][1][1][LX1 - 1][LX1 - 1];
}
}
}
}
}
}
//-----------------------------------------------------------------
// This subroutine fills array emo.
// emo records all elements sharing the same mortar point
// (only applies to element vertices) .
// emo[n][i][0] gives the element ID of the i'th element sharing
// mortar point n. (emo[n][i][0]=ie), ie is element
// index.
// emo[n][i][1] gives the vertex index of mortar point n on this
// element (emo[n][i][1]=ng), ng is the vertex index.
// nemo[n] records the total number of elements sharing mortar
// point n.
//-----------------------------------------------------------------
void get_emo(int ie, int n, int ng)
{
  // Record that element ie shares mortar point n, where ng is the
  // vertex index of mortar point n on element ie.
  // nemo[n] holds the index of the last filled slot for point n.
  int idx, slot;
  // If element ie is already registered for mortar point n, nothing to do.
  for (idx = 0; idx <= nemo[n]; idx++)
  {
    if (emo[n][idx][0] == ie)
    {
      return;
    }
  }
  // Append the (element, vertex) pair in the next free slot.
  slot = nemo[n] + 1;
  nemo[n] = slot;
  emo[n][slot][0] = ie;
  emo[n][slot][1] = ng;
}
//-----------------------------------------------------------------
// Check whether the i's vertex of element iel is at the same
// location as j's vertex of element ntemp.
//-----------------------------------------------------------------
int ifsame(int iel, int i, int ntemp, int j)
{
  // Return 1 when vertex i of element iel coincides with vertex j of
  // element ntemp, 0 otherwise. A -1 element index means "no element",
  // so the answer is 0.
  // Exact floating-point comparison is intentional here: coinciding
  // vertices are copies of the same coordinate values, not results of
  // independent arithmetic.
  if (iel == -1 || ntemp == -1)
  {
    return 0;
  }
  return xc[iel][i] == xc[ntemp][j] &&
         yc[iel][i] == yc[ntemp][j] &&
         zc[iel][i] == zc[ntemp][j];
}
//-----------------------------------------------------------------
// Assign three consecutive numbers for mor_v, which will
// be assigned to the three interior points of an edge as the
// mortar point indices.
//-----------------------------------------------------------------
void mor_assign(int mor_v[3], int *count)
{
  // Hand out three consecutive mortar point indices starting at *count,
  // storing them in mor_v[0..2]; *count is advanced past the last one.
  // These become the indices of the three interior points of an edge.
  int k;
  for (k = 0; k < 3; k++)
  {
    mor_v[k] = (*count)++;
  }
}
//-----------------------------------------------------------------
// Copy the mortar points index from mor_v to local
// edge ie of the face'th face on element iel.
// The edge is conforming.
//-----------------------------------------------------------------
void mor_edge(int ie, int face, int iel, int mor_v[3])
{
  // Copy the three interior mortar point indices in mor_v onto local
  // edge ie (0..3) of face "face" on element iel. The edge is
  // conforming, so only the [0][0] sub-face of idmo is written.
  // Edge 0 = bottom row, 1 = right column, 2 = top row, 3 = left column
  // of the face's LX1 x LX1 point grid; endpoints are skipped.
  int k;
  switch (ie)
  {
  case 0:
    for (k = 1; k < LX1 - 1; k++)
    {
      idmo[iel][face][0][0][0][k] = mor_v[k - 1];
    }
    break;
  case 1:
    for (k = 1; k < LX1 - 1; k++)
    {
      idmo[iel][face][0][0][k][LX1 - 1] = mor_v[k - 1];
    }
    break;
  case 2:
    for (k = 1; k < LX1 - 1; k++)
    {
      idmo[iel][face][0][0][LX1 - 1][k] = mor_v[k - 1];
    }
    break;
  case 3:
    for (k = 1; k < LX1 - 1; k++)
    {
      idmo[iel][face][0][0][k][0] = mor_v[k - 1];
    }
    break;
  default:
    // ie is expected to be 0..3; other values are ignored,
    // matching the original if/else-if chain.
    break;
  }
}
//------------------------------------------------------------
// Copy mortar points index on edges from neighbor elements
// to an element face of the 3rd type.
//------------------------------------------------------------
void edgecopy_s(int face, int iel)
{
  int ntemp1, ntemp2, ntemp3, ntemp4;
  int edge_g, edge_l, face2, mor_s_v[2][4], i;
  // find four neighbors on this face (3rd type)
  // ntemp1..ntemp4 index the [0][0], [1][0], [0][1] and [1][1]
  // quadrant neighbors of face "face" on element iel.
  ntemp1 = sje[iel][face][0][0];
  ntemp2 = sje[iel][face][1][0];
  ntemp3 = sje[iel][face][0][1];
  ntemp4 = sje[iel][face][1][1];
  // local edge 0
  // mor_s_v is the array of mortar indices to be copied.
  // mor_s_v[0] holds the first (lower/left) half of the edge,
  // mor_s_v[1] the second (upper/right) half; each half is read from
  // the neighbor that abuts that half of the edge.
  nr_init((int *)mor_s_v, 4 * 2, -1);
  for (i = 1; i < LX1 - 1; i++)
  {
    mor_s_v[0][i - 1] = idmo[ntemp1][jjface[face]][0][0][0][i];
  }
  // midpoint of the full edge comes from the adjacent sub-face corner
  mor_s_v[0][LX1 - 2] = idmo[ntemp1][jjface[face]][1][0][0][LX1 - 1];
  for (i = 0; i < LX1 - 1; i++)
  {
    mor_s_v[1][i] = idmo[ntemp2][jjface[face]][0][0][0][i];
  }
  // copy mor_s_v to local edge 0 on this face
  mor_s_e(0, face, iel, mor_s_v);
  // copy mor_s_v to the corresponding edge on the other face sharing
  // local edge 0
  face2 = f_e_ef[face][0];
  edge_g = edgenumber[face][0];
  edge_l = localedgenumber[edge_g][face2];
  mor_s_e(edge_l, face2, iel, mor_s_v);
  // local edge 1
  for (i = 1; i < LX1 - 1; i++)
  {
    mor_s_v[0][i - 1] = idmo[ntemp2][jjface[face]][0][0][i][LX1 - 1];
  }
  mor_s_v[0][LX1 - 2] = idmo[ntemp2][jjface[face]][1][1][LX1 - 1][LX1 - 1];
  mor_s_v[1][0] = idmo[ntemp4][jjface[face]][1][0][0][LX1 - 1];
  for (i = 1; i < LX1 - 1; i++)
  {
    mor_s_v[1][i] = idmo[ntemp4][jjface[face]][0][0][i][LX1 - 1];
  }
  mor_s_e(1, face, iel, mor_s_v);
  face2 = f_e_ef[face][1];
  edge_g = edgenumber[face][1];
  edge_l = localedgenumber[edge_g][face2];
  mor_s_e(edge_l, face2, iel, mor_s_v);
  // local edge 2
  for (i = 1; i < LX1 - 1; i++)
  {
    mor_s_v[0][i - 1] = idmo[ntemp3][jjface[face]][0][0][LX1 - 1][i];
  }
  mor_s_v[0][LX1 - 2] = idmo[ntemp3][jjface[face]][1][1][LX1 - 1][LX1 - 1];
  mor_s_v[1][0] = idmo[ntemp4][jjface[face]][0][1][LX1 - 1][0];
  for (i = 1; i < LX1 - 1; i++)
  {
    mor_s_v[1][i] = idmo[ntemp4][jjface[face]][0][0][LX1 - 1][i];
  }
  mor_s_e(2, face, iel, mor_s_v);
  face2 = f_e_ef[face][2];
  edge_g = edgenumber[face][2];
  edge_l = localedgenumber[edge_g][face2];
  mor_s_e(edge_l, face2, iel, mor_s_v);
  // local edge 3
  for (i = 1; i < LX1 - 1; i++)
  {
    mor_s_v[0][i - 1] = idmo[ntemp1][jjface[face]][0][0][i][0];
  }
  mor_s_v[0][LX1 - 2] = idmo[ntemp1][jjface[face]][0][1][LX1 - 1][0];
  for (i = 0; i < LX1 - 1; i++)
  {
    mor_s_v[1][i] = idmo[ntemp3][jjface[face]][0][0][i][0];
  }
  mor_s_e(3, face, iel, mor_s_v);
  face2 = f_e_ef[face][3];
  edge_g = edgenumber[face][3];
  edge_l = localedgenumber[edge_g][face2];
  mor_s_e(edge_l, face2, iel, mor_s_v);
}
//------------------------------------------------------------
// Copy mortar points index from mor_s_v to local edge n
// on face "face" of element iel. The edge is nonconforming.
//------------------------------------------------------------
// Write the mortar indices held in mor_s_v onto local edge n of face
// "face" on element iel. The edge is nonconforming, so it is covered
// by two mortar halves: mor_s_v[0] supplies the left/bottom part and
// mor_s_v[1] the right/top part.
void mor_s_e(int n, int face, int iel, int mor_s_v[2][4])
{
  int k;
  switch (n)
  {
  case 0:
    for (k = 1; k < LX1; k++)
    {
      idmo[iel][face][0][0][0][k] = mor_s_v[0][k - 1];
    }
    for (k = 0; k < LX1 - 1; k++)
    {
      idmo[iel][face][1][0][0][k] = mor_s_v[1][k];
    }
    break;
  case 1:
    for (k = 1; k < LX1; k++)
    {
      idmo[iel][face][1][0][k][LX1 - 1] = mor_s_v[0][k - 1];
    }
    for (k = 0; k < LX1 - 1; k++)
    {
      idmo[iel][face][1][1][k][LX1 - 1] = mor_s_v[1][k];
    }
    break;
  case 2:
    for (k = 1; k < LX1; k++)
    {
      idmo[iel][face][0][1][LX1 - 1][k] = mor_s_v[0][k - 1];
    }
    for (k = 0; k < LX1 - 1; k++)
    {
      idmo[iel][face][1][1][LX1 - 1][k] = mor_s_v[1][k];
    }
    break;
  case 3:
    for (k = 1; k < LX1; k++)
    {
      idmo[iel][face][0][0][k][0] = mor_s_v[0][k - 1];
    }
    for (k = 0; k < LX1 - 1; k++)
    {
      idmo[iel][face][0][1][k][0] = mor_s_v[1][k];
    }
    break;
  default:
    break;
  }
}
//------------------------------------------------------------
// Copy mortar point indices from mor_s_v to local edge n
// on face "face" of element iel. nn is the edge mortar index,
// which indicates that mor_s_v corresponds to left/bottom or
// right/top part of the edge.
//------------------------------------------------------------
// Write the mortar indices in mor_s_v onto one half of local edge n
// on face "face" of element iel. nn selects which half: nn == 0
// fills the left/bottom mortar segment, any other value fills the
// right/top segment.
void mor_s_e_nn(int n, int face, int iel, int mor_s_v[4], int nn)
{
  int k;
  switch (n)
  {
  case 0:
    if (nn == 0)
    {
      for (k = 1; k < LX1; k++)
      {
        idmo[iel][face][0][0][0][k] = mor_s_v[k - 1];
      }
    }
    else
    {
      for (k = 0; k < LX1 - 1; k++)
      {
        idmo[iel][face][1][0][0][k] = mor_s_v[k];
      }
    }
    break;
  case 1:
    if (nn == 0)
    {
      for (k = 1; k < LX1; k++)
      {
        idmo[iel][face][1][0][k][LX1 - 1] = mor_s_v[k - 1];
      }
    }
    else
    {
      for (k = 0; k < LX1 - 1; k++)
      {
        idmo[iel][face][1][1][k][LX1 - 1] = mor_s_v[k];
      }
    }
    break;
  case 2:
    if (nn == 0)
    {
      for (k = 1; k < LX1; k++)
      {
        idmo[iel][face][0][1][LX1 - 1][k] = mor_s_v[k - 1];
      }
    }
    else
    {
      for (k = 0; k < LX1 - 1; k++)
      {
        idmo[iel][face][1][1][LX1 - 1][k] = mor_s_v[k];
      }
    }
    break;
  case 3:
    if (nn == 0)
    {
      for (k = 1; k < LX1; k++)
      {
        idmo[iel][face][0][0][k][0] = mor_s_v[k - 1];
      }
    }
    else
    {
      for (k = 0; k < LX1 - 1; k++)
      {
        idmo[iel][face][0][1][k][0] = mor_s_v[k];
      }
    }
    break;
  default:
    break;
  }
}
//---------------------------------------------------------------
// Assign mortar point index "count" to iel's i'th vertex
// and also to all elements sharing this vertex.
//---------------------------------------------------------------
// Assign mortar point index "count" to vertex i of element iel and to
// the same vertex on every element that shares it (up to 8 elements
// can share one vertex). The search proceeds in three stages:
//   1) the three face neighbors of iel (through the faces in face_a),
//   2) the three neighbors sharing the vertex through an edge only,
//   3) the single neighbor sharing the vertex through the vertex only.
//   i     - local vertex number on iel (0..7)
//   iel   - element index
//   count - mortar point index to assign
void mortar_vertex(int i, int iel, int count)
{
  int ntempx[8], ifntempx[8], lc_a[3], nnb[3];
  int face_a[3], itemp, ntemp, ii, jj, j[3];
  int iintempx[3], l, nbe, lc, temp;
  int if_temp;
  // start with all candidate neighbors marked nonexistent
  for (l = 0; l < 8; l++)
  {
    ntempx[l] = -1;
    ifntempx[l] = -1;
  }
  // face_a records the three faces sharing this vertex on iel.
  // lc_a gives the local corner number of this vertex on each
  // face in face_a.
  for (l = 0; l < 3; l++)
  {
    face_a[l] = f_c[i][l];
    lc_a[l] = local_corner[face_a[l]][i];
  }
  // each vertex is shared by at most 8 elements.
  // ntempx[j] gives the element index of a POSSIBLE element with its
  // j'th vertex is iel's i'th vertex
  // ifntempx[i]=ntempx[i] means ntempx[i] exists
  // ifntempx[i]=-1 means ntempx[i] does not exist.
  // iel itself always exists; its slot is 7-i by the vertex-numbering
  // convention used by cal_intempx (presumably opposite-corner; see
  // the tables -- TODO confirm against their definition).
  ntempx[7 - i] = iel;
  ifntempx[7 - i] = iel;
  // first find all elements sharing this vertex, ifntempx
  // find the three possible neighbors of iel, neighbored by faces
  // listed in array face_a
  for (itemp = 0; itemp < 3; itemp++)
  {
    // j[itemp] is the local corner number of this vertex on the
    // neighbor element on the corresponding face.
    j[itemp] = c_f[jjface[face_a[itemp]]][lc_a[itemp]];
    // iintempx[itemp] records the vertex index of i on the
    // neighbor element, neighbored by face_a[itemp]
    iintempx[itemp] = cal_intempx[face_a[itemp]][lc_a[itemp]];
    // ntemp refers the neighbor element
    ntemp = -1;
    // if the face is nonconforming, find out in which piece of the
    // mortar the vertex is located
    ii = cal_iijj[lc_a[itemp]][0];
    jj = cal_iijj[lc_a[itemp]][1];
    ntemp = sje[iel][face_a[itemp]][jj][ii];
    // if the face is conforming
    if (ntemp == -1)
    {
      ntemp = sje[iel][face_a[itemp]][0][0];
      // find the possible neighbor
      ntempx[iintempx[itemp]] = ntemp;
      // check whether this possible neighbor is a real neighbor or not
      if (ntemp != -1)
      {
        if (ifsame(ntemp, j[itemp], iel, i))
        {
          ifntempx[iintempx[itemp]] = ntemp;
        }
      }
      // if the face is nonconforming
    }
    else
    {
      if (ntemp != -1)
      {
        if (ifsame(ntemp, j[itemp], iel, i))
        {
          ifntempx[iintempx[itemp]] = ntemp;
          ntempx[iintempx[itemp]] = ntemp;
        }
      }
    }
  }
  // find the possible three neighbors, neighbored by an edge only
  for (l = 0; l < 3; l++)
  {
    // find first existing neighbor of any of the faces in array face_a
    // (if_temp is set when direction l is the first one to inspect:
    // l==0 always, otherwise only when all earlier directions found
    // no face neighbor)
    if_temp = 0;
    if (l == 0)
    {
      if_temp = 1;
    }
    else if (l == 1)
    {
      if (ifntempx[iintempx[l - 1]] == -1)
      {
        if_temp = 1;
      }
    }
    else if (l == 2)
    {
      if (ifntempx[iintempx[l - 1]] == -1 && ifntempx[iintempx[l - 2]] == -1)
      {
        if_temp = 1;
      }
    }
    if (if_temp)
    {
      if (ifntempx[iintempx[l]] != -1)
      {
        nbe = ifntempx[iintempx[l]];
        // if 1st neighor exists, check the neighbor's two neighbors in
        // the other two directions.
        // e.g. if l=0, check directions 1 and 2,i.e. itemp=1,2,1
        //      if l=1, itemp=2,0,-2
        //      if l=2, itemp=0,1,1
        itemp = face_l1[l];
        // l==1 walks itemp downward (face_ld[l] negative -- TODO
        // confirm from table); the loop condition flips accordingly
        while ((l != 1 && itemp <= face_l2[l]) ||
               (l == 1 && itemp >= face_l2[l]))
        {
          // lc is the local corner number of this vertex on face face_a[itemp]
          // on the neighbor element of iel, neighbored by a face face_a[l]
          lc = local_corner[face_a[itemp]][j[l]];
          // temp is the vertex index of this vertex on the neighbor element
          // neighbored by an edge
          temp = cal_intempx[face_a[itemp]][lc];
          ii = cal_iijj[lc][0];
          jj = cal_iijj[lc][1];
          ntemp = sje[nbe][face_a[itemp]][jj][ii];
          // if the face face_a[itemp] is conforming
          if (ntemp == -1)
          {
            ntemp = sje[nbe][face_a[itemp]][0][0];
            if (ntemp != -1)
            {
              if (ifsame(ntemp, c_f[jjface[face_a[itemp]]][lc], nbe, j[l]))
              {
                ntempx[temp] = ntemp;
                ifntempx[temp] = ntemp;
                // nnb[itemp] records the neighbor element neighbored by an
                // edge only
                nnb[itemp] = ntemp;
              }
            }
            // if the face face_a[itemp] is nonconforming
          }
          else
          {
            if (ntemp != -1)
            {
              if (ifsame(ntemp, c_f[jjface[face_a[itemp]]][lc], nbe, j[l]))
              {
                ntempx[temp] = ntemp;
                ifntempx[temp] = ntemp;
                nnb[itemp] = ntemp;
              }
            }
          }
          itemp += face_ld[l];
        }
        // check the last neighbor element, neighbored by an edge
        // ifntempx[iintempx[l]] has been visited in the above, now
        // check another neighbor element(nbe) neighbored by a face
        // if the neighbor element is neighbored by face
        // face_a[face_l1[l]] exists
        if (ifntempx[iintempx[face_l1[l]]] != -1)
        {
          nbe = ifntempx[iintempx[face_l1[l]]];
          // itemp is the last direction other than l and face_l1[l]
          itemp = face_l2[l];
          lc = local_corner[face_a[itemp]][j[face_l1[l]]];
          temp = cal_intempx[face_a[itemp]][lc];
          ii = cal_iijj[lc][0];
          jj = cal_iijj[lc][1];
          // ntemp records the last neighbor element neighbored by an edge
          // with element iel
          ntemp = sje[nbe][face_a[itemp]][jj][ii];
          // if conforming
          if (ntemp == -1)
          {
            ntemp = sje[nbe][face_a[itemp]][0][0];
            if (ntemp != -1)
            {
              if (ifsame(ntemp, c_f[jjface[face_a[itemp]]][lc], nbe,
                         j[face_l1[l]]))
              {
                ntempx[temp] = ntemp;
                ifntempx[temp] = ntemp;
                nnb[l] = ntemp;
              }
            }
            // if nonconforming
          }
          else
          {
            if (ntemp != -1)
            {
              if (ifsame(ntemp, c_f[jjface[face_a[itemp]]][lc], nbe,
                         j[face_l1[l]]))
              {
                ntempx[temp] = ntemp;
                ifntempx[temp] = ntemp;
                nnb[l] = ntemp;
              }
            }
          }
          // if the neighbor element neighbored by face face_a[face_l2[l]]
          // does not exist
        }
        else if (ifntempx[iintempx[face_l2[l]]] != -1)
        {
          nbe = ifntempx[iintempx[face_l2[l]]];
          itemp = face_l1[l];
          lc = local_corner[face_a[itemp]][j[face_l2[l]]];
          temp = cal_intempx[face_a[itemp]][lc];
          ii = cal_iijj[lc][0];
          jj = cal_iijj[lc][1];
          ntemp = sje[nbe][face_a[itemp]][jj][ii];
          if (ntemp == -1)
          {
            ntemp = sje[nbe][face_a[itemp]][0][0];
            if (ntemp != -1)
            {
              if (ifsame(ntemp, c_f[jjface[face_a[itemp]]][lc], nbe,
                         j[face_l2[l]]))
              {
                ntempx[temp] = ntemp;
                ifntempx[temp] = ntemp;
                nnb[l] = ntemp;
              }
            }
          }
          else
          {
            if (ntemp != -1)
            {
              if (ifsame(ntemp, c_f[jjface[face_a[itemp]]][lc], nbe,
                         j[face_l2[l]]))
              {
                ntempx[temp] = ntemp;
                ifntempx[temp] = ntemp;
                nnb[l] = ntemp;
              }
            }
          }
        }
      }
    }
  }
  // check the neighbor element, neighbored by a vertex only
  // nnb are the three possible neighbor elements neighbored by an edge
  nnb[0] = ifntempx[cal_nnb[i][0]];
  nnb[1] = ifntempx[cal_nnb[i][1]];
  nnb[2] = ifntempx[cal_nnb[i][2]];
  ntemp = -1;
  // the neighbor element neighbored by a vertex must be a neighbor of
  // a valid(non-negative) nnb[i], neighbored by a face
  if (nnb[0] != -1)
  {
    lc = oplc[local_corner[face_a[2]][i]];
    ii = cal_iijj[lc][0];
    jj = cal_iijj[lc][1];
    // ntemp records the neighbor of iel, neighbored by vertex i
    ntemp = sje[nnb[0]][face_a[2]][jj][ii];
    // temp is the vertex index of i on ntemp
    temp = cal_intempx[face_a[2]][lc];
    if (ntemp == -1)
    {
      ntemp = sje[nnb[0]][face_a[2]][0][0];
      if (ntemp != -1)
      {
        if (ifsame(ntemp, c_f[jjface[face_a[2]]][lc], iel, i))
        {
          ntempx[temp] = ntemp;
          ifntempx[temp] = ntemp;
        }
      }
    }
    else
    {
      if (ntemp != -1)
      {
        if (ifsame(ntemp, c_f[jjface[face_a[2]]][lc], iel, i))
        {
          ntempx[temp] = ntemp;
          ifntempx[temp] = ntemp;
        }
      }
    }
  }
  else if (nnb[1] != -1)
  {
    lc = oplc[local_corner[face_a[0]][i]];
    ii = cal_iijj[lc][0];
    jj = cal_iijj[lc][1];
    ntemp = sje[nnb[1]][face_a[0]][jj][ii];
    temp = cal_intempx[face_a[0]][lc];
    if (ntemp == -1)
    {
      ntemp = sje[nnb[1]][face_a[0]][0][0];
      if (ntemp != -1)
      {
        if (ifsame(ntemp, c_f[jjface[face_a[0]]][lc], iel, i))
        {
          ntempx[temp] = ntemp;
          ifntempx[temp] = ntemp;
        }
      }
    }
    else
    {
      if (ntemp != -1)
      {
        if (ifsame(ntemp, c_f[jjface[face_a[0]]][lc], iel, i))
        {
          ntempx[temp] = ntemp;
          ifntempx[temp] = ntemp;
        }
      }
    }
  }
  else if (nnb[2] != -1)
  {
    lc = oplc[local_corner[face_a[1]][i]];
    ii = cal_iijj[lc][0];
    jj = cal_iijj[lc][1];
    ntemp = sje[nnb[2]][face_a[1]][jj][ii];
    temp = cal_intempx[face_a[1]][lc];
    if (ntemp == -1)
    {
      ntemp = sje[nnb[2]][face_a[1]][0][0];
      if (ntemp != -1)
      {
        if (ifsame(ntemp, c_f[jjface[face_a[1]]][lc], iel, i))
        {
          ifntempx[temp] = ntemp;
          ntempx[temp] = ntemp;
        }
      }
    }
    else
    {
      if (ntemp != -1)
      {
        if (ifsame(ntemp, c_f[jjface[face_a[1]]][lc], iel, i))
        {
          ifntempx[temp] = ntemp;
          ntempx[temp] = ntemp;
        }
      }
    }
  }
  // ifntempx records all elements sharing this vertex, assign count
  // to all these elements. Each vertex touches three faces of its
  // element; count is stored at the corresponding corner mortar slot
  // on all three faces, and get_emo records (element, corner) pairs.
  if (ifntempx[0] != -1)
  {
    idmo[ntempx[0]][0][1][1][LX1 - 1][LX1 - 1] = count;
    idmo[ntempx[0]][2][1][1][LX1 - 1][LX1 - 1] = count;
    idmo[ntempx[0]][4][1][1][LX1 - 1][LX1 - 1] = count;
    get_emo(ntempx[0], count, 7);
  }
  if (ifntempx[1] != -1)
  {
    idmo[ntempx[1]][1][1][1][LX1 - 1][LX1 - 1] = count;
    idmo[ntempx[1]][2][0][1][LX1 - 1][0] = count;
    idmo[ntempx[1]][4][0][1][LX1 - 1][0] = count;
    get_emo(ntempx[1], count, 6);
  }
  if (ifntempx[2] != -1)
  {
    idmo[ntempx[2]][0][0][1][LX1 - 1][0] = count;
    idmo[ntempx[2]][3][1][1][LX1 - 1][LX1 - 1] = count;
    idmo[ntempx[2]][4][1][0][0][LX1 - 1] = count;
    get_emo(ntempx[2], count, 5);
  }
  if (ifntempx[3] != -1)
  {
    idmo[ntempx[3]][1][0][1][LX1 - 1][0] = count;
    idmo[ntempx[3]][3][0][1][LX1 - 1][0] = count;
    idmo[ntempx[3]][4][0][0][0][0] = count;
    get_emo(ntempx[3], count, 4);
  }
  if (ifntempx[4] != -1)
  {
    idmo[ntempx[4]][0][1][0][0][LX1 - 1] = count;
    idmo[ntempx[4]][2][1][0][0][LX1 - 1] = count;
    idmo[ntempx[4]][5][1][1][LX1 - 1][LX1 - 1] = count;
    get_emo(ntempx[4], count, 3);
  }
  if (ifntempx[5] != -1)
  {
    idmo[ntempx[5]][1][1][0][0][LX1 - 1] = count;
    idmo[ntempx[5]][2][0][0][0][0] = count;
    idmo[ntempx[5]][5][0][1][LX1 - 1][0] = count;
    get_emo(ntempx[5], count, 2);
  }
  if (ifntempx[6] != -1)
  {
    idmo[ntempx[6]][0][0][0][0][0] = count;
    idmo[ntempx[6]][3][1][0][0][LX1 - 1] = count;
    idmo[ntempx[6]][5][1][0][0][LX1 - 1] = count;
    get_emo(ntempx[6], count, 1);
  }
  if (ifntempx[7] != -1)
  {
    idmo[ntempx[7]][1][0][0][0][0] = count;
    idmo[ntempx[7]][3][0][0][0][0] = count;
    idmo[ntempx[7]][5][0][0][0][0] = count;
    get_emo(ntempx[7], count, 0);
  }
}
//---------------------------------------------------------------
// Copy the mortar points index (mor_v + vertex mortar point) from
// edge'th local edge on face'th face on element ntemp to iel.
// ntemp is iel's neighbor, neighbored by this edge only.
// This subroutine is for the situation that iel is of larger
// size than ntemp.
// face, face2 are face indices
// edge and edge2 are local edge numbers of this edge on face and face2
// nn is edge motar index, which indicate whether this edge
// corresponds to the left/bottom or right/top part of the edge
// on iel.
//---------------------------------------------------------------
// Copy the mortar point indices (mor_v plus one vertex mortar point)
// from local edge "edge" on face "face" of element ntemp to iel.
// ntemp neighbors iel by this edge only, and iel is the larger
// element. nn selects whether this data lands on the left/bottom
// (nn == 0) or right/top part of the edge on iel; edge2/face2 name
// the same physical edge as seen from iel's other face sharing it.
void mor_ne(int mor_v[3], int nn, int edge, int face,
            int edge2, int face2, int ntemp, int iel)
{
  int k, mor_s_v[4] = {0,};
  // assemble mor_s_v = interior mortar values (mor_v) + the vertex
  // mortar index read from the neighbor element ntemp. For nn == 0
  // the vertex goes in the last slot; otherwise in the first.
  switch (edge)
  {
  case 0:
    if (nn == 0)
    {
      for (k = 1; k < LX1 - 1; k++)
      {
        mor_s_v[k - 1] = mor_v[k - 1];
      }
      mor_s_v[3] = idmo[ntemp][face][1][0][0][LX1 - 1];
    }
    else
    {
      mor_s_v[0] = idmo[ntemp][face][0][0][0][0];
      for (k = 1; k < LX1 - 1; k++)
      {
        mor_s_v[k] = mor_v[k - 1];
      }
    }
    break;
  case 1:
    if (nn == 0)
    {
      for (k = 1; k < LX1 - 1; k++)
      {
        mor_s_v[k - 1] = mor_v[k - 1];
      }
      mor_s_v[3] = idmo[ntemp][face][1][1][LX1 - 1][LX1 - 1];
    }
    else
    {
      mor_s_v[0] = idmo[ntemp][face][1][0][0][LX1 - 1];
      for (k = 1; k < LX1 - 1; k++)
      {
        mor_s_v[k] = mor_v[k - 1];
      }
    }
    break;
  case 2:
    if (nn == 0)
    {
      for (k = 1; k < LX1 - 1; k++)
      {
        mor_s_v[k - 1] = mor_v[k - 1];
      }
      mor_s_v[3] = idmo[ntemp][face][1][1][LX1 - 1][LX1 - 1];
    }
    else
    {
      mor_s_v[0] = idmo[ntemp][face][0][1][LX1 - 1][0];
      for (k = 1; k < LX1 - 1; k++)
      {
        mor_s_v[k] = mor_v[k - 1];
      }
    }
    break;
  case 3:
    if (nn == 0)
    {
      for (k = 1; k < LX1 - 1; k++)
      {
        mor_s_v[k - 1] = mor_v[k - 1];
      }
      mor_s_v[3] = idmo[ntemp][face][0][1][LX1 - 1][0];
    }
    else
    {
      mor_s_v[0] = idmo[ntemp][face][0][0][0][0];
      for (k = 1; k < LX1 - 1; k++)
      {
        mor_s_v[k] = mor_v[k - 1];
      }
    }
    break;
  default:
    break;
  }
  // copy mor_s_v to iel's local edge(op[edge]), on face jjface[face]
  mor_s_e_nn(op[edge], jjface[face], iel, mor_s_v, nn);
  // copy mor_s_v to iel's local edge(op[edge2]), on face jjface[face2]
  // since this edge is shared by two faces on iel
  mor_s_e_nn(op[edge2], jjface[face2], iel, mor_s_v, nn);
}
//---------------------------------------------------------------
// move element to proper location in morton space filling curve
//---------------------------------------------------------------
// Reorder every element's data (geometry, boundary codes, neighbor
// tables, solution array ta1) into its position on the Morton
// space-filling curve: pass 1 gathers element mt_to_id[iel] into the
// *_new arrays at slot iel, then the *_new arrays are copied back and
// the id<->morton maps become the identity.
// NOTE(review): the firstprivate lists on the pragmas below are part
// of this DataRaceBench test case and are left untouched.
void move()
{
  int i, iside, jface, iel, ntemp, ii1, ii2, n1, n2, cb;
  n2 = 2 * 6 * nelt;
  n1 = n2 * 2;
  // invalidate the destination neighbor tables before the gather
  nr_init((int *)sje_new, n1, -1);
  nr_init((int *)ijel_new, n2, -1);
  #pragma omp parallel for default(shared) private(iel, iside, ii2, ii1, i, jface, cb, ntemp) firstprivate(nelt, mt_to_id, tree, xc, yc, zc, jjface, cbc, sje, id_to_mt, ijel, ta1)
  for (iel = 0; iel < nelt; iel++)
  {
    // i is the current (pre-move) index of the element whose Morton
    // position is iel
    i = mt_to_id[iel];
    treenew[iel] = tree[i];
    copy(xc_new[iel], xc[i], 8);
    copy(yc_new[iel], yc[i], 8);
    copy(zc_new[iel], zc[i], 8);
    for (iside = 0; iside < NSIDES; iside++)
    {
      jface = jjface[iside];
      cb = cbc[i][iside];
      xc_new[iel][iside] = xc[i][iside];
      yc_new[iel][iside] = yc[i][iside];
      zc_new[iel][iside] = zc[i][iside];
      cbc_new[iel][iside] = cb;
      // translate neighbor element ids into their new (Morton) ids
      if (cb == 2)
      {
        ntemp = sje[i][iside][0][0];
        ijel_new[iel][iside][0] = 0;
        ijel_new[iel][iside][1] = 0;
        sje_new[iel][iside][0][0] = id_to_mt[ntemp];
      }
      else if (cb == 1)
      {
        ntemp = sje[i][iside][0][0];
        ijel_new[iel][iside][0] = ijel[i][iside][0];
        ijel_new[iel][iside][1] = ijel[i][iside][1];
        sje_new[iel][iside][0][0] = id_to_mt[ntemp];
      }
      else if (cb == 3)
      {
        // nonconforming face: four smaller neighbors
        for (ii2 = 0; ii2 < 2; ii2++)
        {
          for (ii1 = 0; ii1 < 2; ii1++)
          {
            ntemp = sje[i][iside][ii2][ii1];
            ijel_new[iel][iside][0] = 0;
            ijel_new[iel][iside][1] = 0;
            sje_new[iel][iside][ii2][ii1] = id_to_mt[ntemp];
          }
        }
      }
      else if (cb == 0)
      {
        // boundary face: no neighbors
        sje_new[iel][iside][0][0] = -1;
        sje_new[iel][iside][1][0] = -1;
        sje_new[iel][iside][0][1] = -1;
        sje_new[iel][iside][1][1] = -1;
      }
    }
    copy(ta2[iel][0][0], ta1[i][0][0], NXYZ);
  }
  // scatter phase: publish the reordered arrays back into place
  copy((double *)xc, (double *)xc_new, 8 * nelt);
  copy((double *)yc, (double *)yc_new, 8 * nelt);
  copy((double *)zc, (double *)zc_new, 8 * nelt);
  ncopy((int *)sje, (int *)sje_new, 4 * 6 * nelt);
  ncopy((int *)ijel, (int *)ijel_new, 2 * 6 * nelt);
  ncopy((int *)cbc, (int *)cbc_new, 6 * nelt);
  ncopy((int *)tree, (int *)treenew, nelt);
  copy((double *)ta1, (double *)ta2, NXYZ * nelt);
  // after the move, element order equals Morton order
  #pragma omp parallel for default(shared) private(iel) firstprivate(nelt)
  for (iel = 0; iel < nelt; iel++)
  {
    mt_to_id[iel] = iel;
    id_to_mt[iel] = iel;
  }
}
//------------------------------------------------------------------
// Generate diagonal preconditioner for CG.
// Preconditioner computed in this subroutine is correct only
// for collocation point in element interior, on conforming face
// interior and conforming edge.
//------------------------------------------------------------------
// Build the diagonal (Jacobi) preconditioner for CG. For each element,
// dpcelm accumulates the diagonal of (VISC * Laplacian + 1/dtime * mass)
// using the squared 1-D derivative matrix dxtm1_2 applied along each of
// the three directions; after the stiffness summation the reciprocal is
// taken. The values computed here are correct only for interior
// collocation points, conforming face interiors, and conforming edges
// (nonconforming mortar values are fixed later in setpcmo).
void setuppc()
{
  double dxtm1_2[LX1][LX1], rdtime;
  int ie, k, i, j, q, isize;
  // precompute elementwise squares of the 1-D derivative matrix
  for (j = 0; j < LX1; j++)
  {
    for (i = 0; i < LX1; i++)
    {
      dxtm1_2[j][i] = dxtm1[j][i] * dxtm1[j][i];
    }
  }
  rdtime = 1.0 / dtime;
  #pragma omp parallel for default(shared) private(ie, k, j, i, q, isize) firstprivate(nelt, rdtime, size_e, g1m1_s, dxtm1_2, bm1_s)
  for (ie = 0; ie < nelt; ie++)
  {
    r_init(dpcelm[ie][0][0], NXYZ, 0.0);
    isize = size_e[ie];
    for (k = 0; k < LX1; k++)
    {
      for (j = 0; j < LX1; j++)
      {
        for (i = 0; i < LX1; i++)
        {
          // sum the diagonal Laplacian contribution from each of the
          // three coordinate directions
          for (q = 0; q < LX1; q++)
          {
            dpcelm[ie][k][j][i] = dpcelm[ie][k][j][i] +
              g1m1_s[isize][k][j][q] * dxtm1_2[q][i] +
              g1m1_s[isize][k][q][i] * dxtm1_2[q][j] +
              g1m1_s[isize][q][j][i] * dxtm1_2[q][k];
          }
          // add the time-derivative (mass matrix) term
          dpcelm[ie][k][j][i] = VISC * dpcelm[ie][k][j][i] +
            rdtime * bm1_s[isize][k][j][i];
        }
      }
    }
  }
  // do the stiffness summation
  dssum();
  // take inverse.
  reciprocal((double *)dpcelm, ntot);
  // compute preconditioner on mortar points. NOTE: dpcmor for
  // nonconforming cases will be corrected in subroutine setpcmo
  #pragma omp parallel for default(shared) private(i) firstprivate(nmor)
  for (i = 0; i < nmor; i++)
  {
    dpcmor[i] = 1.0 / dpcmor[i];
  }
}
//--------------------------------------------------------------
// pre-compute elemental contribution to preconditioner
// for all situations
//--------------------------------------------------------------
// Precompute, for every refinement level, the elemental contributions
// to the mortar preconditioner for all geometric situations:
// conforming faces (pcmor_c), nonconforming face interiors/edges
// (pcmor_nc1/nc2/nc0, with symmetry copies into the other three mortar
// quadrants), and the seven vertex configurations (pcmor_cor).
void setpcmo_pre()
{
  int element_size, i, j, ii, jj, col;
  double p[LX1][LX1][LX1], p0[LX1][LX1][LX1], mtemp[LX1][LX1];
  double temp[LX1][LX1][LX1], temp1[LX1][LX1], tmp[LX1][LX1], tig[LX1];
  // corners on face of type 3
  r_init((double *)tcpre, LX1 * LX1, 0.0);
  r_init((double *)tmp, LX1 * LX1, 0.0);
  // NOTE(review): tig is declared [LX1] but initialized with length 5;
  // presumably LX1 == 5 here -- confirm against the header.
  r_init(tig, 5, 0.0);
  tig[0] = 1.0;
  tmp[0][0] = 1.0;
  // tcpre results from mapping a unit spike field (unity at
  // collocation point (0,0), zero elsewhere) on an entire element
  // face to the (0,0) segment of a nonconforming face
  for (i = 1; i < LX1 - 1; i++)
  {
    for (j = 0; j < LX1; j++)
    {
      tmp[0][i] = tmp[0][i] + qbnew[0][j][i - 1] * tig[j];
    }
  }
  for (col = 0; col < LX1; col++)
  {
    tcpre[0][col] = tmp[0][col];
    for (j = 1; j < LX1 - 1; j++)
    {
      for (i = 0; i < LX1; i++)
      {
        tcpre[j][col] = tcpre[j][col] + qbnew[0][i][j - 1] * tmp[i][col];
      }
    }
  }
  for (element_size = 0; element_size < REFINE_MAX; element_size++)
  {
    // for conforming cases
    // pcmor_c[element_size][j][i] records the intermediate value
    // (preconditioner=1/pcmor_c) of the preconditor on collocation
    // point (i,j) on a conforming face of an element of size
    // element_size.
    for (j = 0; j < LX1 / 2 + 1; j++)
    {
      for (i = j; i < LX1 / 2 + 1; i++)
      {
        // apply the Laplacian to a unit spike at (i,j) and exploit the
        // 8-fold symmetry of the reference face to fill all octants
        r_init((double *)p, NXYZ, 0.0);
        p[0][j][i] = 1.0;
        laplacian(temp, p, element_size);
        pcmor_c[element_size][j][i] = temp[0][j][i];
        pcmor_c[element_size][j][LX1 - 1 - i] = temp[0][j][i];
        pcmor_c[element_size][i][j] = temp[0][j][i];
        pcmor_c[element_size][i][LX1 - 1 - j] = temp[0][j][i];
        pcmor_c[element_size][LX1 - 1 - i][j] = temp[0][j][i];
        pcmor_c[element_size][LX1 - 1 - i][LX1 - 1 - j] = temp[0][j][i];
        pcmor_c[element_size][LX1 - 1 - j][i] = temp[0][j][i];
        pcmor_c[element_size][LX1 - 1 - j][LX1 - 1 - i] = temp[0][j][i];
      }
    }
    // for nonconforming cases
    // nonconforming face interior
    // pcmor_nc1[element_size][jj][ii][j][i] records the intermediate
    // preconditioner value on collocation point (i,j) on mortar
    // (ii,jj) on a nonconforming face of an element of size element_
    // size
    for (j = 1; j < LX1; j++)
    {
      for (i = j; i < LX1; i++)
      {
        r_init((double *)mtemp, LX1 * LX1, 0.0);
        r_init((double *)p, NXYZ, 0.0);
        mtemp[j][i] = 1.0;
        // when i, j=LX1-1, mortar points are duplicated, so mtemp needs
        // to be doubled.
        if (i == (LX1 - 1)) mtemp[j][i] = mtemp[j][i] * 2.0;
        if (j == (LX1 - 1)) mtemp[j][i] = mtemp[j][i] * 2.0;
        transf_nc(mtemp, (double (*)[LX1])p);
        laplacian(temp, p, element_size);
        transfb_nc1(temp1, (double (*)[LX1])temp);
        // values at points (i,j) and (j,i) are the same
        pcmor_nc1[element_size][0][0][j][i] = temp1[j][i];
        pcmor_nc1[element_size][0][0][i][j] = temp1[j][i];
      }
      // when i, j=LX1-1, mortar points are duplicated. so pcmor_nc1 needs
      // to be doubled on those points
      pcmor_nc1[element_size][0][0][j][LX1 - 1] =
        pcmor_nc1[element_size][0][0][j][LX1 - 1] * 2.0;
      pcmor_nc1[element_size][0][0][LX1 - 1][j] =
        pcmor_nc1[element_size][0][0][j][LX1 - 1];
    }
    pcmor_nc1[element_size][0][0][LX1 - 1][LX1 - 1] =
      pcmor_nc1[element_size][0][0][LX1 - 1][LX1 - 1] * 2.0;
    // nonconforming edges
    j = 0;
    for (i = 1; i < LX1; i++)
    {
      r_init((double *)mtemp, LX1 * LX1, 0.0);
      r_init((double *)p, NXYZ, 0.0);
      r_init((double *)p0, NXYZ, 0.0);
      mtemp[j][i] = 1.0;
      if (i == (LX1 - 1)) mtemp[j][i] = 2.0;
      transf_nc(mtemp, (double (*)[LX1])p);
      laplacian(temp, p, element_size);
      transfb_nc1(temp1, (double (*)[LX1])temp);
      pcmor_nc1[element_size][0][0][j][i] = temp1[j][i];
      pcmor_nc1[element_size][0][0][i][j] = temp1[j][i];
      for (ii = 0; ii < LX1; ii++)
      {
        // p0 is for the case that a nonconforming edge is shared by
        // two conforming faces
        p0[0][0][ii] = p[0][0][ii];
        for (jj = 0; jj < LX1; jj++)
        {
          // now p is for the case that a nonconforming edge is shared
          // by nonconforming faces
          p[jj][0][ii] = p[0][jj][ii];
        }
      }
      laplacian(temp, p, element_size);
      transfb_nc2(temp1, (double (*)[LX1])temp);
      // pcmor_nc2[element_size][jj][ii][j][i] gives the intermediate
      // preconditioner value on collocation point (i,j) on a
      // nonconforming face of an element with size size_element
      pcmor_nc2[element_size][0][0][j][i] = temp1[j][i] * 2.0;
      pcmor_nc2[element_size][0][0][i][j] =
        pcmor_nc2[element_size][0][0][j][i];
      laplacian(temp, p0, element_size);
      transfb_nc0(temp1, temp);
      // pcmor_nc0[element_size][jj][ii][j][i] gives the intermediate
      // preconditioner value on collocation point (i,j) on a
      // conforming face of an element, which shares a nonconforming
      // edge with another conforming face
      pcmor_nc0[element_size][0][0][j][i] = temp1[j][i];
      pcmor_nc0[element_size][0][0][i][j] = temp1[j][i];
    }
    // double the duplicated edge-end mortar points (j is still 0 here)
    pcmor_nc1[element_size][0][0][j][LX1 - 1] =
      pcmor_nc1[element_size][0][0][j][LX1 - 1] * 2.0;
    pcmor_nc1[element_size][0][0][LX1 - 1][j] =
      pcmor_nc1[element_size][0][0][j][LX1 - 1];
    pcmor_nc2[element_size][0][0][j][LX1 - 1] =
      pcmor_nc2[element_size][0][0][j][LX1 - 1] * 2.0;
    pcmor_nc2[element_size][0][0][LX1 - 1][j] =
      pcmor_nc2[element_size][0][0][j][LX1 - 1];
    pcmor_nc0[element_size][0][0][j][LX1 - 1] =
      pcmor_nc0[element_size][0][0][j][LX1 - 1] * 2.0;
    pcmor_nc0[element_size][0][0][LX1 - 1][j] =
      pcmor_nc0[element_size][0][0][j][LX1 - 1];
    // symmetrical copy
    // fill mortar quadrant (1,0) by mirroring quadrant (0,0) in i
    for (i = 0; i < LX1 - 1; i++)
    {
      pcmor_nc1[element_size][1][0][j][i] =
        pcmor_nc1[element_size][0][0][j][LX1 - 1 - i];
      pcmor_nc0[element_size][1][0][j][i] =
        pcmor_nc0[element_size][0][0][j][LX1 - 1 - i];
      pcmor_nc2[element_size][1][0][j][i] =
        pcmor_nc2[element_size][0][0][j][LX1 - 1 - i];
    }
    for (j = 1; j < LX1; j++)
    {
      for (i = 0; i < LX1 - 1; i++)
      {
        pcmor_nc1[element_size][1][0][j][i] =
          pcmor_nc1[element_size][0][0][j][LX1 - 1 - i];
      }
      i = LX1 - 1;
      pcmor_nc1[element_size][1][0][j][i] =
        pcmor_nc1[element_size][0][0][j][LX1 - 1 - i];
      pcmor_nc0[element_size][1][0][j][i] =
        pcmor_nc0[element_size][0][0][j][LX1 - 1 - i];
      pcmor_nc2[element_size][1][0][j][i] =
        pcmor_nc2[element_size][0][0][j][LX1 - 1 - i];
    }
    // fill mortar quadrant (0,1) by mirroring quadrant (0,0) in j
    j = 0;
    i = 0;
    pcmor_nc1[element_size][0][1][j][i] =
      pcmor_nc1[element_size][0][0][LX1 - 1 - j][i];
    pcmor_nc0[element_size][0][1][j][i] =
      pcmor_nc0[element_size][0][0][LX1 - 1 - j][i];
    pcmor_nc2[element_size][0][1][j][i] =
      pcmor_nc2[element_size][0][0][LX1 - 1 - j][i];
    for (j = 1; j < LX1 - 1; j++)
    {
      i = 0;
      pcmor_nc1[element_size][0][1][j][i] =
        pcmor_nc1[element_size][0][0][LX1 - 1 - j][i];
      pcmor_nc0[element_size][0][1][j][i] =
        pcmor_nc0[element_size][0][0][LX1 - 1 - j][i];
      pcmor_nc2[element_size][0][1][j][i] =
        pcmor_nc2[element_size][0][0][LX1 - 1 - j][i];
      for (i = 1; i < LX1; i++)
      {
        pcmor_nc1[element_size][0][1][j][i] =
          pcmor_nc1[element_size][0][0][LX1 - 1 - j][i];
      }
    }
    j = LX1 - 1;
    for (i = 1; i < LX1; i++)
    {
      pcmor_nc1[element_size][0][1][j][i] =
        pcmor_nc1[element_size][0][0][LX1 - 1 - j][i];
      pcmor_nc0[element_size][0][1][j][i] =
        pcmor_nc0[element_size][0][0][LX1 - 1 - j][i];
      pcmor_nc2[element_size][0][1][j][i] =
        pcmor_nc2[element_size][0][0][LX1 - 1 - j][i];
    }
    // fill mortar quadrant (1,1) by mirroring quadrant (0,0) in both
    j = 0;
    i = LX1 - 1;
    pcmor_nc1[element_size][1][1][j][i] =
      pcmor_nc1[element_size][0][0][LX1 - 1 - j][LX1 - 1 - i];
    pcmor_nc0[element_size][1][1][j][i] =
      pcmor_nc0[element_size][0][0][LX1 - 1 - j][LX1 - 1 - i];
    pcmor_nc2[element_size][1][1][j][i] =
      pcmor_nc2[element_size][0][0][LX1 - 1 - j][LX1 - 1 - i];
    for (j = 1; j < LX1 - 1; j++)
    {
      for (i = 1; i < LX1 - 1; i++)
      {
        pcmor_nc1[element_size][1][1][j][i] =
          pcmor_nc1[element_size][0][0][LX1 - 1 - j][LX1 - 1 - i];
      }
      i = LX1 - 1;
      pcmor_nc1[element_size][1][1][j][i] =
        pcmor_nc1[element_size][0][0][LX1 - 1 - j][LX1 - 1 - i];
      pcmor_nc0[element_size][1][1][j][i] =
        pcmor_nc0[element_size][0][0][LX1 - 1 - j][LX1 - 1 - i];
      pcmor_nc2[element_size][1][1][j][i] =
        pcmor_nc2[element_size][0][0][LX1 - 1 - j][LX1 - 1 - i];
    }
    j = LX1 - 1;
    for (i = 1; i < LX1 - 1; i++)
    {
      pcmor_nc1[element_size][1][1][j][i] =
        pcmor_nc1[element_size][0][0][LX1 - 1 - j][LX1 - 1 - i];
      pcmor_nc0[element_size][1][1][j][i] =
        pcmor_nc0[element_size][0][0][LX1 - 1 - j][LX1 - 1 - i];
      pcmor_nc2[element_size][1][1][j][i] =
        pcmor_nc2[element_size][0][0][LX1 - 1 - j][LX1 - 1 - i];
    }
    // vertices shared by at least one nonconforming face or edge
    // Among three edges and three faces sharing a vertex on an element
    // situation 1: only one edge is nonconforming
    // situation 2: two edges are nonconforming
    // situation 3: three edges are nonconforming
    // situation 4: one face is nonconforming
    // situation 5: one face and one edge are nonconforming
    // situation 6: two faces are nonconforming
    // situation 7: three faces are nonconforming
    r_init((double *)p0, NXYZ, 0.0);
    p0[0][0][0] = 1.0;
    laplacian(temp, p0, element_size);
    // fully conforming vertex value stored in slot 7
    pcmor_cor[element_size][7] = temp[0][0][0];
    // situation 1
    r_init((double *)p0, NXYZ, 0.0);
    for (i = 0; i < LX1; i++)
    {
      p0[0][0][i] = tcpre[0][i];
    }
    laplacian(temp, p0, element_size);
    transfb_cor_e(1, &pcmor_cor[element_size][0], temp);
    // situation 2
    r_init((double *)p0, NXYZ, 0.0);
    for (i = 0; i < LX1; i++)
    {
      p0[0][0][i] = tcpre[0][i];
      p0[0][i][0] = tcpre[0][i];
    }
    laplacian(temp, p0, element_size);
    transfb_cor_e(2, &pcmor_cor[element_size][1], temp);
    // situation 3
    r_init((double *)p0, NXYZ, 0.0);
    for (i = 0; i < LX1; i++)
    {
      p0[0][0][i] = tcpre[0][i];
      p0[0][i][0] = tcpre[0][i];
      p0[i][0][0] = tcpre[0][i];
    }
    laplacian(temp, p0, element_size);
    transfb_cor_e(3, &pcmor_cor[element_size][2], temp);
    // situation 4
    r_init((double *)p0, NXYZ, 0.0);
    for (j = 0; j < LX1; j++)
    {
      for (i = 0; i < LX1; i++)
      {
        p0[0][j][i] = tcpre[j][i];
      }
    }
    laplacian(temp, p0, element_size);
    transfb_cor_f(4, &pcmor_cor[element_size][3], temp);
    // situation 5
    r_init((double *)p0, NXYZ, 0.0);
    for (j = 0; j < LX1; j++)
    {
      for (i = 0; i < LX1; i++)
      {
        p0[0][j][i] = tcpre[j][i];
      }
    }
    for (i = 0; i < LX1; i++)
    {
      p0[i][0][0] = tcpre[0][i];
    }
    laplacian(temp, p0, element_size);
    transfb_cor_f(5, &pcmor_cor[element_size][4], temp);
    // situation 6
    r_init((double *)p0, NXYZ, 0.0);
    for (j = 0; j < LX1; j++)
    {
      for (i = 0; i < LX1; i++)
      {
        p0[0][j][i] = tcpre[j][i];
        p0[j][0][i] = tcpre[j][i];
      }
    }
    laplacian(temp, p0, element_size);
    transfb_cor_f(6, &pcmor_cor[element_size][5], temp);
    // situation 7 (note: p0 is not re-zeroed here; it extends the
    // situation-6 field with the third face -- presumably intentional
    // since situation 7 is a superset of 6's pattern)
    for (j = 0; j < LX1; j++)
    {
      for (i = 0; i < LX1; i++)
      {
        p0[0][j][i] = tcpre[j][i];
        p0[j][0][i] = tcpre[j][i];
        p0[j][i][0] = tcpre[j][i];
      }
    }
    laplacian(temp, p0, element_size);
    transfb_cor_f(7, &pcmor_cor[element_size][6], temp);
  }
}
//------------------------------------------------------------------------
// compute the preconditioner by identifying its geometry configuration
// and sum the values from the precomputed elemental contributions
//------------------------------------------------------------------------
// Assemble the mortar preconditioner dpcmor for nonconforming faces by
// identifying each mortar point's geometric configuration and summing
// the precomputed elemental contributions from setpcmo_pre: vertices
// via pc_corner, edges via com_dpc (with neighbor bookkeeping in
// edgevis to avoid double-visiting), and face/edge interiors directly.
void setpcmo()
{
  int face2, nb1, nb2, sizei, imor, _enum, i, j, iel, iside, nn1, nn2;
  // reset the "already visited" flags for mortar points and edges
  l_init(ifpcmor, nvertex, 0);
  l_init((int *)edgevis, 24 * nelt, 0);
  for (iel = 0; iel < nelt; iel++)
  {
    for (iside = 0; iside < NSIDES; iside++)
    {
      // for nonconforming faces
      if (cbc[iel][iside] == 3)
      {
        sizei = size_e[iel];
        // vertices
        // ifpcmor[imor] = 1 indicates that mortar point imor has
        // been visited
        imor = idmo[iel][iside][0][0][0][0];
        if (!ifpcmor[imor])
        {
          // compute the preconditioner on mortar point imor
          pc_corner(imor);
          ifpcmor[imor] = 1;
        }
        imor = idmo[iel][iside][1][0][0][LX1 - 1];
        if (!ifpcmor[imor])
        {
          pc_corner(imor);
          ifpcmor[imor] = 1;
        }
        imor = idmo[iel][iside][0][1][LX1 - 1][0];
        if (!ifpcmor[imor])
        {
          pc_corner(imor);
          ifpcmor[imor] = 1;
        }
        imor = idmo[iel][iside][1][1][LX1 - 1][LX1 - 1];
        if (!ifpcmor[imor])
        {
          pc_corner(imor);
          ifpcmor[imor] = 1;
        }
        // edges on nonconforming faces, _enum is local edge number
        for (_enum = 0; _enum < 4; _enum++)
        {
          // edgevis[iel][iside][_enum]=1 indicates that local edge
          // _enum of face iside of iel has been visited
          if (!edgevis[iel][iside][_enum])
          {
            edgevis[iel][iside][_enum] = 1;
            // Examine neighbor element information and dispatch to
            // com_dpc with a case code describing the configuration.
            face2 = f_e_ef[iside][_enum];
            if (cbc[iel][face2] == 2)
            {
              nb1 = sje[iel][face2][0][0];
              if (cbc[nb1][iside] == 2)
              {
                // Compute the preconditioner on local edge _enum on face
                // iside of element iel, 1 is neighborhood information got
                // by examing neighbors(nb1). For detailed meaning of 1,
                // see subroutine com_dpc.
                com_dpc(iside, iel, _enum, 1, sizei);
                nb2 = sje[nb1][iside][0][0];
                // mark the mirrored edge on the far neighbor as visited
                edgevis[nb2][jjface[face2]][op[e_face2[iside][_enum]]] = 1;
              }
              else if (cbc[nb1][iside] == 3)
              {
                com_dpc(iside, iel, _enum, 2, sizei);
                edgevis[nb1][iside][op[_enum]] = 1;
              }
            }
            else if (cbc[iel][face2] == 3)
            {
              edgevis[iel][face2][e_face2[iside][_enum]] = 1;
              nb1 = sje[iel][face2][1][0];
              if (cbc[nb1][iside] == 1)
              {
                com_dpc(iside, iel, _enum, 3, sizei);
                nb2 = sje[nb1][iside][0][0];
                edgevis[nb2][jjface[iside]][op[_enum]] = 1;
                edgevis[nb2][jjface[face2]][op[e_face2[iside][_enum]]] = 1;
              }
              else if (cbc[nb1][iside] == 2)
              {
                com_dpc(iside, iel, _enum, 4, sizei);
              }
            }
            else if (cbc[iel][face2] == 0)
            {
              // face2 is a domain boundary
              com_dpc(iside, iel, _enum, 0, sizei);
            }
          }
        }
        // mortar element interior (not edge of mortar)
        for (nn1 = 0; nn1 < 2; nn1++)
        {
          for (nn2 = 0; nn2 < 2; nn2++)
          {
            for (j = 1; j < LX1 - 1; j++)
            {
              for (i = 1; i < LX1 - 1; i++)
              {
                imor = idmo[iel][iside][nn2][nn1][j][i];
                // sum the nonconforming-side and (next-finer size)
                // conforming-side contributions, then invert
                dpcmor[imor] = 1.0 / (pcmor_nc1[sizei][nn2][nn1][j][i] +
                                      pcmor_c[sizei + 1][j][i]);
              }
            }
          }
        }
        // for i,j=LX1-1 there are duplicated mortar points, so
        // pcmor_c needs to be doubled or quadrupled
        i = LX1 - 1;
        for (j = 1; j < LX1 - 1; j++)
        {
          imor = idmo[iel][iside][0][0][j][i];
          dpcmor[imor] = 1.0 / (pcmor_nc1[sizei][0][0][j][i] +
                                pcmor_c[sizei + 1][j][i] * 2.0);
          imor = idmo[iel][iside][0][1][j][i];
          dpcmor[imor] = 1.0 / (pcmor_nc1[sizei][0][1][j][i] +
                                pcmor_c[sizei + 1][j][i] * 2.0);
        }
        j = LX1 - 1;
        // center point of the face is shared by all four mortars
        imor = idmo[iel][iside][0][0][j][i];
        dpcmor[imor] = 1.0 / (pcmor_nc1[sizei][0][0][j][i] +
                              pcmor_c[sizei + 1][j][i] * 4.0);
        for (i = 1; i < LX1 - 1; i++)
        {
          imor = idmo[iel][iside][0][0][j][i];
          dpcmor[imor] = 1.0 / (pcmor_nc1[sizei][0][0][j][i] +
                                pcmor_c[sizei + 1][j][i] * 2.0);
          imor = idmo[iel][iside][1][0][j][i];
          dpcmor[imor] = 1.0 / (pcmor_nc1[sizei][1][0][j][i] +
                                pcmor_c[sizei + 1][j][i] * 2.0);
        }
      }
    }
  }
}
//------------------------------------------------------------------------
// calculate preconditioner value for vertex with mortar index imor
//------------------------------------------------------------------------
void pc_corner(int imor)
{
double tmortemp;
int inemo, ie, sizei, cornernumber;
int sface, sedge, iiface, iface, iiedge, iedge, n = 0;
tmortemp = 0.0;
// loop over all elements sharing this vertex
#pragma omp parallel for default(shared) private(inemo, iiface, iiedge, ie, sizei, cornernumber, sface, sedge, iface, iedge, n) firstprivate(imor, nemo, emo, size_e, f_c, cbc, e_c, ncon_edge, pcmor_cor) reduction(+ : tmortemp)
for (inemo = 0; inemo <= nemo[imor]; inemo++)
{
ie = emo[imor][inemo][0];
sizei = size_e[ie];
cornernumber = emo[imor][inemo][1];
sface = 0;
sedge = 0;
for (iiface = 0; iiface < 3; iiface++)
{
iface = f_c[cornernumber][iiface];
// sface sums the number of nonconforming faces sharing this vertex on
// one element
if (cbc[ie][iface] == 3)
{
sface = sface + 1;
}
}
// sedge sums the number of nonconforming edges sharing this vertex on
// one element
for (iiedge = 0; iiedge < 3; iiedge++)
{
iedge = e_c[cornernumber][iiedge];
if (ncon_edge[ie][iedge]) sedge = sedge + 1;
}
// each n indicates how many nonconforming faces and nonconforming
// edges share this vertex on an element,
if (sface == 0)
{
if (sedge == 0)
{
n = 7;
}
else if (sedge == 1)
{
n = 0;
}
else if (sedge == 2)
{
n = 1;
}
else if (sedge == 3)
{
n = 2;
}
}
else if (sface == 1)
{
if (sedge == 1)
{
n = 4;
}
else
{
n = 3;
}
}
else if (sface == 2)
{
n = 5;
}
else if (sface == 3)
{
n = 6;
}
// sum the intermediate pre-computed preconditioner values for
// all elements
tmortemp = tmortemp + pcmor_cor[sizei][n];
}
// dpcmor[imor] is the value of the preconditioner on mortar point imor
dpcmor[imor] = 1.0 / tmortemp;
}
//------------------------------------------------------------------------
// Compute preconditioner for local edge enumber of face iside
// on element iel.
// isize is element size,
// n is one of five different configurations
// anc1, ac, anc2, anc0 are coefficients for different edges.
// nc0 refers to nonconforming edge shared by two conforming faces
// nc1 refers to nonconforming edge shared by one nonconforming face
// nc2 refers to nonconforming edges shared by two nonconforming faces
// c refers to conforming edge
//------------------------------------------------------------------------
//------------------------------------------------------------------------
// Compute preconditioner for local edge `enumber` of face `iside` on
// element `iel`. `isize` is the element size; `n` selects one of five
// configurations of neighboring element sizes (see the caller).
// anc1/ac/anc2/anc0 weight the four pre-computed contribution tables:
//   nc0 - nonconforming edge shared by two conforming faces
//   nc1 - nonconforming edge shared by one nonconforming face
//   nc2 - nonconforming edge shared by two nonconforming faces
//   c   - conforming edge
//------------------------------------------------------------------------
void com_dpc(int iside, int iel, int enumber, int n, int isize)
{
  int r1a, r1b, r2a, r2b, ja, jb, ia, ib;
  int i, j, nn1, nn2, imor = 0;
  double anc1, ac, anc2, anc0, temp = 0.0;
  // 1-based inclusive loop ranges; each local edge covers a different
  // strip of the 2x2 mortar pieces on the face
  switch (enumber)
  {
  case 0:
    r1a = 1; r1b = 1; r2a = 1; r2b = 2;
    ja = 1; jb = 1; ia = 2; ib = LX1 - 1;
    break;
  case 1:
    r1a = 1; r1b = 2; r2a = 2; r2b = 2;
    ja = 2; jb = LX1 - 1; ia = LX1; ib = LX1;
    break;
  case 2:
    r1a = 2; r1b = 2; r2a = 1; r2b = 2;
    ja = LX1; jb = LX1; ia = 2; ib = LX1 - 1;
    break;
  case 3:
    r1a = 1; r1b = 2; r2a = 1; r2b = 1;
    ja = 2; jb = LX1 - 1; ia = 1; ib = 1;
    break;
  default:
    // MUST NOT be reachable (caller only passes edges 0..3);
    // assert removed to keep tests portable
    r1a = 0; r1b = 0; r2a = 0; r2b = 0;
    ja = 0; jb = LX1 - 1; ia = 0; ib = 0;
    break;
  }
  // coefficient set for the given neighbor-size configuration n
  switch (n)
  {
  case 1:
    // among the four elements sharing this edge, one has a smaller size
    anc1 = 2.0; ac = 1.0; anc0 = 1.0; anc2 = 0.0;
    break;
  case 2:
    // two (neighbored by a face) are of smaller size
    anc1 = 2.0; ac = 2.0; anc0 = 0.0; anc2 = 0.0;
    break;
  case 3:
    // two (neighbored by an edge) are of smaller size
    anc2 = 2.0; ac = 2.0; anc1 = 0.0; anc0 = 0.0;
    break;
  case 4:
    // three are of smaller size
    anc1 = 0.0; ac = 3.0; anc2 = 1.0; anc0 = 0.0;
    break;
  case 0:
    // on the boundary
    anc1 = 1.0; ac = 1.0; anc2 = 0.0; anc0 = 0.0;
    break;
  default:
    // MUST NOT be reachable; assert removed to keep tests portable
    anc1 = 0.0; ac = 0.0; anc2 = 0.0; anc0 = 0.0;
    break;
  }
  // edge interior: weighted sum of the pre-computed tables at every
  // interior collocation point of the edge
  for (nn2 = r2a - 1; nn2 < r2b; nn2++)
  {
    for (nn1 = r1a - 1; nn1 < r1b; nn1++)
    {
      for (j = ja - 1; j < jb; j++)
      {
        for (i = ia - 1; i < ib; i++)
        {
          imor = idmo[iel][iside][nn2][nn1][j][i];
          temp = anc1 * pcmor_nc1[isize][nn2][nn1][j][i] +
                 ac * pcmor_c[isize + 1][j][i] +
                 anc0 * pcmor_nc0[isize][nn2][nn1][j][i] +
                 anc2 * pcmor_nc2[isize][nn2][nn1][j][i];
          dpcmor[imor] = 1.0 / temp;
        }
      }
    }
  }
  // edge midpoint: a duplicated mortar point, so the conforming
  // contribution pcmor_c is doubled; indices depend on the local edge
  {
    int c2, c1, cj, ci, found = 1;
    switch (enumber)
    {
    case 0:
      c2 = 0; c1 = 0; cj = 0; ci = LX1 - 1;
      break;
    case 1:
      c2 = 1; c1 = 0; cj = LX1 - 1; ci = LX1 - 1;
      break;
    case 2:
      c2 = 0; c1 = 1; cj = LX1 - 1; ci = LX1 - 1;
      break;
    case 3:
      c2 = 0; c1 = 0; cj = LX1 - 1; ci = 0;
      break;
    default:
      c2 = 0; c1 = 0; cj = 0; ci = 0; found = 0;
      break;
    }
    if (found)
    {
      imor = idmo[iel][iside][c2][c1][cj][ci];
      temp = anc1 * pcmor_nc1[isize][c2][c1][cj][ci] +
             ac * pcmor_c[isize + 1][cj][ci] * 2.0 +
             anc0 * pcmor_nc0[isize][c2][c1][cj][ci] +
             anc2 * pcmor_nc2[isize][c2][c1][cj][ci];
    }
  }
  dpcmor[imor] = 1.0 / temp;
}
void create_initial_grid()
{
  int k;
  // start from a single element covering the unit cube
  nelt = 1;
  ntot = nelt * LX1 * LX1 * LX1;
  tree[0] = 1;
  mt_to_id[0] = 0;
  // coordinates of the 8 vertices: x alternates every vertex,
  // y alternates every pair, z flips between the lower and upper four
  for (k = 0; k < 8; k++)
  {
    xc[0][k] = (k % 2) ? 1.0 : 0.0;
    yc[0][k] = ((k / 2) % 2) ? 1.0 : 0.0;
    zc[0][k] = (k < 4) ? 0.0 : 1.0;
  }
  // all six faces start with boundary condition code 0
  for (k = 0; k < 6; k++)
  {
    cbc[0][k] = 0;
  }
}
//-----------------------------------------------------------------
//
// generate
//
// - collocation points
// - weights
// - derivative matrices
// - projection matrices
// - interpolation matrices
//
// associated with the
//
// - gauss-legendre lobatto mesh (suffix m1)
//
//----------------------------------------------------------------
void coef()
{
  int i, j, k;
  // All tables below are hard-coded for a 5-point rule; the explicit
  // indices 0..4 imply LX1 == 5 — TODO confirm LX1 is 5 in this build.
  // for gauss-legendre lobatto mesh (suffix m1)
  // generate collocation points (zgm1) and quadrature weights (wxm1)
  zgm1[0] = -1.0;
  zgm1[1] = -0.65465367070797710;
  zgm1[2] = 0.0;
  zgm1[3] = 0.65465367070797710;
  zgm1[4] = 1.0;
  wxm1[0] = 0.10;
  wxm1[1] = 49.0 / 90.0;
  wxm1[2] = 32.0 / 45.0;
  // weights are symmetric about the midpoint
  wxm1[3] = wxm1[1];
  wxm1[4] = 0.1;
  // 3-D tensor-product weights: w3m1[k][j][i] = wxm1[i]*wxm1[j]*wxm1[k]
  for (k = 0; k < LX1; k++)
  {
    for (j = 0; j < LX1; j++)
    {
      for (i = 0; i < LX1; i++)
      {
        w3m1[k][j][i] = wxm1[i] * wxm1[j] * wxm1[k];
      }
    }
  }
  // generate derivative matrices; only the upper half is tabulated,
  // the rest follows from the symmetry dxm1[j][i] = -dxm1[n-1-j][n-1-i]
  dxm1[0][0] = -5.0;
  dxm1[0][1] = -1.240990253030982;
  dxm1[0][2] = 0.375;
  dxm1[0][3] = -0.2590097469690172;
  dxm1[0][4] = 0.5;
  dxm1[1][0] = 6.756502488724238;
  dxm1[1][1] = 0.0;
  dxm1[1][2] = -1.336584577695453;
  dxm1[1][3] = 0.7637626158259734;
  dxm1[1][4] = -1.410164177942427;
  dxm1[2][0] = -2.666666666666667;
  dxm1[2][1] = 1.745743121887939;
  dxm1[2][2] = 0.0;
  dxm1[2][3] = -dxm1[2][1];
  dxm1[2][4] = -dxm1[2][0];
  // fill rows 3..LX1-1 by the anti-symmetry of the matrix
  for (j = 3; j < LX1; j++)
  {
    for (i = 0; i < LX1; i++)
    {
      dxm1[j][i] = -dxm1[LX1 - 1 - j][LX1 - 1 - i];
    }
  }
  // dxtm1 is the transpose of dxm1
  for (j = 0; j < LX1; j++)
  {
    for (i = 0; i < LX1; i++)
    {
      dxtm1[j][i] = dxm1[i][j];
    }
  }
  // generate projection (mapping) matrices; qbnew[1] is derived from
  // qbnew[0] by reversing both index directions
  qbnew[0][0][0] = -0.1772843218615690;
  qbnew[0][0][1] = 9.375e-02;
  qbnew[0][0][2] = -3.700139242414530e-02;
  qbnew[0][1][0] = 0.7152146412463197;
  qbnew[0][1][1] = -0.2285757930375471;
  qbnew[0][1][2] = 8.333333333333333e-02;
  qbnew[0][2][0] = 0.4398680650316104;
  qbnew[0][2][1] = 0.2083333333333333;
  qbnew[0][2][2] = -5.891568407922938e-02;
  qbnew[0][3][0] = 8.333333333333333e-02;
  qbnew[0][3][1] = 0.3561799597042137;
  qbnew[0][3][2] = -4.854797457965334e-02;
  qbnew[0][4][0] = 0.0;
  qbnew[0][4][1] = 7.03125e-02;
  qbnew[0][4][2] = 0.0;
  for (j = 0; j < LX1; j++)
  {
    for (i = 0; i < 3; i++)
    {
      qbnew[1][j][i] = qbnew[0][LX1 - 1 - j][2 - i];
    }
  }
  // generate interpolation matrices for mesh refinement
  ixtmc1[0][0] = 1.0;
  ixtmc1[0][1] = 0.0;
  ixtmc1[0][2] = 0.0;
  ixtmc1[0][3] = 0.0;
  ixtmc1[0][4] = 0.0;
  ixtmc1[1][0] = 0.3385078435248143;
  ixtmc1[1][1] = 0.7898516348912331;
  ixtmc1[1][2] = -0.1884018684471238;
  ixtmc1[1][3] = 9.202967302175333e-02;
  ixtmc1[1][4] = -3.198728299067715e-02;
  ixtmc1[2][0] = -0.1171875;
  ixtmc1[2][1] = 0.8840317166357952;
  ixtmc1[2][2] = 0.3125;
  ixtmc1[2][3] = -0.118406716635795;
  ixtmc1[2][4] = 0.0390625;
  ixtmc1[3][0] = -7.065070066767144e-02;
  ixtmc1[3][1] = 0.2829703269782467;
  ixtmc1[3][2] = 0.902687582732838;
  ixtmc1[3][3] = -0.1648516348912333;
  ixtmc1[3][4] = 4.984442584781999e-02;
  ixtmc1[4][0] = 0.0;
  ixtmc1[4][1] = 0.0;
  ixtmc1[4][2] = 1.0;
  ixtmc1[4][3] = 0.0;
  ixtmc1[4][4] = 0.0;
  // ixmc1 is the transpose of ixtmc1
  for (j = 0; j < LX1; j++)
  {
    for (i = 0; i < LX1; i++)
    {
      ixmc1[j][i] = ixtmc1[i][j];
    }
  }
  // ixtmc2 is ixtmc1 reversed in both index directions (the mirrored
  // half of the refinement stencil), and ixmc2 is its transpose
  for (j = 0; j < LX1; j++)
  {
    for (i = 0; i < LX1; i++)
    {
      ixtmc2[j][i] = ixtmc1[LX1 - 1 - j][LX1 - 1 - i];
    }
  }
  for (j = 0; j < LX1; j++)
  {
    for (i = 0; i < LX1; i++)
    {
      ixmc2[j][i] = ixtmc2[i][j];
    }
  }
  // solution interpolation matrix for mesh coarsening;
  // map4 is map2 reversed
  map2[0] = -0.1179652785083428;
  map2[1] = 0.5505046330389332;
  map2[2] = 0.7024534364259963;
  map2[3] = -0.1972224518285866;
  map2[4] = 6.222966087199998e-02;
  for (i = 0; i < LX1; i++)
  {
    map4[i] = map2[LX1 - 1 - i];
  }
}
//-------------------------------------------------------------------
//
// routine to generate elemental geometry information on mesh m1,
// (gauss-legendre lobatto mesh).
//
// xrm1_s - dx/dr, dy/dr, dz/dr
// rxm1_s - dr/dx, dr/dy, dr/dz
// g1m1_s geometric factors used in preconditioner computation
// g4m1_s g5m1_s g6m1_s :
// geometric factors used in laplacian operator
// jacm1 - jacobian
// bm1 - mass matrix
// xfrac - will be used in prepwork for calculating collocation
// coordinates
// idel - collocation points index on element boundaries
//------------------------------------------------------------------
void geom1()
{
double temp, temp1, temp2, dtemp;
int isize, i, j, k, ntemp, iel;
for (i = 0; i < LX1; i++)
{
xfrac[i] = zgm1[i] * 0.5 + 0.5;
}
for (isize = 0; isize < REFINE_MAX; isize++)
{
temp = pow(2.0, (-isize - 2));
dtemp = 1.0 / temp;
temp1 = temp * temp * temp;
temp2 = temp * temp;
for (k = 0; k < LX1; k++)
{
for (j = 0; j < LX1; j++)
{
for (i = 0; i < LX1; i++)
{
xrm1_s[isize][k][j][i] = dtemp;
jacm1_s[isize][k][j][i] = temp1;
rxm1_s[isize][k][j][i] = temp2;
g1m1_s[isize][k][j][i] = w3m1[k][j][i] * temp;
bm1_s[isize][k][j][i] = w3m1[k][j][i] * temp1;
g4m1_s[isize][k][j][i] = g1m1_s[isize][k][j][i] / wxm1[i];
g5m1_s[isize][k][j][i] = g1m1_s[isize][k][j][i] / wxm1[j];
g6m1_s[isize][k][j][i] = g1m1_s[isize][k][j][i] / wxm1[k];
}
}
}
}
#pragma omp parallel for default(shared) private(iel, j, i, ntemp)
for (iel = 0; iel < LELT; iel++)
{
ntemp = LX1 * LX1 * LX1 * iel;
for (j = 0; j < LX1; j++)
{
for (i = 0; i < LX1; i++)
{
idel[iel][0][j][i] = ntemp + i * LX1 + j * LX1 * LX1 + LX1 - 1;
idel[iel][1][j][i] = ntemp + i * LX1 + j * LX1 * LX1;
idel[iel][2][j][i] = ntemp + i * 1 + j * LX1 * LX1 + LX1 * (LX1 - 1);
idel[iel][3][j][i] = ntemp + i * 1 + j * LX1 * LX1;
idel[iel][4][j][i] = ntemp + i * 1 + j * LX1 + LX1 * LX1 * (LX1 - 1);
idel[iel][5][j][i] = ntemp + i * 1 + j * LX1;
}
}
}
}
//------------------------------------------------------------------
// compute the discrete laplacian operators
//------------------------------------------------------------------
void setdef()
{
  int col, row, q;
  // wdtdr[row][col] = sum_q wxm1[q] * dxm1[col][q] * dxm1[row][q],
  // the weighted product of derivative-matrix rows used by the
  // discrete laplacian operators
  r_init(wdtdr[0], LX1 * LX1, 0.0);
  for (col = 0; col < LX1; col++)
  {
    for (row = 0; row < LX1; row++)
    {
      for (q = 0; q < LX1; q++)
      {
        wdtdr[row][col] += wxm1[q] * dxm1[col][q] * dxm1[row][q];
      }
    }
  }
}
//------------------------------------------------------------------
// mesh information preparations: calculate refinement levels of
// each element, mask matrix for domain boundary and element
// boundaries
//------------------------------------------------------------------
void prepwork()
{
  int i, j, iel, iface, cb;
  double rdlog2;
  ntot = nelt * NXYZ;
  rdlog2 = 1.0 / log(2.0);
  // calculate the refinement level of each element from its x-extent:
  // level = -log2(width) - 1 (the 1.e-8 guards against rounding down)
  #pragma omp parallel for default(shared) private(iel) firstprivate(nelt, rdlog2, xc)
  for (iel = 0; iel < nelt; iel++)
  {
    size_e[iel] = (int)(-log(xc[iel][1] - xc[iel][0]) * rdlog2 + 1.e-8) - 1;
  }
  // mask matrix for element boundaries: 1 in the interior,
  // 0 on every face
  #pragma omp parallel for default(shared) private(iel, iface) firstprivate(nelt)
  for (iel = 0; iel < nelt; iel++)
  {
    r_init(tmult[iel][0][0], NXYZ, 1.0);
    for (iface = 0; iface < NSIDES; iface++)
    {
      facev(tmult[iel], iface, 0.0);
    }
  }
  // masks for the domain boundary at mortar points: zero every mortar
  // point lying on a face with cb == 0 (boundary face, cf.
  // create_initial_grid which initializes all cbc entries to 0)
  r_init(tmmor, nmor, 1.0);
  for (iel = 0; iel < nelt; iel++)
  {
    for (iface = 0; iface < NSIDES; iface++)
    {
      cb = cbc[iel][iface];
      if (cb == 0)
      {
        // face interior
        for (j = 1; j < LX1 - 1; j++)
        {
          for (i = 1; i < LX1 - 1; i++)
          {
            tmmor[idmo[iel][iface][0][0][j][i]] = 0.0;
          }
        }
        // bottom edge (j == 0)
        j = 0;
        for (i = 0; i < LX1 - 1; i++)
        {
          tmmor[idmo[iel][iface][0][0][j][i]] = 0.0;
        }
        // idmo[...][0][0][0][LX1-1] == -1 appears to distinguish a
        // conforming from a nonconforming edge — the nonconforming
        // case also zeroes the second mortar piece of this edge
        if (idmo[iel][iface][0][0][0][LX1 - 1] == -1)
        {
          tmmor[idmo[iel][iface][1][0][0][LX1 - 1]] = 0.0;
        }
        else
        {
          tmmor[idmo[iel][iface][0][0][0][LX1 - 1]] = 0.0;
          for (i = 0; i < LX1; i++)
          {
            tmmor[idmo[iel][iface][1][0][j][i]] = 0.0;
          }
        }
        // right edge (i == LX1-1)
        i = LX1 - 1;
        if (idmo[iel][iface][1][0][1][LX1 - 1] == -1)
        {
          for (j = 1; j < LX1 - 1; j++)
          {
            tmmor[idmo[iel][iface][0][0][j][i]] = 0.0;
          }
          tmmor[idmo[iel][iface][1][1][LX1 - 1][LX1 - 1]] = 0.0;
        }
        else
        {
          for (j = 1; j < LX1; j++)
          {
            tmmor[idmo[iel][iface][1][0][j][i]] = 0.0;
          }
          for (j = 0; j < LX1; j++)
          {
            tmmor[idmo[iel][iface][1][1][j][i]] = 0.0;
          }
        }
        // top edge (j == LX1-1)
        j = LX1 - 1;
        tmmor[idmo[iel][iface][0][1][LX1 - 1][0]] = 0.0;
        if (idmo[iel][iface][0][1][LX1 - 1][1] == -1)
        {
          for (i = 1; i < LX1 - 1; i++)
          {
            tmmor[idmo[iel][iface][0][0][j][i]] = 0.0;
          }
        }
        else
        {
          for (i = 1; i < LX1; i++)
          {
            tmmor[idmo[iel][iface][0][1][j][i]] = 0.0;
          }
          for (i = 0; i < LX1 - 1; i++)
          {
            tmmor[idmo[iel][iface][1][1][j][i]] = 0.0;
          }
        }
        // left edge (i == 0)
        i = 0;
        for (j = 1; j < LX1 - 1; j++)
        {
          tmmor[idmo[iel][iface][0][0][j][i]] = 0.0;
        }
        if (idmo[iel][iface][0][0][LX1 - 1][0] != -1)
        {
          tmmor[idmo[iel][iface][0][0][LX1 - 1][i]] = 0.0;
          for (j = 0; j < LX1 - 1; j++)
          {
            tmmor[idmo[iel][iface][0][1][j][i]] = 0.0;
          }
        }
      }
    }
  }
}
//------------------------------------------------------------------
// We store some tables of useful topology constants
//------------------------------------------------------------------
void top_constants()
{
  // Fills the file-scope lookup tables encoding the fixed topology of a
  // hexahedral element (6 faces, 12 edges, 8 vertices) and of the eight
  // children created by refinement. All values are constants; only
  // v_end depends on LX1.
  //f_e_ef[f][e] returns the other face sharing the e'th local edge of face f.
  f_e_ef[0][0] = 5;
  f_e_ef[0][1] = 2;
  f_e_ef[0][2] = 4;
  f_e_ef[0][3] = 3;
  f_e_ef[1][0] = 5;
  f_e_ef[1][1] = 2;
  f_e_ef[1][2] = 4;
  f_e_ef[1][3] = 3;
  f_e_ef[2][0] = 5;
  f_e_ef[2][1] = 0;
  f_e_ef[2][2] = 4;
  f_e_ef[2][3] = 1;
  f_e_ef[3][0] = 5;
  f_e_ef[3][1] = 0;
  f_e_ef[3][2] = 4;
  f_e_ef[3][3] = 1;
  f_e_ef[4][0] = 3;
  f_e_ef[4][1] = 0;
  f_e_ef[4][2] = 2;
  f_e_ef[4][3] = 1;
  f_e_ef[5][0] = 3;
  f_e_ef[5][1] = 0;
  f_e_ef[5][2] = 2;
  f_e_ef[5][3] = 1;
  // e_c[j][n] returns the n'th edge sharing the vertex j of an element
  e_c[0][0] = 4;
  e_c[0][1] = 7;
  e_c[0][2] = 10;
  e_c[1][0] = 0;
  e_c[1][1] = 3;
  e_c[1][2] = 10;
  e_c[2][0] = 4;
  e_c[2][1] = 5;
  e_c[2][2] = 8;
  e_c[3][0] = 0;
  e_c[3][1] = 1;
  e_c[3][2] = 8;
  e_c[4][0] = 6;
  e_c[4][1] = 7;
  e_c[4][2] = 11;
  e_c[5][0] = 2;
  e_c[5][1] = 3;
  e_c[5][2] = 11;
  e_c[6][0] = 5;
  e_c[6][1] = 6;
  e_c[6][2] = 9;
  e_c[7][0] = 1;
  e_c[7][1] = 2;
  e_c[7][2] = 9;
  // local_corner[i][n] returns the local corner index of vertex n on face i;
  // -1 marks a vertex that does not lie on that face
  local_corner[0][0] = -1;
  local_corner[0][1] = 0;
  local_corner[0][2] = -1;
  local_corner[0][3] = 1;
  local_corner[0][4] = -1;
  local_corner[0][5] = 2;
  local_corner[0][6] = -1;
  local_corner[0][7] = 3;
  local_corner[1][0] = 0;
  local_corner[1][1] = -1;
  local_corner[1][2] = 1;
  local_corner[1][3] = -1;
  local_corner[1][4] = 2;
  local_corner[1][5] = -1;
  local_corner[1][6] = 3;
  local_corner[1][7] = -1;
  local_corner[2][0] = -1;
  local_corner[2][1] = -1;
  local_corner[2][2] = 0;
  local_corner[2][3] = 1;
  local_corner[2][4] = -1;
  local_corner[2][5] = -1;
  local_corner[2][6] = 2;
  local_corner[2][7] = 3;
  local_corner[3][0] = 0;
  local_corner[3][1] = 1;
  local_corner[3][2] = -1;
  local_corner[3][3] = -1;
  local_corner[3][4] = 2;
  local_corner[3][5] = 3;
  local_corner[3][6] = -1;
  local_corner[3][7] = -1;
  local_corner[4][0] = -1;
  local_corner[4][1] = -1;
  local_corner[4][2] = -1;
  local_corner[4][3] = -1;
  local_corner[4][4] = 0;
  local_corner[4][5] = 1;
  local_corner[4][6] = 2;
  local_corner[4][7] = 3;
  local_corner[5][0] = 0;
  local_corner[5][1] = 1;
  local_corner[5][2] = 2;
  local_corner[5][3] = 3;
  local_corner[5][4] = -1;
  local_corner[5][5] = -1;
  local_corner[5][6] = -1;
  local_corner[5][7] = -1;
  // cal_nnb[i][n] returns the neighbor elements neighbored by the n'th edge
  // among the three edges sharing vertex i.
  // The elements are the eight children elements ordered as 0 to 7.
  cal_nnb[0][0] = 4;
  cal_nnb[0][1] = 1;
  cal_nnb[0][2] = 2;
  cal_nnb[1][0] = 5;
  cal_nnb[1][1] = 0;
  cal_nnb[1][2] = 3;
  cal_nnb[2][0] = 6;
  cal_nnb[2][1] = 3;
  cal_nnb[2][2] = 0;
  cal_nnb[3][0] = 7;
  cal_nnb[3][1] = 2;
  cal_nnb[3][2] = 1;
  cal_nnb[4][0] = 0;
  cal_nnb[4][1] = 5;
  cal_nnb[4][2] = 6;
  cal_nnb[5][0] = 1;
  cal_nnb[5][1] = 4;
  cal_nnb[5][2] = 7;
  cal_nnb[6][0] = 2;
  cal_nnb[6][1] = 7;
  cal_nnb[6][2] = 4;
  cal_nnb[7][0] = 3;
  cal_nnb[7][1] = 6;
  cal_nnb[7][2] = 5;
  // oplc[n] returns the opposite local corner index: 0-3, 1-2
  oplc[0] = 3;
  oplc[1] = 2;
  oplc[2] = 1;
  oplc[3] = 0;
  // cal_iijj[n][i] returns the location of local corner number n on a face;
  // i = 0 to get ii, i = 1 to get jj
  // (ii,jj) is defined the same as in mortar location (ii,jj)
  cal_iijj[0][0] = 0;
  cal_iijj[0][1] = 0;
  cal_iijj[1][0] = 0;
  cal_iijj[1][1] = 1;
  cal_iijj[2][0] = 1;
  cal_iijj[2][1] = 0;
  cal_iijj[3][0] = 1;
  cal_iijj[3][1] = 1;
  // returns the adjacent (neighbored by a face) element's children,
  // assuming a vertex is shared by eight child elements 0-7.
  // Index n is the local corner number on the face which is being
  // assigned the mortar index number.
  cal_intempx[0][0] = 7;
  cal_intempx[0][1] = 5;
  cal_intempx[0][2] = 3;
  cal_intempx[0][3] = 1;
  cal_intempx[1][0] = 6;
  cal_intempx[1][1] = 4;
  cal_intempx[1][2] = 2;
  cal_intempx[1][3] = 0;
  cal_intempx[2][0] = 7;
  cal_intempx[2][1] = 6;
  cal_intempx[2][2] = 3;
  cal_intempx[2][3] = 2;
  cal_intempx[3][0] = 5;
  cal_intempx[3][1] = 4;
  cal_intempx[3][2] = 1;
  cal_intempx[3][3] = 0;
  cal_intempx[4][0] = 7;
  cal_intempx[4][1] = 6;
  cal_intempx[4][2] = 5;
  cal_intempx[4][3] = 4;
  cal_intempx[5][0] = 3;
  cal_intempx[5][1] = 2;
  cal_intempx[5][2] = 1;
  cal_intempx[5][3] = 0;
  // c_f[f][i] returns the vertex number of the i'th local corner on face f
  c_f[0][0] = 1;
  c_f[0][1] = 3;
  c_f[0][2] = 5;
  c_f[0][3] = 7;
  c_f[1][0] = 0;
  c_f[1][1] = 2;
  c_f[1][2] = 4;
  c_f[1][3] = 6;
  c_f[2][0] = 2;
  c_f[2][1] = 3;
  c_f[2][2] = 6;
  c_f[2][3] = 7;
  c_f[3][0] = 0;
  c_f[3][1] = 1;
  c_f[3][2] = 4;
  c_f[3][3] = 5;
  c_f[4][0] = 4;
  c_f[4][1] = 5;
  c_f[4][2] = 6;
  c_f[4][3] = 7;
  c_f[5][0] = 0;
  c_f[5][1] = 1;
  c_f[5][2] = 2;
  c_f[5][3] = 3;
  // on each face of the parent element, there are four children elements.
  // le_arr[n][j][i] returns the i'th element among the four children elements.
  // n refers to the direction: 1 for x, 2 for y and 3 for z direction.
  // j refers to positive(0) or negative(1) direction on x, y or z direction.
  // n=1,j=0 refers to face 1 and n=1,j=1 refers to face 2, n=2,j=0 refers to
  // face 3....
  // The current eight children are ordered as 8,1,2,3,4,5,6,7
  le_arr[0][0][0] = 7;
  le_arr[0][0][1] = 1;
  le_arr[0][0][2] = 3;
  le_arr[0][0][3] = 5;
  le_arr[0][1][0] = 0;
  le_arr[0][1][1] = 2;
  le_arr[0][1][2] = 4;
  le_arr[0][1][3] = 6;
  le_arr[1][0][0] = 7;
  le_arr[1][0][1] = 0;
  le_arr[1][0][2] = 3;
  le_arr[1][0][3] = 4;
  le_arr[1][1][0] = 1;
  le_arr[1][1][1] = 2;
  le_arr[1][1][2] = 5;
  le_arr[1][1][3] = 6;
  le_arr[2][0][0] = 7;
  le_arr[2][0][1] = 0;
  le_arr[2][0][2] = 1;
  le_arr[2][0][3] = 2;
  le_arr[2][1][0] = 3;
  le_arr[2][1][1] = 4;
  le_arr[2][1][2] = 5;
  le_arr[2][1][3] = 6;
  // jjface[n] returns the face opposite to face n
  jjface[0] = 1;
  jjface[1] = 0;
  jjface[2] = 3;
  jjface[3] = 2;
  jjface[4] = 5;
  jjface[5] = 4;
  // edgeface[f][n] returns the OTHER face which shares local edge n on face f
  // (superseded by f_e_ef above; kept for reference)
  // int edgeface[6][4];
  // edgeface[0][0] = 5;
  // edgeface[0][1] = 2;
  // edgeface[0][2] = 4;
  // edgeface[0][3] = 3;
  // edgeface[1][0] = 5;
  // edgeface[1][1] = 2;
  // edgeface[1][2] = 4;
  // edgeface[1][3] = 3;
  // edgeface[2][0] = 5;
  // edgeface[2][1] = 0;
  // edgeface[2][2] = 4;
  // edgeface[2][3] = 1;
  // edgeface[3][0] = 5;
  // edgeface[3][1] = 0;
  // edgeface[3][2] = 4;
  // edgeface[3][3] = 1;
  // edgeface[4][0] = 3;
  // edgeface[4][1] = 0;
  // edgeface[4][2] = 2;
  // edgeface[4][3] = 1;
  // edgeface[5][0] = 3;
  // edgeface[5][1] = 0;
  // edgeface[5][2] = 2;
  // edgeface[5][3] = 1;
  // e_face2[f][n] returns the local edge number of edge n on the
  // other face sharing local edge n on face f
  e_face2[0][0] = 1;
  e_face2[0][1] = 1;
  e_face2[0][2] = 1;
  e_face2[0][3] = 1;
  e_face2[1][0] = 3;
  e_face2[1][1] = 3;
  e_face2[1][2] = 3;
  e_face2[1][3] = 3;
  e_face2[2][0] = 2;
  e_face2[2][1] = 1;
  e_face2[2][2] = 2;
  e_face2[2][3] = 1;
  e_face2[3][0] = 0;
  e_face2[3][1] = 3;
  e_face2[3][2] = 0;
  e_face2[3][3] = 3;
  e_face2[4][0] = 2;
  e_face2[4][1] = 2;
  e_face2[4][2] = 2;
  e_face2[4][3] = 2;
  e_face2[5][0] = 0;
  e_face2[5][1] = 0;
  e_face2[5][2] = 0;
  e_face2[5][3] = 0;
  // op[n] returns the local edge number of the edge which
  // is opposite to local edge n on the same face
  op[0] = 2;
  op[1] = 3;
  op[2] = 0;
  op[3] = 1;
  // localedgenumber[e][f] returns the local edge number for edge e
  // on face f. A negative value signifies illegal input (edge e is
  // not on face f).
  localedgenumber[0][0] = 0;
  localedgenumber[0][1] = -1;
  localedgenumber[0][2] = -1;
  localedgenumber[0][3] = -1;
  localedgenumber[0][4] = -1;
  localedgenumber[0][5] = 1;
  localedgenumber[1][0] = 1;
  localedgenumber[1][1] = -1;
  localedgenumber[1][2] = 1;
  localedgenumber[1][3] = -1;
  localedgenumber[1][4] = -1;
  localedgenumber[1][5] = -1;
  localedgenumber[2][0] = 2;
  localedgenumber[2][1] = -1;
  localedgenumber[2][2] = -1;
  localedgenumber[2][3] = -1;
  localedgenumber[2][4] = 1;
  localedgenumber[2][5] = -1;
  localedgenumber[3][0] = 3;
  localedgenumber[3][1] = -1;
  localedgenumber[3][2] = -1;
  localedgenumber[3][3] = 1;
  localedgenumber[3][4] = -1;
  localedgenumber[3][5] = -1;
  localedgenumber[4][0] = -1;
  localedgenumber[4][1] = 0;
  localedgenumber[4][2] = -1;
  localedgenumber[4][3] = -1;
  localedgenumber[4][4] = -1;
  localedgenumber[4][5] = 3;
  localedgenumber[5][0] = -1;
  localedgenumber[5][1] = 1;
  localedgenumber[5][2] = 3;
  localedgenumber[5][3] = -1;
  localedgenumber[5][4] = -1;
  localedgenumber[5][5] = -1;
  localedgenumber[6][0] = -1;
  localedgenumber[6][1] = 2;
  localedgenumber[6][2] = -1;
  localedgenumber[6][3] = -1;
  localedgenumber[6][4] = 3;
  localedgenumber[6][5] = -1;
  localedgenumber[7][0] = -1;
  localedgenumber[7][1] = 3;
  localedgenumber[7][2] = -1;
  localedgenumber[7][3] = 3;
  localedgenumber[7][4] = -1;
  localedgenumber[7][5] = -1;
  localedgenumber[8][0] = -1;
  localedgenumber[8][1] = -1;
  localedgenumber[8][2] = 0;
  localedgenumber[8][3] = -1;
  localedgenumber[8][4] = -1;
  localedgenumber[8][5] = 2;
  localedgenumber[9][0] = -1;
  localedgenumber[9][1] = -1;
  localedgenumber[9][2] = 2;
  localedgenumber[9][3] = -1;
  localedgenumber[9][4] = 2;
  localedgenumber[9][5] = -1;
  localedgenumber[10][0] = -1;
  localedgenumber[10][1] = -1;
  localedgenumber[10][2] = -1;
  localedgenumber[10][3] = 0;
  localedgenumber[10][4] = -1;
  localedgenumber[10][5] = 0;
  localedgenumber[11][0] = -1;
  localedgenumber[11][1] = -1;
  localedgenumber[11][2] = -1;
  localedgenumber[11][3] = 2;
  localedgenumber[11][4] = 0;
  localedgenumber[11][5] = -1;
  // edgenumber[f][e] returns the edge index of local edge e on face f
  // (the inverse mapping of localedgenumber)
  edgenumber[0][0] = 0;
  edgenumber[0][1] = 1;
  edgenumber[0][2] = 2;
  edgenumber[0][3] = 3;
  edgenumber[1][0] = 4;
  edgenumber[1][1] = 5;
  edgenumber[1][2] = 6;
  edgenumber[1][3] = 7;
  edgenumber[2][0] = 8;
  edgenumber[2][1] = 1;
  edgenumber[2][2] = 9;
  edgenumber[2][3] = 5;
  edgenumber[3][0] = 10;
  edgenumber[3][1] = 3;
  edgenumber[3][2] = 11;
  edgenumber[3][3] = 7;
  edgenumber[4][0] = 11;
  edgenumber[4][1] = 2;
  edgenumber[4][2] = 9;
  edgenumber[4][3] = 6;
  edgenumber[5][0] = 10;
  edgenumber[5][1] = 0;
  edgenumber[5][2] = 8;
  edgenumber[5][3] = 4;
  // f_c[n][c] returns the face index of the c'th face sharing vertex n
  f_c[0][0] = 1;
  f_c[0][1] = 3;
  f_c[0][2] = 5;
  f_c[1][0] = 0;
  f_c[1][1] = 3;
  f_c[1][2] = 5;
  f_c[2][0] = 1;
  f_c[2][1] = 2;
  f_c[2][2] = 5;
  f_c[3][0] = 0;
  f_c[3][1] = 2;
  f_c[3][2] = 5;
  f_c[4][0] = 1;
  f_c[4][1] = 3;
  f_c[4][2] = 4;
  f_c[5][0] = 0;
  f_c[5][1] = 3;
  f_c[5][2] = 4;
  f_c[6][0] = 1;
  f_c[6][1] = 2;
  f_c[6][2] = 4;
  f_c[7][0] = 0;
  f_c[7][1] = 2;
  f_c[7][2] = 4;
  // If two elements are neighbors across one edge:
  // e1v1[f2][f1] returns the smaller index of the two vertices on this
  // edge on one element;
  // e1v2 returns the larger index of the two vertices of this edge on
  // one element;
  // e2v1 returns the smaller index of the two vertices on this edge on
  // the other element;
  // e2v2 returns the larger index of the two vertices on this edge on
  // the other element.
  //e1v1
  e1v1[0][0] = -1;
  e1v1[0][1] = -1;
  e1v1[0][2] = 3;
  e1v1[0][3] = 1;
  e1v1[0][4] = 5;
  e1v1[0][5] = 1;
  e1v1[1][0] = -1;
  e1v1[1][1] = -1;
  e1v1[1][2] = 2;
  e1v1[1][3] = 0;
  e1v1[1][4] = 4;
  e1v1[1][5] = 0;
  e1v1[2][0] = 3;
  e1v1[2][1] = 2;
  e1v1[2][2] = -1;
  e1v1[2][3] = -1;
  e1v1[2][4] = 6;
  e1v1[2][5] = 2;
  e1v1[3][0] = 1;
  e1v1[3][1] = 0;
  e1v1[3][2] = -1;
  e1v1[3][3] = -1;
  e1v1[3][4] = 4;
  e1v1[3][5] = 0;
  e1v1[4][0] = 5;
  e1v1[4][1] = 4;
  e1v1[4][2] = 6;
  e1v1[4][3] = 4;
  e1v1[4][4] = -1;
  e1v1[4][5] = -1;
  e1v1[5][0] = 1;
  e1v1[5][1] = 0;
  e1v1[5][2] = 2;
  e1v1[5][3] = 0;
  e1v1[5][4] = -1;
  e1v1[5][5] = -1;
  //e2v1
  e2v1[0][0] = -1;
  e2v1[0][1] = -1;
  e2v1[0][2] = 0;
  e2v1[0][3] = 2;
  e2v1[0][4] = 0;
  e2v1[0][5] = 4;
  e2v1[1][0] = -1;
  e2v1[1][1] = -1;
  e2v1[1][2] = 1;
  e2v1[1][3] = 3;
  e2v1[1][4] = 1;
  e2v1[1][5] = 5;
  e2v1[2][0] = 0;
  e2v1[2][1] = 1;
  e2v1[2][2] = -1;
  e2v1[2][3] = -1;
  e2v1[2][4] = 0;
  e2v1[2][5] = 4;
  e2v1[3][0] = 2;
  e2v1[3][1] = 3;
  e2v1[3][2] = -1;
  e2v1[3][3] = -1;
  e2v1[3][4] = 2;
  e2v1[3][5] = 6;
  e2v1[4][0] = 0;
  e2v1[4][1] = 1;
  e2v1[4][2] = 0;
  e2v1[4][3] = 2;
  e2v1[4][4] = -1;
  e2v1[4][5] = -1;
  e2v1[5][0] = 4;
  e2v1[5][1] = 5;
  e2v1[5][2] = 4;
  e2v1[5][3] = 6;
  e2v1[5][4] = -1;
  e2v1[5][5] = -1;
  //e1v2
  e1v2[0][0] = -1;
  e1v2[0][1] = -1;
  e1v2[0][2] = 7;
  e1v2[0][3] = 5;
  e1v2[0][4] = 7;
  e1v2[0][5] = 3;
  e1v2[1][0] = -1;
  e1v2[1][1] = -1;
  e1v2[1][2] = 6;
  e1v2[1][3] = 4;
  e1v2[1][4] = 6;
  e1v2[1][5] = 2;
  e1v2[2][0] = 7;
  e1v2[2][1] = 6;
  e1v2[2][2] = -1;
  e1v2[2][3] = -1;
  e1v2[2][4] = 7;
  e1v2[2][5] = 3;
  e1v2[3][0] = 5;
  e1v2[3][1] = 4;
  e1v2[3][2] = -1;
  e1v2[3][3] = -1;
  e1v2[3][4] = 5;
  e1v2[3][5] = 1;
  e1v2[4][0] = 7;
  e1v2[4][1] = 6;
  e1v2[4][2] = 7;
  e1v2[4][3] = 5;
  e1v2[4][4] = -1;
  e1v2[4][5] = -1;
  e1v2[5][0] = 3;
  e1v2[5][1] = 2;
  e1v2[5][2] = 3;
  e1v2[5][3] = 1;
  e1v2[5][4] = -1;
  e1v2[5][5] = -1;
  //e2v2
  e2v2[0][0] = -1;
  e2v2[0][1] = -1;
  e2v2[0][2] = 4;
  e2v2[0][3] = 6;
  e2v2[0][4] = 2;
  e2v2[0][5] = 6;
  e2v2[1][0] = -1;
  e2v2[1][1] = -1;
  e2v2[1][2] = 5;
  e2v2[1][3] = 7;
  e2v2[1][4] = 3;
  e2v2[1][5] = 7;
  e2v2[2][0] = 4;
  e2v2[2][1] = 5;
  e2v2[2][2] = -1;
  e2v2[2][3] = -1;
  e2v2[2][4] = 1;
  e2v2[2][5] = 5;
  e2v2[3][0] = 6;
  e2v2[3][1] = 7;
  e2v2[3][2] = -1;
  e2v2[3][3] = -1;
  e2v2[3][4] = 3;
  e2v2[3][5] = 7;
  e2v2[4][0] = 2;
  e2v2[4][1] = 3;
  e2v2[4][2] = 1;
  e2v2[4][3] = 3;
  e2v2[4][4] = -1;
  e2v2[4][5] = -1;
  e2v2[5][0] = 6;
  e2v2[5][1] = 7;
  e2v2[5][2] = 5;
  e2v2[5][3] = 7;
  e2v2[5][4] = -1;
  e2v2[5][5] = -1;
  // children[n][n1] returns the four elements among the eight children
  // elements to be merged on face n of the parent element.
  // The IDs for the eight children are 0,1,2,3,4,5,6,7
  children[0][0] = 1;
  children[0][1] = 3;
  children[0][2] = 5;
  children[0][3] = 7;
  children[1][0] = 0;
  children[1][1] = 2;
  children[1][2] = 4;
  children[1][3] = 6;
  children[2][0] = 2;
  children[2][1] = 3;
  children[2][2] = 6;
  children[2][3] = 7;
  children[3][0] = 0;
  children[3][1] = 1;
  children[3][2] = 4;
  children[3][3] = 5;
  children[4][0] = 4;
  children[4][1] = 5;
  children[4][2] = 6;
  children[4][3] = 7;
  children[5][0] = 0;
  children[5][1] = 1;
  children[5][2] = 2;
  children[5][3] = 3;
  // iijj[n][n1] returns the location of n's mortar on an element face;
  // n1=0 refers to x direction location and n1=1 refers to y direction
  iijj[0][0] = 0;
  iijj[0][1] = 0;
  iijj[1][0] = 0;
  iijj[1][1] = 1;
  iijj[2][0] = 1;
  iijj[2][1] = 0;
  iijj[3][0] = 1;
  iijj[3][1] = 1;
  // v_end[n] returns the index of collocation points at the two ends of
  // each direction
  v_end[0] = 0;
  v_end[1] = LX1 - 1;
  // face_l1, face_l2, face_ld return start, end, stride for a loop over
  // faces; used in subroutine mortar_vertex
  face_l1[0] = 1;
  face_l1[1] = 2;
  face_l1[2] = 0;
  face_l2[0] = 2;
  face_l2[1] = 0;
  face_l2[2] = 1;
  face_ld[0] = 1;
  face_ld[1] = -2;
  face_ld[2] = 1;
}
//------------------------------------------------------------------
// Map values from mortar(tmor) to element(tx)
//------------------------------------------------------------------
void transf(double tmor[], double tx[])
{
double tmp[2][LX1][LX1];
int ig1, ig2, ig3, ig4, ie, iface, il1, il2, il3, il4;
int nnje, ije1, ije2, col, i, j, ig, il;
// zero out tx on element boundaries
col2(tx, (double *)tmult, ntot);
for (ie = 0; ie < nelt; ie++)
{
for (iface = 0; iface < NSIDES; iface++)
{
// get the collocation point index of the four local corners on the
// face iface of element ie
il1 = idel[ie][iface][0][0];
il2 = idel[ie][iface][0][LX1 - 1];
il3 = idel[ie][iface][LX1 - 1][0];
il4 = idel[ie][iface][LX1 - 1][LX1 - 1];
// get the mortar indices of the four local corners
ig1 = idmo[ie][iface][0][0][0][0];
ig2 = idmo[ie][iface][1][0][0][LX1 - 1];
ig3 = idmo[ie][iface][0][1][LX1 - 1][0];
ig4 = idmo[ie][iface][1][1][LX1 - 1][LX1 - 1];
// copy the value from tmor to tx for these four local corners
tx[il1] = tmor[ig1];
tx[il2] = tmor[ig2];
tx[il3] = tmor[ig3];
tx[il4] = tmor[ig4];
// nnje=1 for conforming faces, nnje=2 for nonconforming faces
if (cbc[ie][iface] == 3)
{
nnje = 2;
}
else
{
nnje = 1;
}
// for nonconforming faces
if (nnje == 2)
{
// nonconforming faces have four pieces of mortar, first map them to
// two intermediate mortars, stored in tmp
r_init((double *)tmp, LX1 * LX1 * 2, 0.0);
#pragma omp parallel for default(shared) private(ije1, ije2, col, i, j, ig, il) firstprivate(nnje, ie, iface, v_end, idmo, tmor, idel, qbnew)
for (ije1 = 0; ije1 < nnje; ije1++)
{
for (ije2 = 0; ije2 < nnje; ije2++)
{
for (col = 0; col < LX1; col++)
{
// in each row col, when coloumn i=1 or LX1, the value
// in tmor is copied to tmp
i = v_end[ije2];
ig = idmo[ie][iface][ije2][ije1][col][i];
tmp[ije1][col][i] = tmor[ig];
// in each row col, value in the interior three collocation
// points is computed by apply mapping matrix qbnew to tmor
for (i = 1; i < LX1 - 1; i++)
{
il = idel[ie][iface][col][i];
for (j = 0; j < LX1; j++)
{
ig = idmo[ie][iface][ije2][ije1][col][j];
tmp[ije1][col][i] = tmp[ije1][col][i] +
qbnew[ije2][j][i - 1] * tmor[ig];
}
}
}
}
}
// mapping from two pieces of intermediate mortar tmp to element
// face tx
for (ije1 = 0; ije1 < nnje; ije1++)
{
// the first column, col=0, is an edge of face iface.
// the value on the three interior collocation points, tx, is
// computed by applying mapping matrices qbnew to tmp.
// the mapping result is divided by 2, because there will be
// duplicated contribution from another face sharing this edge.
col = 0;
for (i = 1; i < LX1 - 1; i++)
{
il = idel[ie][iface][i][col];
for (j = 0; j < LX1; j++)
{
tx[il] = tx[il] + qbnew[ije1][j][i - 1] *
tmp[ije1][j][col] * 0.5;
}
}
// for column 1 ~ lx-2
for (col = 1; col < LX1 - 1; col++)
{
//when i=0 or LX1-1, the collocation points are also on an edge of
// the face, so the mapping result also needs to be divided by 2
i = v_end[ije1];
il = idel[ie][iface][i][col];
tx[il] = tx[il] + tmp[ije1][i][col] * 0.5;
// compute the value at interior collocation points in
// columns 1 ~ LX1-1
for (i = 1; i < LX1 - 1; i++)
{
il = idel[ie][iface][i][col];
for (j = 0; j < LX1; j++)
{
tx[il] = tx[il] + qbnew[ije1][j][i - 1] * tmp[ije1][j][col];
}
}
}
// same as col=0
col = LX1 - 1;
for (i = 1; i < LX1 - 1; i++)
{
il = idel[ie][iface][i][col];
for (j = 0; j < LX1; j++)
{
tx[il] = tx[il] + qbnew[ije1][j][i - 1] *
tmp[ije1][j][col] * 0.5;
}
}
}
// for conforming faces
}
else
{
// face interior
for (col = 1; col < LX1 - 1; col++)
{
for (i = 1; i < LX1 - 1; i++)
{
il = idel[ie][iface][col][i];
ig = idmo[ie][iface][0][0][col][i];
tx[il] = tmor[ig];
}
}
// edges of conforming faces
// if local edge 0 is a nonconforming edge
if (idmo[ie][iface][0][0][0][LX1 - 1] != -1)
{
for (i = 1; i < LX1 - 1; i++)
{
il = idel[ie][iface][0][i];
for (ije1 = 0; ije1 < 2; ije1++)
{
for (j = 0; j < LX1; j++)
{
ig = idmo[ie][iface][ije1][0][0][j];
tx[il] = tx[il] + qbnew[ije1][j][i - 1] * tmor[ig] * 0.5;
}
}
}
// if local edge 0 is a conforming edge
}
else
{
for (i = 1; i < LX1 - 1; i++)
{
il = idel[ie][iface][0][i];
ig = idmo[ie][iface][0][0][0][i];
tx[il] = tmor[ig];
}
}
// if local edge 1 is a nonconforming edge
if (idmo[ie][iface][1][0][1][LX1 - 1] != -1)
{
for (i = 1; i < LX1 - 1; i++)
{
il = idel[ie][iface][i][LX1 - 1];
for (ije1 = 0; ije1 < 2; ije1++)
{
for (j = 0; j < LX1; j++)
{
ig = idmo[ie][iface][1][ije1][j][LX1 - 1];
tx[il] = tx[il] + qbnew[ije1][j][i - 1] * tmor[ig] * 0.5;
}
}
}
// if local edge 1 is a conforming edge
}
else
{
for (i = 1; i < LX1 - 1; i++)
{
il = idel[ie][iface][i][LX1 - 1];
ig = idmo[ie][iface][0][0][i][LX1 - 1];
tx[il] = tmor[ig];
}
}
// if local edge 2 is a nonconforming edge
if (idmo[ie][iface][0][1][LX1 - 1][1] != -1)
{
for (i = 1; i < LX1 - 1; i++)
{
il = idel[ie][iface][LX1 - 1][i];
for (ije1 = 0; ije1 < 2; ije1++)
{
for (j = 0; j < LX1; j++)
{
ig = idmo[ie][iface][ije1][1][LX1 - 1][j];
tx[il] = tx[il] + qbnew[ije1][j][i - 1] * tmor[ig] * 0.5;
}
}
}
// if local edge 2 is a conforming edge
}
else
{
for (i = 1; i < LX1 - 1; i++)
{
il = idel[ie][iface][LX1 - 1][i];
ig = idmo[ie][iface][0][0][LX1 - 1][i];
tx[il] = tmor[ig];
}
}
// if local edge 3 is a nonconforming edge
if (idmo[ie][iface][0][0][LX1 - 1][0] != -1)
{
for (i = 1; i < LX1 - 1; i++)
{
il = idel[ie][iface][i][0];
for (ije1 = 0; ije1 < 2; ije1++)
{
for (j = 0; j < LX1; j++)
{
ig = idmo[ie][iface][0][ije1][j][0];
tx[il] = tx[il] + qbnew[ije1][j][i - 1] * tmor[ig] * 0.5;
}
}
}
// if local edge 3 is a conforming edge
}
else
{
for (i = 1; i < LX1 - 1; i++)
{
il = idel[ie][iface][i][0];
ig = idmo[ie][iface][0][0][i][0];
tx[il] = tmor[ig];
}
}
}
}
}
}
//------------------------------------------------------------------
// Map from element(tx) to mortar(tmor).
// tmor sums contributions from all elements.
//------------------------------------------------------------------
// Gather element-face values tx onto the global mortar array tmor (the
// transpose of the mortar-to-element mapping).  tmor is zeroed here and
// then accumulates the contribution of every face of every element;
// shared vertices are weighted by 1/3 and shared edges by 1/2 to undo
// duplicated contributions from neighboring faces.
void transfb(double tmor[], double tx[])
{
  const double third = 1.0 / 3.0;
  int shift;
  double tmp, tmp1, temp[2][LX1][LX1], top[2][LX1];
  int il1, il2, il3, il4, ig1, ig2, ig3, ig4, ie, iface, nnje;
  int ije1, ije2, col, i, j, ije, ig, il;
  r_init(tmor, nmor, 0.0);
  for (ie = 0; ie < nelt; ie++)
  {
    for (iface = 0; iface < NSIDES; iface++)
    {
      // nnje=1 for conforming faces, nnje=2 for nonconforming faces
      if (cbc[ie][iface] == 3)
      {
        nnje = 2;
      }
      else
      {
        nnje = 1;
      }
      // get collocation point index of four local corners on the face
      il1 = idel[ie][iface][0][0];
      il2 = idel[ie][iface][0][LX1 - 1];
      il3 = idel[ie][iface][LX1 - 1][0];
      il4 = idel[ie][iface][LX1 - 1][LX1 - 1];
      // get the mortar indices of the four local corners
      ig1 = idmo[ie][iface][0][0][0][0];
      ig2 = idmo[ie][iface][1][0][0][LX1 - 1];
      ig3 = idmo[ie][iface][0][1][LX1 - 1][0];
      ig4 = idmo[ie][iface][1][1][LX1 - 1][LX1 - 1];
      // sum the values from tx to tmor for these four local corners
      // only 1/3 of the value is summed, since there will be two duplicated
      // contributions from the other two faces sharing this vertex
      tmor[ig1] = tmor[ig1] + tx[il1] * third;
      tmor[ig2] = tmor[ig2] + tx[il2] * third;
      tmor[ig3] = tmor[ig3] + tx[il3] * third;
      tmor[ig4] = tmor[ig4] + tx[il4] * third;
      // for nonconforming faces
      if (nnje == 2)
      {
        r_init((double *)temp, LX1 * LX1 * 2, 0.0);
        // nonconforming faces have four pieces of mortar, first map tx to
        // two intermediate mortars stored in temp
        for (ije2 = 0; ije2 < nnje; ije2++)
        {
          shift = ije2;
          for (col = 0; col < LX1; col++)
          {
            // For mortar points on face edge (top and bottom), copy the
            // value from tx to temp
            il = idel[ie][iface][v_end[ije2]][col];
            temp[ije2][v_end[ije2]][col] = tx[il];
            // For mortar points on face edge (top and bottom), calculate
            // the interior points' contribution to them, i.e. top()
            j = v_end[ije2];
            tmp = 0.0;
            for (i = 1; i < LX1 - 1; i++)
            {
              il = idel[ie][iface][i][col];
              tmp = tmp + qbnew[ije2][j][i - 1] * tx[il];
            }
            top[ije2][col] = tmp;
            // Use mapping matrices qbnew to map the value from tx to temp
            // for mortar points not on the top bottom face edge.
            #pragma omp parallel for default(shared) private(j, i, tmp, il) firstprivate(shift, ie, iface, col, ije2, idel, qbnew, tx)
            for (j = 2 - shift - 1; j < LX1 - shift; j++)
            {
              tmp = 0.0;
              for (i = 1; i < LX1 - 1; i++)
              {
                il = idel[ie][iface][i][col];
                tmp = tmp + qbnew[ije2][j][i - 1] * tx[il];
              };
              temp[ije2][j][col] = tmp + temp[ije2][j][col];
            }
          }
        }
        // mapping from temp to tmor
        for (ije1 = 0; ije1 < nnje; ije1++)
        {
          shift = ije1;
          for (ije2 = 0; ije2 < nnje; ije2++)
          {
            // for each column of collocation points on a piece of mortar
            for (col = 2 - shift - 1; col < LX1 - shift; col++)
            {
              // For the end point, which is on an edge (local edge 1,3),
              // the contribution is halved since there will be duplicated
              // contribution from another face sharing this edge.
              ig = idmo[ie][iface][ije2][ije1][col][v_end[ije2]];
              tmor[ig] = tmor[ig] + temp[ije1][col][v_end[ije2]] * 0.5;
              // In each row of collocation points on a piece of mortar,
              // sum the contributions from interior collocation points
              // (i=1,LX1-2)
              for (j = 0; j < LX1; j++)
              {
                tmp = 0.0;
                for (i = 1; i < LX1 - 1; i++)
                {
                  tmp = tmp + qbnew[ije2][j][i - 1] * temp[ije1][col][i];
                }
                ig = idmo[ie][iface][ije2][ije1][col][j];
                tmor[ig] = tmor[ig] + tmp;
              }
            }
            // For tmor on local edge 0 and 2, tmp is the contribution from
            // an edge, so it is halved because of duplicated contribution
            // from another face sharing this edge. tmp1 is contribution
            // from face interior.
            col = v_end[ije1];
            ig = idmo[ie][iface][ije2][ije1][col][v_end[ije2]];
            tmor[ig] = tmor[ig] + top[ije1][v_end[ije2]] * 0.5;
            for (j = 0; j < LX1; j++)
            {
              tmp = 0.0;
              tmp1 = 0.0;
              for (i = 1; i < LX1 - 1; i++)
              {
                tmp = tmp + qbnew[ije2][j][i - 1] * temp[ije1][col][i];
                tmp1 = tmp1 + qbnew[ije2][j][i - 1] * top[ije1][i];
              }
              ig = idmo[ie][iface][ije2][ije1][col][j];
              tmor[ig] = tmor[ig] + tmp * 0.5 + tmp1;
            }
          }
        }
        // for conforming faces
      }
      else
      {
        // face interior
        for (col = 1; col < LX1 - 1; col++)
        {
          for (j = 1; j < LX1 - 1; j++)
          {
            il = idel[ie][iface][col][j];
            ig = idmo[ie][iface][0][0][col][j];
            tmor[ig] = tmor[ig] + tx[il];
          }
        }
        // edges of conforming faces
        // if local edge 0 is a nonconforming edge
        if (idmo[ie][iface][0][0][0][LX1 - 1] != -1)
        {
          for (ije = 0; ije < 2; ije++)
          {
            for (j = 0; j < LX1; j++)
            {
              tmp = 0.0;
              for (i = 1; i < LX1 - 1; i++)
              {
                il = idel[ie][iface][0][i];
                tmp = tmp + qbnew[ije][j][i - 1] * tx[il];
              }
              ig = idmo[ie][iface][ije][0][0][j];
              tmor[ig] = tmor[ig] + tmp * 0.5;
            }
          }
          // if local edge 0 is a conforming edge
        }
        else
        {
          for (j = 1; j < LX1 - 1; j++)
          {
            il = idel[ie][iface][0][j];
            ig = idmo[ie][iface][0][0][0][j];
            tmor[ig] = tmor[ig] + tx[il] * 0.5;
          }
        }
        // if local edge 1 is a nonconforming edge
        if (idmo[ie][iface][1][0][1][LX1 - 1] != -1)
        {
          for (ije = 0; ije < 2; ije++)
          {
            for (j = 0; j < LX1; j++)
            {
              tmp = 0.0;
              for (i = 1; i < LX1 - 1; i++)
              {
                il = idel[ie][iface][i][LX1 - 1];
                tmp = tmp + qbnew[ije][j][i - 1] * tx[il];
              }
              ig = idmo[ie][iface][1][ije][j][LX1 - 1];
              tmor[ig] = tmor[ig] + tmp * 0.5;
            }
          }
          // if local edge 1 is a conforming edge
        }
        else
        {
          for (j = 1; j < LX1 - 1; j++)
          {
            il = idel[ie][iface][j][LX1 - 1];
            ig = idmo[ie][iface][0][0][j][LX1 - 1];
            tmor[ig] = tmor[ig] + tx[il] * 0.5;
          }
        }
        // if local edge 2 is a nonconforming edge
        if (idmo[ie][iface][0][1][LX1 - 1][1] != -1)
        {
          for (ije = 0; ije < 2; ije++)
          {
            for (j = 0; j < LX1; j++)
            {
              tmp = 0.0;
              for (i = 1; i < LX1 - 1; i++)
              {
                il = idel[ie][iface][LX1 - 1][i];
                tmp = tmp + qbnew[ije][j][i - 1] * tx[il];
              }
              ig = idmo[ie][iface][ije][1][LX1 - 1][j];
              tmor[ig] = tmor[ig] + tmp * 0.5;
            }
          }
          // if local edge 2 is a conforming edge
        }
        else
        {
          for (j = 1; j < LX1 - 1; j++)
          {
            il = idel[ie][iface][LX1 - 1][j];
            ig = idmo[ie][iface][0][0][LX1 - 1][j];
            tmor[ig] = tmor[ig] + tx[il] * 0.5;
          }
        }
        // if local edge 3 is a nonconforming edge
        if (idmo[ie][iface][0][0][LX1 - 1][0] != -1)
        {
          for (ije = 0; ije < 2; ije++)
          {
            for (j = 0; j < LX1; j++)
            {
              tmp = 0.0;
              for (i = 1; i < LX1 - 1; i++)
              {
                il = idel[ie][iface][i][0];
                tmp = tmp + qbnew[ije][j][i - 1] * tx[il];
              }
              ig = idmo[ie][iface][0][ije][j][0];
              tmor[ig] = tmor[ig] + tmp * 0.5;
            }
          }
          // if local edge 3 is a conforming edge
        }
        else
        {
          for (j = 1; j < LX1 - 1; j++)
          {
            il = idel[ie][iface][j][0];
            ig = idmo[ie][iface][0][0][j][0];
            tmor[ig] = tmor[ig] + tx[il] * 0.5;
          }
        }
      } //nnje=1
    }
  }
}
//--------------------------------------------------------------
// This subroutine performs the edge to mortar mapping and
// calculates the mapping result on the mortar point at a vertex
// under situation 1,2, or 3.
// n refers to the configuration of three edges sharing a vertex,
// n = 1: only one edge is nonconforming
// n = 2: two edges are nonconforming
// n = 3: three edges are nonconforming
//-------------------------------------------------------------------
// Edge-to-mortar mapping at a vertex shared by up to three
// nonconforming edges (n = 1, 2 or 3 nonconforming edges).
// Writes the mapped vertex value into *tmor.
void transfb_cor_e(int n, double *tmor, double tx[LX1][LX1][LX1])
{
  double acc = tx[0][0][0];
  int k;
  // the first edge always contributes
  for (k = 1; k < LX1 - 1; k++) {
    acc += qbnew[0][0][k - 1] * tx[0][0][k];
  }
  // second nonconforming edge
  if (n > 1) {
    for (k = 1; k < LX1 - 1; k++) {
      acc += qbnew[0][0][k - 1] * tx[0][k][0];
    }
  }
  // third nonconforming edge
  if (n == 3) {
    for (k = 1; k < LX1 - 1; k++) {
      acc += qbnew[0][0][k - 1] * tx[k][0][0];
    }
  }
  *tmor = acc;
}
//--------------------------------------------------------------
// This subroutine performs the mapping from face to mortar.
// Output tmor is the mapping result on a mortar vertex
// of situations of three edges and three faces sharing a vertex:
// n=4: only one face is nonconforming
// n=5: one face and one edge are nonconforming
// n=6: two faces are nonconforming
// n=7: three faces are nonconforming
//--------------------------------------------------------------
// Face-to-mortar mapping at a vertex shared by up to three
// nonconforming faces (n = 4..7, see header comment above).
// Writes the mapped vertex value into *tmor.
void transfb_cor_f(int n, double *tmor, double tx[LX1][LX1][LX1])
{
  double row[LX1], acc;
  int c, k;
  r_init(row, LX1, 0.0);
  // collapse the first nonconforming face onto one row ...
  for (c = 0; c < LX1; c++) {
    row[c] = tx[0][0][c];
    for (k = 1; k < LX1 - 1; k++) {
      row[c] += qbnew[0][0][k - 1] * tx[0][k][c];
    }
  }
  // ... and that row onto the vertex
  acc = row[0];
  for (k = 1; k < LX1 - 1; k++) {
    acc += qbnew[0][0][k - 1] * row[k];
  }
  // n=5: one nonconforming edge also contributes
  if (n == 5) {
    for (k = 1; k < LX1 - 1; k++) {
      acc += qbnew[0][0][k - 1] * tx[k][0][0];
    }
  }
  // n>=6: a second nonconforming face contributes
  if (n >= 6) {
    r_init(row, LX1, 0.0);
    for (c = 0; c < LX1; c++) {
      for (k = 1; k < LX1 - 1; k++) {
        row[c] += qbnew[0][0][k - 1] * tx[k][0][c];
      }
    }
    acc += row[0];
    for (k = 1; k < LX1 - 1; k++) {
      acc += qbnew[0][0][k - 1] * row[k];
    }
  }
  // n=7: a third nonconforming face contributes
  if (n == 7) {
    r_init(row, LX1, 0.0);
    for (c = 1; c < LX1 - 1; c++) {
      for (k = 1; k < LX1 - 1; k++) {
        row[c] += qbnew[0][0][k - 1] * tx[k][c][0];
      }
    }
    for (k = 1; k < LX1 - 1; k++) {
      acc += qbnew[0][0][k - 1] * row[k];
    }
  }
  *tmor = acc;
}
//------------------------------------------------------------------------
// Perform mortar to element mapping on a nonconforming face.
// This subroutin is used when all entries in tmor are zero except
// one tmor[j][i]=1. So this routine is simplified. Only one piece of
// mortar (tmor only has two indices) and one piece of intermediate
// mortar (tmp) are involved.
//------------------------------------------------------------------------
// Mortar-to-element mapping on a single nonconforming mortar piece;
// simplified driver used when tmor is all zero except one entry.
// tx accumulates the mapped values.
void transf_nc(double tmor[LX1][LX1], double tx[LX1][LX1])
{
  // stage holds the single intermediate mortar piece
  double stage[LX1][LX1];
  int c, r, k;
  r_init((double *)stage, LX1 * LX1, 0.0);
  // mortar -> intermediate mortar: row endpoint copied, interior mapped
  for (c = 0; c < LX1; c++) {
    stage[c][0] = tmor[c][0];
    for (r = 1; r < LX1 - 1; r++) {
      for (k = 0; k < LX1; k++) {
        stage[c][r] += qbnew[0][k][r - 1] * tmor[c][k];
      }
    }
  }
  // intermediate mortar -> element face
  for (c = 0; c < LX1; c++) {
    tx[0][c] += stage[0][c];
    for (r = 1; r < LX1 - 1; r++) {
      for (k = 0; k < LX1; k++) {
        tx[r][c] += qbnew[0][k][r - 1] * stage[k][c];
      }
    }
  }
}
//------------------------------------------------------------------------
// Performs mapping from element to mortar when the nonconforming
// edges are shared by two conforming faces of an element.
//------------------------------------------------------------------------
// Element-to-mortar mapping for a nonconforming edge shared by two
// conforming faces: only the first mortar row receives contributions,
// mapped from the interior points of the element edge tx[0][0][*].
void transfb_nc0(double tmor[LX1][LX1], double tx[LX1][LX1][LX1])
{
  int k, m;
  r_init((double *)tmor, LX1 * LX1, 0.0);
  for (m = 0; m < LX1; m++) {
    for (k = 1; k < LX1 - 1; k++) {
      tmor[0][m] += qbnew[0][m][k - 1] * tx[0][0][k];
    }
  }
}
//------------------------------------------------------------------------
// Maps values from element to mortar when the nonconforming edges are
// shared by two nonconforming faces of an element.
// Although each face shall have four pieces of mortar, only value in
// one piece (location (0,0)) is used in the calling routine so only
// the value in the first mortar is calculated in this subroutine.
//------------------------------------------------------------------------
// Map values from element face tx to mortar tmor when the nonconforming
// edges are shared by two nonconforming faces of an element.  Only the
// value of the first mortar piece (location (0,0)) is used by the calling
// routine, so only that piece is computed here.
// tmor: output mortar piece (zeroed first); tx: input face values.
void transfb_nc2(double tmor[LX1][LX1], double tx[LX1][LX1])
{
  double bottom[LX1], temp[LX1][LX1];
  int col, j, i;
  r_init((double *)tmor, LX1 * LX1, 0.0);
  r_init((double *)temp, LX1 * LX1, 0.0);
  tmor[0][0] = tx[0][0];
  // mapping from tx to intermediate mortar temp + bottom
  for (col = 0; col < LX1; col++)
  {
    temp[0][col] = tx[0][col];
    j = 0;
    bottom[col] = 0.0; // fixed: stray empty statement (";;") removed
    for (i = 1; i < LX1 - 1; i++)
    {
      bottom[col] = bottom[col] + qbnew[0][j][i - 1] * tx[i][col];
    }
    for (j = 1; j < LX1; j++)
    {
      for (i = 1; i < LX1 - 1; i++)
      {
        temp[j][col] = temp[j][col] + qbnew[0][j][i - 1] * tx[i][col];
      }
    }
  }
  // from intermediate mortar to mortar
  // On the nonconforming edge, temp is divided by 2 as there will be
  // a duplicate contribution from another face sharing this edge
  col = 0;
  for (j = 0; j < LX1; j++)
  {
    for (i = 1; i < LX1 - 1; i++)
    {
      tmor[col][j] = tmor[col][j] + qbnew[0][j][i - 1] * bottom[i] +
                     qbnew[0][j][i - 1] * temp[col][i] * 0.5;
    }
  }
  for (col = 1; col < LX1; col++)
  {
    tmor[col][0] = tmor[col][0] + temp[col][0];
    for (j = 0; j < LX1; j++)
    {
      for (i = 1; i < LX1 - 1; i++)
      {
        tmor[col][j] = tmor[col][j] + qbnew[0][j][i - 1] * temp[col][i];
      }
    }
  }
}
//------------------------------------------------------------------------
// Maps values from element to mortar when the nonconforming edges are
// shared by a nonconforming face and a conforming face of an element
//------------------------------------------------------------------------
// Map values from element face tx to mortar tmor when the nonconforming
// edges are shared by a nonconforming face and a conforming face.
// Only the mortar piece at location (0,0) is needed by the caller.
void transfb_nc1(double tmor[LX1][LX1], double tx[LX1][LX1])
{
  // edge accumulator and the single intermediate mortar piece
  double edge[LX1], stage[LX1][LX1];
  int c, m, k;
  r_init((double *)tmor, LX1 * LX1, 0.0);
  r_init((double *)stage, LX1 * LX1, 0.0);
  tmor[0][0] = tx[0][0];
  // contribution from the nonconforming face: map tx onto stage + edge
  for (c = 0; c < LX1; c++) {
    stage[0][c] = tx[0][c];
    edge[c] = 0.0;
    for (k = 1; k < LX1 - 1; k++) {
      edge[c] = edge[c] + qbnew[0][0][k - 1] * tx[k][c];
    }
    for (m = 1; m < LX1; m++) {
      for (k = 1; k < LX1 - 1; k++) {
        stage[m][c] = stage[m][c] + qbnew[0][m][k - 1] * tx[k][c];
      }
    }
  }
  // first mortar row: stage is NOT halved here — it already includes
  // the contribution of the other (conforming) face sharing the edge
  tmor[0][0] = tmor[0][0] + edge[0];
  for (m = 0; m < LX1; m++) {
    for (k = 1; k < LX1 - 1; k++) {
      tmor[0][m] = tmor[0][m] + qbnew[0][m][k - 1] * edge[k] +
                   qbnew[0][m][k - 1] * stage[0][k];
    }
  }
  // remaining mortar rows
  for (c = 1; c < LX1; c++) {
    tmor[c][0] = tmor[c][0] + stage[c][0];
    for (m = 0; m < LX1; m++) {
      for (k = 1; k < LX1 - 1; k++) {
        tmor[c][m] = tmor[c][m] + qbnew[0][m][k - 1] * stage[c][k];
      }
    }
  }
}
//-------------------------------------------------------------------
// Prepare initial guess for cg. All values from conforming
// boundary are copied and summed on tmor.
//-------------------------------------------------------------------
// Prepare the initial guess for CG: values from all conforming faces
// (cbc != 3) of every element are copied/summed onto the global mortar
// array tmort.  Vertices are weighted by 1/3 and conforming-edge points
// by 1/2 to undo duplicated contributions from neighboring faces.
void transfb_c(double tx[])
{
  const double third = 1.0 / 3.0;
  int il1, il2, il3, il4, ig1, ig2, ig3, ig4, ie, iface, col, j, ig, il;
  r_init(tmort, nmor, 0.0);
  for (ie = 0; ie < nelt; ie++)
  {
    for (iface = 0; iface < NSIDES; iface++)
    {
      if (cbc[ie][iface] != 3)
      {
        // four local corners: element indices il*, mortar indices ig*
        il1 = idel[ie][iface][0][0];
        il2 = idel[ie][iface][0][LX1 - 1];
        il3 = idel[ie][iface][LX1 - 1][0];
        il4 = idel[ie][iface][LX1 - 1][LX1 - 1];
        ig1 = idmo[ie][iface][0][0][0][0];
        ig2 = idmo[ie][iface][1][0][0][LX1 - 1];
        ig3 = idmo[ie][iface][0][1][LX1 - 1][0];
        ig4 = idmo[ie][iface][1][1][LX1 - 1][LX1 - 1];
        tmort[ig1] = tmort[ig1] + tx[il1] * third;
        tmort[ig2] = tmort[ig2] + tx[il2] * third;
        tmort[ig3] = tmort[ig3] + tx[il3] * third;
        tmort[ig4] = tmort[ig4] + tx[il4] * third;
        // face interior points map one-to-one
        for (col = 1; col < LX1 - 1; col++)
        {
          for (j = 1; j < LX1 - 1; j++)
          {
            il = idel[ie][iface][col][j];
            ig = idmo[ie][iface][0][0][col][j];
            tmort[ig] = tmort[ig] + tx[il];
          }
        }
        // local edge 0, only when conforming (idmo == -1)
        if (idmo[ie][iface][0][0][0][LX1 - 1] == -1)
        {
          for (j = 1; j < LX1 - 1; j++)
          {
            il = idel[ie][iface][0][j];
            ig = idmo[ie][iface][0][0][0][j];
            tmort[ig] = tmort[ig] + tx[il] * 0.5;
          }
        }
        // local edge 1, only when conforming
        if (idmo[ie][iface][1][0][1][LX1 - 1] == -1)
        {
          for (j = 1; j < LX1 - 1; j++)
          {
            il = idel[ie][iface][j][LX1 - 1];
            ig = idmo[ie][iface][0][0][j][LX1 - 1];
            tmort[ig] = tmort[ig] + tx[il] * 0.5;
          }
        }
        // local edge 2, only when conforming
        if (idmo[ie][iface][0][1][LX1 - 1][1] == -1)
        {
          for (j = 1; j < LX1 - 1; j++)
          {
            il = idel[ie][iface][LX1 - 1][j];
            ig = idmo[ie][iface][0][0][LX1 - 1][j];
            tmort[ig] = tmort[ig] + tx[il] * 0.5;
          }
        }
        // local edge 3, only when conforming
        if (idmo[ie][iface][0][0][LX1 - 1][0] == -1)
        {
          for (j = 1; j < LX1 - 1; j++)
          {
            il = idel[ie][iface][j][0];
            ig = idmo[ie][iface][0][0][j][0];
            tmort[ig] = tmort[ig] + tx[il] * 0.5;
          }
        }
      }
    }
  }
}
//-------------------------------------------------------------------
// Prepare initial guess for CG. All values from conforming
// boundary are copied and summed in tmort.
// mormult is multiplicity, which is used to average tmort.
//-------------------------------------------------------------------
// Same gather as transfb_c, but additionally builds mormult, the
// multiplicity (sum of the weights applied) at each mortar point,
// which the caller uses to average tmort.
void transfb_c_2(double tx[])
{
  const double third = 1.0 / 3.0;
  int il1, il2, il3, il4, ig1, ig2, ig3, ig4, ie, iface, col, j, ig, il;
  r_init(tmort, nmor, 0.0);
  r_init(mormult, nmor, 0.0);
  for (ie = 0; ie < nelt; ie++)
  {
    for (iface = 0; iface < NSIDES; iface++)
    {
      if (cbc[ie][iface] != 3)
      {
        // four local corners: element indices il*, mortar indices ig*
        il1 = idel[ie][iface][0][0];
        il2 = idel[ie][iface][0][LX1 - 1];
        il3 = idel[ie][iface][LX1 - 1][0];
        il4 = idel[ie][iface][LX1 - 1][LX1 - 1];
        ig1 = idmo[ie][iface][0][0][0][0];
        ig2 = idmo[ie][iface][1][0][0][LX1 - 1];
        ig3 = idmo[ie][iface][0][1][LX1 - 1][0];
        ig4 = idmo[ie][iface][1][1][LX1 - 1][LX1 - 1];
        // vertices: value and multiplicity both weighted by 1/3
        tmort[ig1] = tmort[ig1] + tx[il1] * third;
        tmort[ig2] = tmort[ig2] + tx[il2] * third;
        tmort[ig3] = tmort[ig3] + tx[il3] * third;
        tmort[ig4] = tmort[ig4] + tx[il4] * third;
        mormult[ig1] = mormult[ig1] + third;
        mormult[ig2] = mormult[ig2] + third;
        mormult[ig3] = mormult[ig3] + third;
        mormult[ig4] = mormult[ig4] + third;
        // face interior: full weight
        for (col = 1; col < LX1 - 1; col++)
        {
          for (j = 1; j < LX1 - 1; j++)
          {
            il = idel[ie][iface][col][j];
            ig = idmo[ie][iface][0][0][col][j];
            tmort[ig] = tmort[ig] + tx[il];
            mormult[ig] = mormult[ig] + 1.0;
          }
        }
        // local edge 0, only when conforming (idmo == -1)
        if (idmo[ie][iface][0][0][0][LX1 - 1] == -1)
        {
          for (j = 1; j < LX1 - 1; j++)
          {
            il = idel[ie][iface][0][j];
            ig = idmo[ie][iface][0][0][0][j];
            tmort[ig] = tmort[ig] + tx[il] * 0.5;
            mormult[ig] = mormult[ig] + 0.5;
          }
        }
        // local edge 1, only when conforming
        if (idmo[ie][iface][1][0][1][LX1 - 1] == -1)
        {
          for (j = 1; j < LX1 - 1; j++)
          {
            il = idel[ie][iface][j][LX1 - 1];
            ig = idmo[ie][iface][0][0][j][LX1 - 1];
            tmort[ig] = tmort[ig] + tx[il] * 0.5;
            mormult[ig] = mormult[ig] + 0.5;
          }
        }
        // local edge 2, only when conforming
        if (idmo[ie][iface][0][1][LX1 - 1][1] == -1)
        {
          for (j = 1; j < LX1 - 1; j++)
          {
            il = idel[ie][iface][LX1 - 1][j];
            ig = idmo[ie][iface][0][0][LX1 - 1][j];
            tmort[ig] = tmort[ig] + tx[il] * 0.5;
            mormult[ig] = mormult[ig] + 0.5;
          }
        }
        // local edge 3, only when conforming
        if (idmo[ie][iface][0][0][LX1 - 1][0] == -1)
        {
          for (j = 1; j < LX1 - 1; j++)
          {
            il = idel[ie][iface][j][0];
            ig = idmo[ie][iface][0][0][j][0];
            tmort[ig] = tmort[ig] + tx[il] * 0.5;
            mormult[ig] = mormult[ig] + 0.5;
          }
        }
      }
    }
  }
}
//------------------------------------------------------------------
// replace each entry of the double array a (length n) by 1.0/a[i]
// (original banner wrongly said "initialize")
//------------------------------------------------------------------
void reciprocal(double a[], int n)
{
  int k;
  #pragma omp parallel for default(shared) private(k) firstprivate(n)
  for (k = 0; k < n; k++) {
    a[k] = 1.0 / a[k];
  }
}
//------------------------------------------------------------------
// fill the double precision array a of length n with the value _cnst
//------------------------------------------------------------------
void r_init(double a[], int n, double _cnst)
{
  int k;
  #pragma omp parallel for default(shared) private(k) firstprivate(n, _cnst)
  for (k = 0; k < n; k++) {
    a[k] = _cnst;
  }
}
//------------------------------------------------------------------
// fill the integer array a of length n with the value _cnst
//------------------------------------------------------------------
void nr_init(int a[], int n, int _cnst)
{
  int k;
  #pragma omp parallel for default(shared) private(k) firstprivate(n, _cnst)
  for (k = 0; k < n; k++) {
    a[k] = _cnst;
  }
}
//------------------------------------------------------------------
// fill the int (logical) array a of length n with the value _cnst
//------------------------------------------------------------------
void l_init(int a[], int n, int _cnst)
{
  int k;
  #pragma omp parallel for default(shared) private(k) firstprivate(n, _cnst)
  for (k = 0; k < n; k++) {
    a[k] = _cnst;
  }
}
//------------------------------------------------------------------
// copy the integer array b into a; both have length n
//------------------------------------------------------------------
void ncopy(int a[], int b[], int n)
{
  int k;
  #pragma omp parallel for default(shared) private(k) firstprivate(n, b)
  for (k = 0; k < n; k++) {
    a[k] = b[k];
  }
}
//------------------------------------------------------------------
// copy the double precision array b into a; both have length n
//------------------------------------------------------------------
void copy(double a[], double b[], int n)
{
  int k;
  #pragma omp parallel for default(shared) private(k) firstprivate(n, b)
  for (k = 0; k < n; k++) {
    a[k] = b[k];
  }
}
//-----------------------------------------------------------------
// a = a + c1*b   (daxpy-style update; original banner "a=b*c1" was wrong)
//-----------------------------------------------------------------
void adds2m1(double a[], double b[], double c1, int n)
{
  int k;
  #pragma omp parallel for default(shared) private(k) firstprivate(n, c1, b)
  for (k = 0; k < n; k++) {
    a[k] += c1 * b[k];
  }
}
//-----------------------------------------------------------------
// a = c1*a + b
//-----------------------------------------------------------------
void adds1m1(double a[], double b[], double c1, int n)
{
  int k;
  #pragma omp parallel for default(shared) private(k) firstprivate(n, c1, b)
  for (k = 0; k < n; k++) {
    a[k] = c1 * a[k] + b[k];
  }
}
//------------------------------------------------------------------
// a = a * b  (element-wise product of two length-n arrays)
//------------------------------------------------------------------
void col2(double a[], double b[], int n)
{
  int k;
  #pragma omp parallel for default(shared) private(k) firstprivate(n, b)
  for (k = 0; k < n; k++) {
    a[k] *= b[k];
  }
}
//------------------------------------------------------------------
// zero out the integer array na of length n
//------------------------------------------------------------------
void nrzero(int na[], int n)
{
  int k;
  #pragma omp parallel for default(shared) private(k) firstprivate(n)
  for (k = 0; k < n; k++) {
    na[k] = 0;
  }
}
//------------------------------------------------------------------
// a = a + b  (element-wise sum of two length-n arrays)
//------------------------------------------------------------------
void add2(double a[], double b[], int n)
{
  int k;
  #pragma omp parallel for default(shared) private(k) firstprivate(n, b)
  for (k = 0; k < n; k++) {
    a[k] += b[k];
  }
}
//------------------------------------------------------------------
// calculate the integral of ta1 over the whole domain
//------------------------------------------------------------------
double calc_norm()
{
  double total, ieltotal;
  int iel, k, j, i, isize;
  total = 0.0;
  // quadrature per element: each collocation value is weighted by w3m1
  // and the Jacobian of the element's size class; per-element sums are
  // combined with an OpenMP reduction
  #pragma omp parallel for default(shared) private(iel, k, j, i, ieltotal, isize) firstprivate(nelt, size_e, ta1, w3m1, jacm1_s) reduction(+ : total)
  for (iel = 0; iel < nelt; iel++)
  {
    ieltotal = 0.0;
    isize = size_e[iel];
    for (k = 0; k < LX1; k++)
    {
      for (j = 0; j < LX1; j++)
      {
        for (i = 0; i < LX1; i++)
        {
          ieltotal = ieltotal + ta1[iel][k][j][i] * w3m1[k][j][i]
                     * jacm1_s[isize][k][j][i];
        }
      }
    }
    total = total + ieltotal;
  }
  return total;
}
//-----------------------------------------------------------------
// input array frontier, perform (potentially) parallel add so that
// the output frontier[i] has sum of frontier[1]+frontier[2]+...+frontier[i]
//-----------------------------------------------------------------
void parallel_add(int frontier[])
{
  int nellog, i, ahead, ii, ntemp, n1, ntemp1, n2, iel;
  if (nelt <= 1) return;
  // nellog = ceil(log2(nelt)): number of doubling sweeps required
  nellog = 0;
  iel = 1;
  do
  {
    iel = iel * 2;
    nellog = nellog + 1;
  }
  while (iel < nelt);
  // classic doubling prefix-sum: in each sweep, blocks of size ntemp
  // receive the running total ("ahead") of the block preceding them
  ntemp = 1;
  for (i = 0; i < nellog; i++)
  {
    n1 = ntemp * 2;
    n2 = n1;
    for (iel = n1; iel <= nelt; iel += n1)
    {
      ahead = frontier[iel - ntemp - 1];
      #pragma omp parallel for default(shared) private(ii) firstprivate(ntemp, iel, ahead)
      for (ii = ntemp - 1; ii >= 0; ii--)
      {
        frontier[iel - ii - 1] = frontier[iel - ii - 1] + ahead;
      }
      n2 = iel;
    }
    // handle the partial block at the tail, present when nelt is not a
    // multiple of the current block size n1
    if (n2 <= nelt) n2 = n2 + n1;
    ntemp1 = n2 - nelt;
    if (ntemp1 < ntemp)
    {
      ahead = frontier[n2 - ntemp - 1];
      #pragma omp parallel for default(shared) private(ii) firstprivate(ntemp, ntemp1, n2, ahead)
      for (ii = ntemp - 1; ii >= ntemp1; ii--)
      {
        frontier[n2 - ii - 1] = frontier[n2 - ii - 1] + ahead;
      }
    }
    ntemp = n1;
  }
}
//------------------------------------------------------------------
// Perform stiffness summation: element-mortar-element mapping
//------------------------------------------------------------------
void dssum()
{
  // gather dpcelm onto the mortar (transfb), then map the summed mortar
  // values back onto the elements (transf)
  transfb(dpcmor, (double *)dpcelm);
  transf (dpcmor, (double *)dpcelm);
}
//------------------------------------------------------------------
// assign the value val to face(iface,iel) of array a.
//------------------------------------------------------------------
void facev(double a[LX1][LX1][LX1], int iface, double val)
{
  int kx1, kx2, ky1, ky2, kz1, kz2, ix, iy, iz;
  // start from the full cube [1..LX1]^3, then pinch one axis down to a
  // single plane depending on iface: 0/1 select x, 2/3 select y,
  // 4/5 select z; even faces keep the last plane, odd faces the first
  kx1 = 1;
  ky1 = 1;
  kz1 = 1;
  kx2 = LX1;
  ky2 = LX1;
  kz2 = LX1;
  if (iface == 0) kx1 = LX1;
  if (iface == 1) kx2 = 1;
  if (iface == 2) ky1 = LX1;
  if (iface == 3) ky2 = 1;
  if (iface == 4) kz1 = LX1;
  if (iface == 5) kz2 = 1;
  #pragma omp parallel for default(shared) private(ix, iy, iz) firstprivate(kx1, kx2, ky1, ky2, kz1, kz2, val)
  for (ix = kx1 - 1; ix < kx2; ix++)
  {
    for (iy = ky1 - 1; iy < ky2; iy++)
    {
      for (iz = kz1 - 1; iz < kz2; iz++)
      {
        // note reversed index order: a is indexed [z][y][x]
        a[iz][iy][ix] = val;
      }
    }
  }
}
// Compare the computed temperature integral against the reference value
// for the given problem class.  On exit *verified is 1 on success,
// 0 on failure or unknown class (in which case *Class is set to 'U').
void verify(char *Class, int *verified)
{
  double norm, epsilon, norm_dif, norm_ref;
  // acceptance threshold on the relative difference
  epsilon = 1.0e-08;
  // temperature integral over the whole domain
  norm = calc_norm();
  *verified = 1;
  switch (*Class) {
  case 'S': norm_ref = 0.1890013110962E-02; break;
  case 'W': norm_ref = 0.2569794837076E-04; break;
  case 'A': norm_ref = 0.8939996281443E-04; break;
  case 'B': norm_ref = 0.4507561922901E-04; break;
  case 'C': norm_ref = 0.1544736587100E-04; break;
  case 'D': norm_ref = 0.1577586272355E-05; break;
  default:
    *Class = 'U';
    norm_ref = 1.0;
    *verified = 0;
    break;
  }
  norm_dif = fabs((norm - norm_ref) / norm_ref);
  //---------------------------------------------------------------------
  // Output the comparison of computed results to known cases.
  //---------------------------------------------------------------------
  printf("\n");
  if (*Class != 'U') {
    printf(" Verification being performed for class %c\n", *Class);
    printf(" accuracy setting for epsilon = %20.13E\n", epsilon);
    printf(" Comparison of temperature integrals\n");
  } else {
    printf(" Unknown class\n");
    printf(" Temperature integral\n");
  }
  if (*Class == 'U') {
    printf(" %20.13E\n", norm);
  } else if (norm_dif <= epsilon) {
    printf(" %20.13E%20.13E%20.13E\n", norm, norm_ref, norm_dif);
  } else {
    *verified = 0;
    printf(" FAILURE: %20.13E%20.13E%20.13E\n", norm, norm_ref, norm_dif);
  }
  if (*Class == 'U') {
    printf(" No reference values provided\n");
    printf(" No verification performed\n");
  } else if (*verified) {
    printf(" Verification Successful\n");
  } else {
    printf(" Verification failed\n");
  }
}
// Print the standard NPB result banner: benchmark name, class, problem
// size, iteration count, timing, rate, operation type and verification
// status.  Output only; no return value.
void print_results(char *name, char class, int n1, int n2, int n3, int niter,
                   double t, double mops, char *optype, int verified)
{
  char size[16];
  int last;
  printf( "\n\n %s Benchmark Completed.\n", name );
  printf( " Class           = %12c\n", class );
  if (n2 != 0 || n3 != 0) {
    // grid-based problem: print the full grid dimensions
    printf( " Size            =  %4dx%4dx%4d\n", n1, n2, n3 );
  } else if (name[0] == 'E' && name[1] == 'P') {
    // EP reports 2^n1 samples; strip a trailing '.' left by %15.0lf
    sprintf( size, "%15.0lf", pow(2.0, n1) );
    last = 14;
    if (size[last] == '.') {
      size[last] = ' ';
      last--;
    }
    size[last + 1] = '\0';
    printf( " Size            = %15s\n", size );
  } else {
    // non-grid problem (EP, FT, CG style): n1 is the size measure
    printf( " Size            = %12d\n", n1 );
  }
  printf( " Iterations      = %12d\n", niter );
  printf( " Time in seconds = %12.2lf\n", t );
  printf( " Mop/s total     = %15.2lf\n", mops );
  printf( " Operation type  = %24s\n", optype );
  printf( " Verification    = %12s\n", verified ? "SUCCESSFUL" : "UNSUCCESSFUL" );
}
// Store the current wall-clock time (seconds) into *t.  The first call
// records a base second so subsequent values start near zero, keeping
// full double precision for the fractional part.
void wtime(double *t)
{
  static int base_sec = -1;
  struct timeval now;
  gettimeofday(&now, (void *)0);
  if (base_sec < 0) base_sec = now.tv_sec;
  *t = (double)(now.tv_sec - base_sec) + 1.0e-6 * now.tv_usec;
}
/*****************************************************************/
/****** E L A P S E D _ T I M E ******/
/*****************************************************************/
// Return the wall-clock time in seconds relative to the first wtime() call.
double elapsed_time( void )
{
  double now;
  wtime( &now );
  return now;
}
/*****************************************************************/
/****** T I M E R _ C L E A R ******/
/*****************************************************************/
// reset the accumulated elapsed time of timer n to zero
void timer_clear( int n )
{
  elapsed[n] = 0.0;
}
/*****************************************************************/
/****** T I M E R _ S T A R T ******/
/*****************************************************************/
// record the start timestamp of timer n (paired with timer_stop)
void timer_start( int n )
{
  start[n] = elapsed_time();
}
/*****************************************************************/
/****** T I M E R _ S T O P ******/
/*****************************************************************/
// fold the time elapsed since the matching timer_start into timer n
void timer_stop( int n )
{
  double now = elapsed_time();
  elapsed[n] += now - start[n];
}
/*****************************************************************/
/****** T I M E R _ R E A D ******/
/*****************************************************************/
// return the total time accumulated on timer n so far
double timer_read( int n )
{
  return ( elapsed[n] );
}
|
sptrackrf.h | //--------------------------------------------------------------------------------
// Copyright (c) 2017-2020, sanko-shoko. All rights reserved.
//--------------------------------------------------------------------------------
#ifndef __SP_TRACK3D_H__
#define __SP_TRACK3D_H__
#include "spcore/spcore.h"
#include "spapp/spdata/spmodel.h"
#include "spapp/spdata/spbmp.h"
#include "spapp/spalgo/sprforest.h"
#if SP_USE_OMP
#include <omp.h>
#endif
namespace sp{
// Depth-based pose tracker built on per-viewpoint random-forest regression.
// train() samples geodesic viewpoints of a model, renders depth from each,
// and fits six regression trees (one per pose parameter) on depth probes.
// execute() refines a pose estimate by querying the nearby viewpoints.
class TrackRF {
public:
// number of depth-probe points sampled per geodesic node
const int POINT_NUM = 20;
// number of randomly perturbed training samples per node
const int SAMPLE_NUM = 1500;
struct GeoNode{
// geodesic pose
Pose pose;
// sample points
Mem1<Vec3> pnts;
// train 6 tree
RandomForestReg rf;
};
Mem1<GeoNode> m_nodes;
// random range
SP_REAL m_randTrn;
SP_REAL m_randRot;
public:
// Viewing direction of a pose: unit vector from the object toward the
// camera center, expressed in the object frame.
Vec3 getDirect(const Pose &pose) {
return unitVec((invRot(pose.rot) * pose.pos)) * -1.0;
}
// True once train() has produced at least one geodesic node.
bool valid() {
return (m_nodes.size() > 0) ? true : false;
}
// Train one GeoNode per geodesic viewpoint of the model; div controls the
// geodesic subdivision (and therefore the node count).
// NOTE(review): srand()/rand() are reseeded and drawn inside makeTree()
// while this loop may run under OpenMP -- the C PRNG state is shared, so
// results can vary with thread count; confirm whether determinism matters.
void train(const Mem1<Mesh3> &model, const int div = 3) {
srand(0);
const CamParam cam = getCamParam(300, 300);
const SP_REAL radius = getModelRadius(model);
const SP_REAL distance = getModelDistance(model, cam);
// pose-perturbation ranges used when generating training samples
m_randTrn = radius * 0.1;
m_randRot = 10.0 * SP_PI / 180.0;
const int gnum = getGeodesicMeshNum(div);
m_nodes.resize(gnum);
#if SP_USE_OMP
#pragma omp parallel for
#endif
for (int i = 0; i < gnum; i++) {
#if SP_USE_OMP
// only thread 0 prints progress, scaled by the thread count
if (omp_get_thread_num() == 0) {
printf("\rtrain [%s] ", progress(i, gnum / omp_get_num_threads()));
}
fflush(stdout);
#else
printf("\rtrain [%s] ", progress(i, gnum));
fflush(stdout);
#endif
// render this viewpoint's depth image and fit the node's forest on it
const Pose pose = getGeodesicPose(div, i, distance);
Mem2<SP_REAL> depth;
renderDepth(depth, cam, pose, model);
makeTree(m_nodes[i], cam, pose, depth, i);
}
}
// Build one node: sample probe points from the rendered depth, then train
// six regression trees (one per pose parameter) on perturbed poses.
void makeTree(GeoNode &gnode, const CamParam &cam, const Pose &pose, const Mem2<SP_REAL> &depth, const int seed) {
gnode.pose = pose;
genSamplePnts(gnode.pnts, cam, pose, depth, seed);
Mem1<Mem<SP_REAL> > Xs;
// one target vector per pose parameter (6 total)
Mem1<SP_REAL> Ys[6];
genDataset(Xs, Ys, gnode.pnts, cam, pose, depth, seed);
for (int s = 0; s < 6; s++) {
gnode.rf.train(Xs, Ys[s], SAMPLE_NUM);
}
}
// One refinement step: query every node whose viewpoint lies within
// maxAngle of the current estimate, score each per-parameter prediction
// with evalPose(), average the best 20%, and apply the correction.
// Always returns true.
template<typename DEPTH>
bool execute(Pose &pose, const CamParam &cam, const Mem2<DEPTH> &depth) {
const SP_REAL maxAngle = 35.0 * SP_PI / 180.0;
Mem1<Mat> vals;
Mem1<Mat> devs;
Mem1<GeoNode*> refs;
for (int i = 0; i < m_nodes.size(); i++) {
GeoNode &node = m_nodes[i];
const Vec3 ref = getDirect(node.pose);
const Vec3 vec = getDirect(pose);
const SP_REAL angle = acos(dotVec(vec, ref));
if (fabs(angle) < maxAngle) {
// feature vector: signed depth offsets along the node's view axis
Mem<SP_REAL> data = Mem1<SP_REAL>(POINT_NUM);
for (int p = 0; p < POINT_NUM; p++) {
const Vec3 pos = pose * node.pnts[p];
const Vec2 npx = prjVec(pos);
const Vec2 pix = mulCam(cam, npx);
double d = depth(round(pix.x), round(pix.y));
// missing depth: substitute a randomized value near the pose distance
d = (d > 0.0) ? d : (1.0 + 0.1 * randu()) * pose.pos.z;
const Vec3 v = getVec3(npx.x, npx.y, 1.0) * d;
data[p] = dotVec(ref, invPose(pose) * v - node.pnts[p]);
}
Mat val = zeroMat(6, 1);
Mat dev = zeroMat(6, 1);
const Mem1<const RandomForestReg::Node*> &rfnode = node.rf.execute2(data);
for (int s = 0; s < 6; s++) {
val[s] = rfnode[s]->val;
dev[s] = rfnode[s]->dev;
}
vals.push(val);
devs.push(dev);
refs.push(&node);
}
}
{
struct Tmp {
SP_REAL v;
SP_REAL eval;
bool operator > (const Tmp t) const { return this->eval > t.eval; }
bool operator < (const Tmp t) const { return this->eval < t.eval; }
};
Mat sum = zeroMat(6, 1);
// per pose parameter: score each node's one-parameter correction against
// the observed depth, then average the best-scoring fraction
for (int p = 0; p < 6; p++) {
Mem1<Tmp> tmps;
for (int i = 0; i < vals.size(); i++) {
Mat m = zeroMat(6, 1);
m[p] = vals[i][p];
Tmp tmp;
tmp.v = vals[i][p];
//tmp.eval = devs[i][p];
tmp.eval = evalPose(*refs[i], cam, pose * invPose(getPose(m)), depth);
tmps.push(tmp);
}
sort(tmps);
// keep the best 20% after sorting by eval
const int num = round(tmps.size() * 0.2);
for (int i = 0; i < num; i++) {
sum[p] += tmps[i].v;
}
sum[p] /= num;
}
pose = pose * invPose(getPose(sum));
}
return true;
}
// Run the single-step execute() up to itmax refinement iterations.
template<typename DEPTH>
bool execute(Pose &pose, const CamParam &cam, const Mem2<DEPTH> &depth, const int itmax) {
bool ret = true;
for (int it = 0; ret && it < itmax; it++) {
ret = execute(pose, cam, depth);
}
return ret;
}
private:
// Pick POINT_NUM probe points from the valid depth pixels, biased toward
// one side of the image along a random direction nl, and map them into
// the object frame.
void genSamplePnts(Mem1<Vec3> &pnts, const CamParam &cam, const Pose &pose, const Mem2<SP_REAL> &depth, const int seed) {
srand(seed);
struct Tmp {
Vec3 pos;
SP_REAL eval;
bool operator > (const Tmp &pd) const { return eval > pd.eval; }
bool operator < (const Tmp &pd) const { return eval < pd.eval; }
};
Mem1<Tmp> tmps;
const SP_REAL angle = randu() * SP_PI;
const Vec2 nl = getVec2(cos(angle), sin(angle));
for (int v = 0; v < cam.dsize[1]; v++) {
for (int u = 0; u < cam.dsize[0]; u++) {
const SP_REAL d = depth(u, v);
if (d > 0.0) {
const Vec2 npx = invCam(cam, getVec2(u, v));
Tmp tmp;
tmp.pos = getVec3(npx.x, npx.y, 1.0) * d;
// rank pixels by their projection onto the random direction
tmp.eval = dotVec(nl, getVec2(u, v));
tmps.push(tmp);
}
}
}
sort(tmps);
pnts.clear();
// draw points only from the leading fraction of the sorted pixels
const SP_REAL rate = 0.3 * randu() + 0.4; // (0.1, 0.7) assuming randu() in (-1, 1) -- TODO confirm randu() range
for (int p = 0; p < POINT_NUM; p++) {
const int i = rand() % round(rate * tmps.size());
const Vec3 vec = invPose(pose) * tmps[i].pos;
pnts.push(vec);
}
}
// Build the regression dataset for one node: SAMPLE_NUM random pose
// perturbations; features are depth offsets at the probe points, targets
// are the six parameters of each perturbation.
void genDataset(Mem1<Mem<SP_REAL> > &Xs, Mem1<SP_REAL> *Ys, const Mem1<Vec3> &pnts, const CamParam &cam, const Pose &pose, const Mem2<SP_REAL> &depth, const int seed) {
srand(seed);
const Vec3 Nv = getDirect(pose);
for (int i = 0; i < SAMPLE_NUM; i++) {
const Pose delta = randuPose(m_randRot, m_randTrn);
const Pose tpose = pose * delta;
Mem<SP_REAL> data = Mem1<SP_REAL>(POINT_NUM);
for (int p = 0; p < POINT_NUM; p++) {
const Vec3 pos = tpose * pnts[p];
const Vec2 npx = prjVec(pos);
const Vec2 pix = mulCam(cam, npx);
SP_REAL d = depth(round(pix.x), round(pix.y));
// missing depth: substitute the pose distance plus translation noise
d = (d > 0.0) ? d : pose.pos.z + randu() * m_randTrn;
const Vec3 vec = getVec3(npx.x, npx.y, 1.0) * d;
// NOTE(review): vec1/vec2/dif below are computed but never used
const Vec3 vec1 = invPose(tpose) * vec;
const Vec3 vec2 = pnts[p];
const Vec3 dif = vec1 - vec2;
data[p] = dotVec(Nv, invPose(tpose) * vec - pnts[p]);
}
Xs.push(data);
const Mat m = getMat(delta, 6, 1);
for (int s = 0; s < 6; s++) {
Ys[s].push(m[s]);
}
}
}
// Median absolute z-error between the projected probe points and the
// observed depth; probes that hit an empty depth pixel contribute
// SP_INFINITY, so poses projecting off the object score poorly.
template<typename DEPTH>
SP_REAL evalPose(const GeoNode &node, const CamParam &cam, const Pose &pose, const Mem2<DEPTH> &depth) {
Mem1<SP_REAL> data(POINT_NUM);
for (int p = 0; p < POINT_NUM; p++) {
const Vec3 pos = pose * node.pnts[p];
const Vec2 npx = prjVec(pos);
const Vec2 pix = mulCam(cam, npx);
const double d = depth(round(pix.x), round(pix.y));
if (d > 0.0) {
data[p] = fabs(pos.z - d);
}
else {
data[p] = SP_INFINITY;
}
}
return median(data);
}
};
}
#endif |
effect.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% EEEEE FFFFF FFFFF EEEEE CCCC TTTTT %
% E F F E C T %
% EEE FFF FFF EEE C T %
% E F F E C T %
% EEEEE F F EEEEE CCCC T %
% %
% %
% MagickCore Image Effects Methods %
% %
% Software Design %
% Cristy %
% October 1996 %
% %
% %
% Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/accelerate-private.h"
#include "MagickCore/blob.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/constitute.h"
#include "MagickCore/decorate.h"
#include "MagickCore/distort.h"
#include "MagickCore/draw.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/effect.h"
#include "MagickCore/fx.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/matrix.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/montage.h"
#include "MagickCore/morphology.h"
#include "MagickCore/morphology-private.h"
#include "MagickCore/paint.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/property.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/random_.h"
#include "MagickCore/random-private.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/resize.h"
#include "MagickCore/resource_.h"
#include "MagickCore/segment.h"
#include "MagickCore/shear.h"
#include "MagickCore/signature-private.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/transform.h"
#include "MagickCore/threshold.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A d a p t i v e B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AdaptiveBlurImage() adaptively blurs the image by blurring less
% intensely near image edges and more intensely far from edges. We blur the
% image with a Gaussian operator of the given radius and standard deviation
% (sigma). For reasonable results, radius should be larger than sigma. Use a
% radius of 0 and AdaptiveBlurImage() selects a suitable radius for you.
%
% The format of the AdaptiveBlurImage method is:
%
% Image *AdaptiveBlurImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Laplacian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AdaptiveBlurImage(const Image *image,const double radius,
const double sigma,ExceptionInfo *exception)
{
#define AdaptiveBlurImageTag "Convolve/Image"
/* MagickSigma guards the Gaussian denominators against a zero sigma */
#define MagickSigma (fabs(sigma) < MagickEpsilon ? MagickEpsilon : sigma)
CacheView
*blur_view,
*edge_view,
*image_view;
double
normalize,
**kernel;
Image
*blur_image,
*edge_image,
*gaussian_image;
MagickBooleanType
status;
MagickOffsetType
progress;
register ssize_t
i;
size_t
width;
ssize_t
j,
k,
u,
v,
y;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
blur_image=CloneImage(image,0,0,MagickTrue,exception);
if (blur_image == (Image *) NULL)
return((Image *) NULL);
/* zero sigma is a no-op: return the unmodified clone */
if (fabs(sigma) < MagickEpsilon)
return(blur_image);
if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse)
{
blur_image=DestroyImage(blur_image);
return((Image *) NULL);
}
/*
Edge detect the image brightness channel, level, blur, and level again.
The edge image selects, per pixel, how wide a blur kernel to apply.
*/
edge_image=EdgeImage(image,radius,exception);
if (edge_image == (Image *) NULL)
{
blur_image=DestroyImage(blur_image);
return((Image *) NULL);
}
(void) AutoLevelImage(edge_image,exception);
gaussian_image=BlurImage(edge_image,radius,sigma,exception);
if (gaussian_image != (Image *) NULL)
{
edge_image=DestroyImage(edge_image);
edge_image=gaussian_image;
}
(void) AutoLevelImage(edge_image,exception);
/*
Create a set of kernels from maximum (radius,sigma) to minimum.
Only even indices i are populated; kernel[i] is a normalized Gaussian
of side (width-i).
*/
width=GetOptimalKernelWidth2D(radius,sigma);
kernel=(double **) MagickAssumeAligned(AcquireAlignedMemory((size_t) width,
sizeof(*kernel)));
if (kernel == (double **) NULL)
{
edge_image=DestroyImage(edge_image);
blur_image=DestroyImage(blur_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
(void) memset(kernel,0,(size_t) width*sizeof(*kernel));
for (i=0; i < (ssize_t) width; i+=2)
{
kernel[i]=(double *) MagickAssumeAligned(AcquireAlignedMemory(
(size_t) (width-i),(width-i)*sizeof(**kernel)));
if (kernel[i] == (double *) NULL)
break;
normalize=0.0;
j=(ssize_t) (width-i-1)/2;
k=0;
for (v=(-j); v <= j; v++)
{
for (u=(-j); u <= j; u++)
{
kernel[i][k]=(double) (exp(-((double) u*u+v*v)/(2.0*MagickSigma*
MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
normalize+=kernel[i][k];
k++;
}
}
/* fold any normalization residue into the center tap */
kernel[i][(k-1)/2]+=(double) (1.0-normalize);
if (sigma < MagickEpsilon)
kernel[i][(k-1)/2]=1.0;
}
/* partial allocation failure: release what was acquired and bail out */
if (i < (ssize_t) width)
{
for (i-=2; i >= 0; i-=2)
kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]);
kernel=(double **) RelinquishAlignedMemory(kernel);
edge_image=DestroyImage(edge_image);
blur_image=DestroyImage(blur_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
/*
Adaptively blur image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
edge_view=AcquireVirtualCacheView(edge_image,exception);
blur_view=AcquireAuthenticCacheView(blur_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,blur_image,blur_image->rows,1)
#endif
for (y=0; y < (ssize_t) blur_image->rows; y++)
{
register const Quantum
*magick_restrict r;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
r=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
exception);
if ((r == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) blur_image->columns; x++)
{
register const Quantum
*magick_restrict p;
register ssize_t
i;
ssize_t
center,
j;
/* map edge strength to a kernel index: strong edges -> small j
(wide kernel skipped), flat areas -> wide kernel; j is forced even
so it lands on an allocated kernel[] entry. NOTE(review): relies on
GetOptimalKernelWidth2D() returning an odd width -- confirm */
j=(ssize_t) ceil((double) width*(1.0-QuantumScale*
GetPixelIntensity(edge_image,r))-0.5);
if (j < 0)
j=0;
else
if (j > (ssize_t) width)
j=(ssize_t) width;
if ((j & 0x01) != 0)
j--;
p=GetCacheViewVirtualPixels(image_view,x-((ssize_t) (width-j)/2L),y-
(ssize_t) ((width-j)/2L),width-j,width-j,exception);
if (p == (const Quantum *) NULL)
break;
/* offset of the central pixel inside the (width-j)^2 neighborhood */
center=(ssize_t) GetPixelChannels(image)*(width-j)*((width-j)/2L)+
GetPixelChannels(image)*((width-j)/2);
for (i=0; i < (ssize_t) GetPixelChannels(blur_image); i++)
{
double
alpha,
gamma,
pixel;
PixelChannel
channel;
PixelTrait
blur_traits,
traits;
register const double
*magick_restrict k;
register const Quantum
*magick_restrict pixels;
register ssize_t
u;
ssize_t
v;
channel=GetPixelChannelChannel(image,i);
traits=GetPixelChannelTraits(image,channel);
blur_traits=GetPixelChannelTraits(blur_image,channel);
if ((traits == UndefinedPixelTrait) ||
(blur_traits == UndefinedPixelTrait))
continue;
/* copy-through channels take the center pixel unchanged */
if ((blur_traits & CopyPixelTrait) != 0)
{
SetPixelChannel(blur_image,channel,p[center+i],q);
continue;
}
k=kernel[j];
pixels=p;
pixel=0.0;
gamma=0.0;
if ((blur_traits & BlendPixelTrait) == 0)
{
/*
No alpha blending.
*/
for (v=0; v < (ssize_t) (width-j); v++)
{
for (u=0; u < (ssize_t) (width-j); u++)
{
pixel+=(*k)*pixels[i];
gamma+=(*k);
k++;
pixels+=GetPixelChannels(image);
}
}
gamma=PerceptibleReciprocal(gamma);
SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
continue;
}
/*
Alpha blending: weight each tap by the pixel's alpha.
*/
for (v=0; v < (ssize_t) (width-j); v++)
{
for (u=0; u < (ssize_t) (width-j); u++)
{
alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels));
pixel+=(*k)*alpha*pixels[i];
gamma+=(*k)*alpha;
k++;
pixels+=GetPixelChannels(image);
}
}
gamma=PerceptibleReciprocal(gamma);
SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
}
q+=GetPixelChannels(blur_image);
r+=GetPixelChannels(edge_image);
}
if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,AdaptiveBlurImageTag,progress,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
blur_image->type=image->type;
blur_view=DestroyCacheView(blur_view);
edge_view=DestroyCacheView(edge_view);
image_view=DestroyCacheView(image_view);
edge_image=DestroyImage(edge_image);
for (i=0; i < (ssize_t) width; i+=2)
kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]);
kernel=(double **) RelinquishAlignedMemory(kernel);
if (status == MagickFalse)
blur_image=DestroyImage(blur_image);
return(blur_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A d a p t i v e S h a r p e n I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AdaptiveSharpenImage() adaptively sharpens the image by sharpening more
% intensely near image edges and less intensely far from edges. We sharpen the
% image with a Gaussian operator of the given radius and standard deviation
% (sigma). For reasonable results, radius should be larger than sigma. Use a
% radius of 0 and AdaptiveSharpenImage() selects a suitable radius for you.
%
% The format of the AdaptiveSharpenImage method is:
%
% Image *AdaptiveSharpenImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Laplacian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AdaptiveSharpenImage(const Image *image,const double radius,
const double sigma,ExceptionInfo *exception)
{
#define AdaptiveSharpenImageTag "Convolve/Image"
/* MagickSigma guards the Gaussian denominators against a zero sigma */
#define MagickSigma (fabs(sigma) < MagickEpsilon ? MagickEpsilon : sigma)
CacheView
*sharp_view,
*edge_view,
*image_view;
double
normalize,
**kernel;
Image
*sharp_image,
*edge_image,
*gaussian_image;
MagickBooleanType
status;
MagickOffsetType
progress;
register ssize_t
i;
size_t
width;
ssize_t
j,
k,
u,
v,
y;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
sharp_image=CloneImage(image,0,0,MagickTrue,exception);
if (sharp_image == (Image *) NULL)
return((Image *) NULL);
/* zero sigma is a no-op: return the unmodified clone */
if (fabs(sigma) < MagickEpsilon)
return(sharp_image);
if (SetImageStorageClass(sharp_image,DirectClass,exception) == MagickFalse)
{
sharp_image=DestroyImage(sharp_image);
return((Image *) NULL);
}
/*
Edge detect the image brightness channel, level, sharp, and level again.
The edge image selects, per pixel, how wide a sharpening kernel to apply.
*/
edge_image=EdgeImage(image,radius,exception);
if (edge_image == (Image *) NULL)
{
sharp_image=DestroyImage(sharp_image);
return((Image *) NULL);
}
(void) AutoLevelImage(edge_image,exception);
gaussian_image=BlurImage(edge_image,radius,sigma,exception);
if (gaussian_image != (Image *) NULL)
{
edge_image=DestroyImage(edge_image);
edge_image=gaussian_image;
}
(void) AutoLevelImage(edge_image,exception);
/*
Create a set of kernels from maximum (radius,sigma) to minimum.
Only even indices i are populated; kernel[i] is a sharpening kernel
(negative Gaussian surround, positive center) of side (width-i).
*/
width=GetOptimalKernelWidth2D(radius,sigma);
kernel=(double **) MagickAssumeAligned(AcquireAlignedMemory((size_t)
width,sizeof(*kernel)));
if (kernel == (double **) NULL)
{
edge_image=DestroyImage(edge_image);
sharp_image=DestroyImage(sharp_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
(void) memset(kernel,0,(size_t) width*sizeof(*kernel));
for (i=0; i < (ssize_t) width; i+=2)
{
kernel[i]=(double *) MagickAssumeAligned(AcquireAlignedMemory((size_t)
(width-i),(width-i)*sizeof(**kernel)));
if (kernel[i] == (double *) NULL)
break;
normalize=0.0;
j=(ssize_t) (width-i-1)/2;
k=0;
for (v=(-j); v <= j; v++)
{
for (u=(-j); u <= j; u++)
{
kernel[i][k]=(double) (-exp(-((double) u*u+v*v)/(2.0*MagickSigma*
MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
normalize+=kernel[i][k];
k++;
}
}
/* center tap balances the negative surround for unity net gain */
kernel[i][(k-1)/2]=(double) ((-2.0)*normalize);
if (sigma < MagickEpsilon)
kernel[i][(k-1)/2]=1.0;
}
/* partial allocation failure: release what was acquired and bail out */
if (i < (ssize_t) width)
{
for (i-=2; i >= 0; i-=2)
kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]);
kernel=(double **) RelinquishAlignedMemory(kernel);
edge_image=DestroyImage(edge_image);
sharp_image=DestroyImage(sharp_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
/*
Adaptively sharpen image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
edge_view=AcquireVirtualCacheView(edge_image,exception);
sharp_view=AcquireAuthenticCacheView(sharp_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,sharp_image,sharp_image->rows,1)
#endif
for (y=0; y < (ssize_t) sharp_image->rows; y++)
{
register const Quantum
*magick_restrict r;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
r=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(sharp_view,0,y,sharp_image->columns,1,
exception);
if ((r == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) sharp_image->columns; x++)
{
register const Quantum
*magick_restrict p;
register ssize_t
i;
ssize_t
center,
j;
/* map edge strength to a kernel index; j is forced even so it lands
on an allocated kernel[] entry. NOTE(review): relies on
GetOptimalKernelWidth2D() returning an odd width -- confirm */
j=(ssize_t) ceil((double) width*(1.0-QuantumScale*
GetPixelIntensity(edge_image,r))-0.5);
if (j < 0)
j=0;
else
if (j > (ssize_t) width)
j=(ssize_t) width;
if ((j & 0x01) != 0)
j--;
p=GetCacheViewVirtualPixels(image_view,x-((ssize_t) (width-j)/2L),y-
(ssize_t) ((width-j)/2L),width-j,width-j,exception);
if (p == (const Quantum *) NULL)
break;
/* offset of the central pixel inside the (width-j)^2 neighborhood */
center=(ssize_t) GetPixelChannels(image)*(width-j)*((width-j)/2L)+
GetPixelChannels(image)*((width-j)/2);
for (i=0; i < (ssize_t) GetPixelChannels(sharp_image); i++)
{
double
alpha,
gamma,
pixel;
PixelChannel
channel;
PixelTrait
sharp_traits,
traits;
register const double
*magick_restrict k;
register const Quantum
*magick_restrict pixels;
register ssize_t
u;
ssize_t
v;
channel=GetPixelChannelChannel(image,i);
traits=GetPixelChannelTraits(image,channel);
sharp_traits=GetPixelChannelTraits(sharp_image,channel);
if ((traits == UndefinedPixelTrait) ||
(sharp_traits == UndefinedPixelTrait))
continue;
/* copy-through channels take the center pixel unchanged */
if ((sharp_traits & CopyPixelTrait) != 0)
{
SetPixelChannel(sharp_image,channel,p[center+i],q);
continue;
}
k=kernel[j];
pixels=p;
pixel=0.0;
gamma=0.0;
if ((sharp_traits & BlendPixelTrait) == 0)
{
/*
No alpha blending.
*/
for (v=0; v < (ssize_t) (width-j); v++)
{
for (u=0; u < (ssize_t) (width-j); u++)
{
pixel+=(*k)*pixels[i];
gamma+=(*k);
k++;
pixels+=GetPixelChannels(image);
}
}
gamma=PerceptibleReciprocal(gamma);
SetPixelChannel(sharp_image,channel,ClampToQuantum(gamma*pixel),q);
continue;
}
/*
Alpha blending: weight each tap by the pixel's alpha.
*/
for (v=0; v < (ssize_t) (width-j); v++)
{
for (u=0; u < (ssize_t) (width-j); u++)
{
alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels));
pixel+=(*k)*alpha*pixels[i];
gamma+=(*k)*alpha;
k++;
pixels+=GetPixelChannels(image);
}
}
gamma=PerceptibleReciprocal(gamma);
SetPixelChannel(sharp_image,channel,ClampToQuantum(gamma*pixel),q);
}
q+=GetPixelChannels(sharp_image);
r+=GetPixelChannels(edge_image);
}
if (SyncCacheViewAuthenticPixels(sharp_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,AdaptiveSharpenImageTag,progress,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
sharp_image->type=image->type;
sharp_view=DestroyCacheView(sharp_view);
edge_view=DestroyCacheView(edge_view);
image_view=DestroyCacheView(image_view);
edge_image=DestroyImage(edge_image);
for (i=0; i < (ssize_t) width; i+=2)
kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]);
kernel=(double **) RelinquishAlignedMemory(kernel);
if (status == MagickFalse)
sharp_image=DestroyImage(sharp_image);
return(sharp_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BlurImage() blurs an image. We convolve the image with a Gaussian operator
% of the given radius and standard deviation (sigma). For reasonable results,
% the radius should be larger than sigma. Use a radius of 0 and BlurImage()
% selects a suitable radius for you.
%
% The format of the BlurImage method is:
%
% Image *BlurImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *BlurImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  Image
    *blur_image;

  KernelInfo
    *kernel_info;

  char
    kernel_geometry[MagickPathExtent];

  /*
    Blur by convolving with a two-pass (0 and 90 degree) Gaussian kernel
    of the given radius and standard deviation.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* prefer the accelerated (OpenCL) path when it produces a result */
  blur_image=AccelerateBlurImage(image,radius,sigma,exception);
  if (blur_image != (Image *) NULL)
    return(blur_image);
#endif
  (void) FormatLocaleString(kernel_geometry,MagickPathExtent,
    "blur:%.20gx%.20g;blur:%.20gx%.20g+90",radius,sigma,radius,sigma);
  kernel_info=AcquireKernelInfo(kernel_geometry,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  blur_image=ConvolveImage(image,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  return(blur_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o n v o l v e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConvolveImage() applies a custom convolution kernel to the image.
%
% The format of the ConvolveImage method is:
%
% Image *ConvolveImage(const Image *image,const KernelInfo *kernel,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o kernel: the filtering kernel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ConvolveImage(const Image *image,
  const KernelInfo *kernel_info,ExceptionInfo *exception)
{
  /*
    Apply a custom convolution kernel: try the accelerated (OpenCL) path
    first, then fall back to the generic morphology convolution.
  */
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  Image
    *convolve_image;

  convolve_image=AccelerateConvolveImage(image,kernel_info,exception);
  if (convolve_image != (Image *) NULL)
    return(convolve_image);
#endif
  return(MorphologyImage(image,ConvolveMorphology,1,kernel_info,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s p e c k l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DespeckleImage() reduces the speckle noise in an image while preserving the
% edges of the original image. A speckle removing filter uses a complementary
% hulling technique (raising pixels that are darker than their surrounding
% neighbors, then complementarily lowering pixels that are brighter than their
% surrounding neighbors) to reduce the speckle index of that image (reference
% Crimmins speckle removal).
%
% The format of the DespeckleImage method is:
%
% Image *DespeckleImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Hull() performs one pass of the Crimmins complementary hulling operator
  over a single channel plane.  f and g are (columns+2) x (rows+2) padded
  planes; (x_offset,y_offset) selects one neighbor direction and polarity
  selects raising darker pixels (>0) or lowering brighter ones.  The first
  loop writes the neighbor-adjusted plane into g; the second loop folds the
  opposite-neighbor test back into f.
*/
static void Hull(const Image *image,const ssize_t x_offset,
const ssize_t y_offset,const size_t columns,const size_t rows,
const int polarity,Quantum *magick_restrict f,Quantum *magick_restrict g)
{
register Quantum
*p,
*q,
*r,
*s;
ssize_t
y;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(f != (Quantum *) NULL);
assert(g != (Quantum *) NULL);
/* p/q skip the one-pixel top padding row; r is p shifted one neighbor */
p=f+(columns+2);
q=g+(columns+2);
r=p+(y_offset*((ssize_t) columns+2)+x_offset);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) \
magick_number_threads(image,image,rows,1)
#endif
for (y=0; y < (ssize_t) rows; y++)
{
MagickRealType
v;
register ssize_t
i,
x;
/* index of the first interior pixel of row y inside the padded plane */
i=(2*y+1)+y*columns;
if (polarity > 0)
for (x=0; x < (ssize_t) columns; x++)
{
v=(MagickRealType) p[i];
if ((MagickRealType) r[i] >= (v+ScaleCharToQuantum(2)))
v+=ScaleCharToQuantum(1);
q[i]=(Quantum) v;
i++;
}
else
for (x=0; x < (ssize_t) columns; x++)
{
v=(MagickRealType) p[i];
if ((MagickRealType) r[i] <= (v-ScaleCharToQuantum(2)))
v-=ScaleCharToQuantum(1);
q[i]=(Quantum) v;
i++;
}
}
/* second pass: compare against both the neighbor (r) and its mirror (s) */
p=f+(columns+2);
q=g+(columns+2);
r=q+(y_offset*((ssize_t) columns+2)+x_offset);
s=q-(y_offset*((ssize_t) columns+2)+x_offset);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) \
magick_number_threads(image,image,rows,1)
#endif
for (y=0; y < (ssize_t) rows; y++)
{
register ssize_t
i,
x;
MagickRealType
v;
i=(2*y+1)+y*columns;
if (polarity > 0)
for (x=0; x < (ssize_t) columns; x++)
{
v=(MagickRealType) q[i];
if (((MagickRealType) s[i] >= (v+ScaleCharToQuantum(2))) &&
((MagickRealType) r[i] > v))
v+=ScaleCharToQuantum(1);
p[i]=(Quantum) v;
i++;
}
else
for (x=0; x < (ssize_t) columns; x++)
{
v=(MagickRealType) q[i];
if (((MagickRealType) s[i] <= (v-ScaleCharToQuantum(2))) &&
((MagickRealType) r[i] < v))
v-=ScaleCharToQuantum(1);
p[i]=(Quantum) v;
i++;
}
}
}
MagickExport Image *DespeckleImage(const Image *image,ExceptionInfo *exception)
{
#define DespeckleImageTag "Despeckle/Image"

  CacheView
    *despeckle_view,
    *image_view;

  Image
    *despeckle_image;

  MagickBooleanType
    status;

  MemoryInfo
    *buffer_info,
    *pixel_info;

  Quantum
    *magick_restrict buffer,
    *magick_restrict pixels;

  register ssize_t
    i;

  size_t
    length;

  /*
    Four hull directions: right, down, down-right and down-left.  Each is
    applied in both signs and both polarities below, for 8 Hull() passes
    per direction pair.
  */
  static const ssize_t
    X[4] = {0, 1, 1,-1},
    Y[4] = {1, 0, 1, 1};

  /*
    Allocate despeckled image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Prefer the OpenCL-accelerated path when it is available and succeeds. */
  despeckle_image=AccelerateDespeckleImage(image,exception);
  if (despeckle_image != (Image *) NULL)
    return(despeckle_image);
#endif
  despeckle_image=CloneImage(image,0,0,MagickTrue,exception);
  if (despeckle_image == (Image *) NULL)
    return((Image *) NULL);
  status=SetImageStorageClass(despeckle_image,DirectClass,exception);
  if (status == MagickFalse)
    {
      despeckle_image=DestroyImage(despeckle_image);
      return((Image *) NULL);
    }
  /*
    Allocate image buffer.  Both scratch buffers carry a one-pixel border on
    every side (hence columns+2 by rows+2) so Hull() can read neighbors
    without bounds checks.
  */
  length=(size_t) ((image->columns+2)*(image->rows+2));
  pixel_info=AcquireVirtualMemory(length,sizeof(*pixels));
  buffer_info=AcquireVirtualMemory(length,sizeof(*buffer));
  if ((pixel_info == (MemoryInfo *) NULL) ||
      (buffer_info == (MemoryInfo *) NULL))
    {
      if (buffer_info != (MemoryInfo *) NULL)
        buffer_info=RelinquishVirtualMemory(buffer_info);
      if (pixel_info != (MemoryInfo *) NULL)
        pixel_info=RelinquishVirtualMemory(pixel_info);
      despeckle_image=DestroyImage(despeckle_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  pixels=(Quantum *) GetVirtualMemoryBlob(pixel_info);
  buffer=(Quantum *) GetVirtualMemoryBlob(buffer_info);
  /*
    Reduce speckle in the image, one pixel channel at a time.
  */
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
  despeckle_view=AcquireAuthenticCacheView(despeckle_image,exception);
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    PixelChannel
      channel;

    PixelTrait
      despeckle_traits,
      traits;

    register ssize_t
      k,
      x;

    ssize_t
      j,
      y;

    if (status == MagickFalse)
      continue;
    channel=GetPixelChannelChannel(image,i);
    traits=GetPixelChannelTraits(image,channel);
    despeckle_traits=GetPixelChannelTraits(despeckle_image,channel);
    if ((traits == UndefinedPixelTrait) ||
        (despeckle_traits == UndefinedPixelTrait))
      continue;
    if ((despeckle_traits & CopyPixelTrait) != 0)
      continue;
    /*
      Gather this channel into the zero-padded scratch buffer.  j walks the
      padded layout: skip the top border row, then one border pixel before
      and after each image row.
    */
    (void) memset(pixels,0,length*sizeof(*pixels));
    j=(ssize_t) image->columns+2;
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      register const Quantum
        *magick_restrict p;

      p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      j++;
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        pixels[j++]=p[i];
        p+=GetPixelChannels(image);
      }
      j++;
    }
    (void) memset(buffer,0,length*sizeof(*buffer));
    /*
      Run the hull passes: for each direction, both offset signs and both
      polarities.  Hull() alternates pixels/buffer as source/destination.
    */
    for (k=0; k < 4; k++)
    {
      Hull(image,X[k],Y[k],image->columns,image->rows,1,pixels,buffer);
      Hull(image,-X[k],-Y[k],image->columns,image->rows,1,pixels,buffer);
      Hull(image,-X[k],-Y[k],image->columns,image->rows,-1,pixels,buffer);
      Hull(image,X[k],Y[k],image->columns,image->rows,-1,pixels,buffer);
    }
    /*
      Scatter the despeckled channel back out of the padded buffer, using
      the same j walk as the gather above.
    */
    j=(ssize_t) image->columns+2;
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      MagickBooleanType
        sync;

      register Quantum
        *magick_restrict q;

      q=GetCacheViewAuthenticPixels(despeckle_view,0,y,despeckle_image->columns,
        1,exception);
      if (q == (Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      j++;
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        SetPixelChannel(despeckle_image,channel,pixels[j++],q);
        q+=GetPixelChannels(despeckle_image);
      }
      sync=SyncCacheViewAuthenticPixels(despeckle_view,exception);
      if (sync == MagickFalse)
        status=MagickFalse;
      j++;
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* Progress is reported per channel, not per row. */
        proceed=SetImageProgress(image,DespeckleImageTag,(MagickOffsetType) i,
          GetPixelChannels(image));
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  despeckle_view=DestroyCacheView(despeckle_view);
  image_view=DestroyCacheView(image_view);
  buffer_info=RelinquishVirtualMemory(buffer_info);
  pixel_info=RelinquishVirtualMemory(pixel_info);
  despeckle_image->type=image->type;
  if (status == MagickFalse)
    despeckle_image=DestroyImage(despeckle_image);
  return(despeckle_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E d g e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EdgeImage() finds edges in an image. Radius defines the radius of the
% convolution filter. Use a radius of 0 and EdgeImage() selects a suitable
% radius for you.
%
% The format of the EdgeImage method is:
%
% Image *EdgeImage(const Image *image,const double radius,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the pixel neighborhood.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *EdgeImage(const Image *image,const double radius,
  ExceptionInfo *exception)
{
  Image
    *edge_image;

  KernelInfo
    *kernel_info;

  register ssize_t
    n;

  size_t
    taps,
    width;

  /*
    Highlight edges by convolving with a square kernel whose taps are all
    -1.0 except the center tap, which is (taps-1) so the kernel sums to
    zero over flat regions.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  width=GetOptimalKernelWidth1D(radius,0.5);
  kernel_info=AcquireKernelInfo((const char *) NULL,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  (void) memset(kernel_info,0,sizeof(*kernel_info));
  kernel_info->width=width;
  kernel_info->height=width;
  kernel_info->x=(ssize_t) (kernel_info->width-1)/2;
  kernel_info->y=(ssize_t) (kernel_info->height-1)/2;
  kernel_info->signature=MagickCoreSignature;
  kernel_info->values=(MagickRealType *) MagickAssumeAligned(
    AcquireAlignedMemory(kernel_info->width,kernel_info->height*
    sizeof(*kernel_info->values)));
  if (kernel_info->values == (MagickRealType *) NULL)
    {
      kernel_info=DestroyKernelInfo(kernel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  taps=kernel_info->width*kernel_info->height;
  for (n=0; n < (ssize_t) taps; n++)
    kernel_info->values[n]=(-1.0);
  /* taps/2 is the center of the (odd-sized) square kernel. */
  kernel_info->values[taps/2]=(double) kernel_info->width*
    kernel_info->height-1.0;
  edge_image=ConvolveImage(image,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  return(edge_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E m b o s s I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EmbossImage() returns a grayscale image with a three-dimensional effect.
% We convolve the image with a Gaussian operator of the given radius and
% standard deviation (sigma). For reasonable results, radius should be
% larger than sigma. Use a radius of 0 and Emboss() selects a suitable
% radius for you.
%
% The format of the EmbossImage method is:
%
% Image *EmbossImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the pixel neighborhood.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *EmbossImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  double
    gamma,
    normalize;

  Image
    *emboss_image;

  KernelInfo
    *kernel_info;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    j,
    k,
    u,
    v;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  width=GetOptimalKernelWidth1D(radius,sigma);
  kernel_info=AcquireKernelInfo((const char *) NULL,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  kernel_info->width=width;
  kernel_info->height=width;
  kernel_info->x=(ssize_t) (width-1)/2;
  kernel_info->y=(ssize_t) (width-1)/2;
  kernel_info->values=(MagickRealType *) MagickAssumeAligned(
    AcquireAlignedMemory(kernel_info->width,kernel_info->width*
    sizeof(*kernel_info->values)));
  if (kernel_info->values == (MagickRealType *) NULL)
    {
      kernel_info=DestroyKernelInfo(kernel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Build the emboss kernel: a Gaussian profile whose sign flips across the
    origin (negative when u or v is negative), zeroed everywhere except the
    anti-diagonal.  k starts at +j and decreases once per row, so row v=-j
    keeps only u=+j, row v=+j keeps only u=-j, producing the directional
    light/shadow offset.
  */
  j=(ssize_t) (kernel_info->width-1)/2;
  k=j;
  i=0;
  for (v=(-j); v <= j; v++)
  {
    for (u=(-j); u <= j; u++)
    {
      kernel_info->values[i]=(MagickRealType) (((u < 0) || (v < 0) ? -8.0 :
        8.0)*exp(-((double) u*u+v*v)/(2.0*MagickSigma*MagickSigma))/
        (2.0*MagickPI*MagickSigma*MagickSigma));
      if (u != k)
        kernel_info->values[i]=0.0;
      i++;
    }
    k--;
  }
  /*
    Normalize so the surviving taps sum to one (PerceptibleReciprocal
    guards against a near-zero sum).
  */
  normalize=0.0;
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    normalize+=kernel_info->values[i];
  gamma=PerceptibleReciprocal(normalize);
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    kernel_info->values[i]*=gamma;
  emboss_image=ConvolveImage(image,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  /* Stretch the result's contrast to fill the full quantum range. */
  if (emboss_image != (Image *) NULL)
    (void) EqualizeImage(emboss_image,exception);
  return(emboss_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G a u s s i a n B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GaussianBlurImage() blurs an image. We convolve the image with a
% Gaussian operator of the given radius and standard deviation (sigma).
% For reasonable results, the radius should be larger than sigma. Use a
% radius of 0 and GaussianBlurImage() selects a suitable radius for you
%
% The format of the GaussianBlurImage method is:
%
% Image *GaussianBlurImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *GaussianBlurImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  char
    kernel_geometry[MagickPathExtent];

  Image
    *blur_image;

  KernelInfo
    *kernel_info;

  /*
    Blur by convolving with a 2-D Gaussian kernel described by a
    "gaussian:RxS" kernel geometry string.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  (void) FormatLocaleString(kernel_geometry,MagickPathExtent,
    "gaussian:%.20gx%.20g",radius,sigma);
  kernel_info=AcquireKernelInfo(kernel_geometry,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  blur_image=ConvolveImage(image,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  return(blur_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% K u w a h a r a I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% KuwaharaImage() is an edge preserving noise reduction filter.
%
% The format of the KuwaharaImage method is:
%
% Image *KuwaharaImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the square window radius.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
/* Rec.709 luma of a pixel given as an array of channel doubles. */
static inline MagickRealType GetMeanLuma(const Image *magick_restrict image,
  const double *magick_restrict pixel)
{
  const double
    blue = pixel[image->channel_map[BluePixelChannel].offset],
    green = pixel[image->channel_map[GreenPixelChannel].offset],
    red = pixel[image->channel_map[RedPixelChannel].offset];

  return(0.212656f*red+0.715158f*green+0.072186f*blue);
}
MagickExport Image *KuwaharaImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
#define KuwaharaImageTag "Kuwahara/Image"

  CacheView
    *image_view,
    *kuwahara_view;

  Image
    *gaussian_image,
    *kuwahara_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  size_t
    width;

  ssize_t
    y;

  /*
    Initialize Kuwahara image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* Each of the four candidate quadrants is a (radius+1)-pixel square. */
  width=(size_t) radius+1;
  /* Statistics are measured on a pre-blurred copy, not the原 -- on a
     Gaussian-blurred copy of the input. */
  gaussian_image=BlurImage(image,radius,sigma,exception);
  if (gaussian_image == (Image *) NULL)
    return((Image *) NULL);
  kuwahara_image=CloneImage(image,0,0,MagickTrue,exception);
  if (kuwahara_image == (Image *) NULL)
    {
      gaussian_image=DestroyImage(gaussian_image);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(kuwahara_image,DirectClass,exception) == MagickFalse)
    {
      gaussian_image=DestroyImage(gaussian_image);
      kuwahara_image=DestroyImage(kuwahara_image);
      return((Image *) NULL);
    }
  /*
    Edge preserving noise reduction filter: for every pixel, pick the
    quadrant (of the four squares touching the pixel) with the smallest
    luma variance and use its center value.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(gaussian_image,exception);
  kuwahara_view=AcquireAuthenticCacheView(kuwahara_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,kuwahara_image,gaussian_image->rows,1)
#endif
  for (y=0; y < (ssize_t) gaussian_image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(kuwahara_view,0,y,kuwahara_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) gaussian_image->columns; x++)
    {
      const Quantum
        *magick_restrict p;

      double
        min_variance;

      RectangleInfo
        quadrant,
        target;

      register size_t
        i;

      min_variance=MagickMaximumValue;
      SetGeometry(gaussian_image,&target);
      quadrant.width=width;
      quadrant.height=width;
      for (i=0; i < 4; i++)
      {
        const Quantum
          *magick_restrict k;

        double
          mean[MaxPixelChannels],
          variance;

        register ssize_t
          n;

        ssize_t
          j;

        /*
          Position the candidate square: case 0 = upper-left of (x,y),
          case 1 = upper-right, case 2 = lower-left, case 3 = lower-right
          (the default x,y origin).
        */
        quadrant.x=x;
        quadrant.y=y;
        switch (i)
        {
          case 0:
          {
            quadrant.x=x-(ssize_t) (width-1);
            quadrant.y=y-(ssize_t) (width-1);
            break;
          }
          case 1:
          {
            quadrant.y=y-(ssize_t) (width-1);
            break;
          }
          case 2:
          {
            quadrant.x=x-(ssize_t) (width-1);
            break;
          }
          case 3:
          default:
            break;
        }
        p=GetCacheViewVirtualPixels(image_view,quadrant.x,quadrant.y,
          quadrant.width,quadrant.height,exception);
        if (p == (const Quantum *) NULL)
          break;
        /* Per-channel mean over the quadrant. */
        for (j=0; j < (ssize_t) GetPixelChannels(gaussian_image); j++)
          mean[j]=0.0;
        k=p;
        for (n=0; n < (ssize_t) (width*width); n++)
        {
          for (j=0; j < (ssize_t) GetPixelChannels(gaussian_image); j++)
            mean[j]+=(double) k[j];
          k+=GetPixelChannels(gaussian_image);
        }
        for (j=0; j < (ssize_t) GetPixelChannels(gaussian_image); j++)
          mean[j]/=(double) (width*width);
        /* Luma variance of the quadrant around its mean luma. */
        k=p;
        variance=0.0;
        for (n=0; n < (ssize_t) (width*width); n++)
        {
          double
            luma;

          luma=GetPixelLuma(gaussian_image,k);
          variance+=(luma-GetMeanLuma(gaussian_image,mean))*
            (luma-GetMeanLuma(gaussian_image,mean));
          k+=GetPixelChannels(gaussian_image);
        }
        if (variance < min_variance)
          {
            min_variance=variance;
            target=quadrant;
          }
      }
      /* i < 4 means the quadrant loop broke on a pixel-fetch failure. */
      if (i < 4)
        {
          status=MagickFalse;
          break;
        }
      /* Sample the winning quadrant at its center. */
      status=InterpolatePixelChannels(gaussian_image,image_view,kuwahara_image,
        UndefinedInterpolatePixel,(double) target.x+target.width/2.0,(double)
        target.y+target.height/2.0,q,exception);
      if (status == MagickFalse)
        break;
      q+=GetPixelChannels(kuwahara_image);
    }
    if (SyncCacheViewAuthenticPixels(kuwahara_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,KuwaharaImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  kuwahara_view=DestroyCacheView(kuwahara_view);
  image_view=DestroyCacheView(image_view);
  gaussian_image=DestroyImage(gaussian_image);
  if (status == MagickFalse)
    kuwahara_image=DestroyImage(kuwahara_image);
  return(kuwahara_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L o c a l C o n t r a s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LocalContrastImage() attempts to increase the appearance of large-scale
% light-dark transitions. Local contrast enhancement works similarly to
% sharpening with an unsharp mask, however the mask is instead created using
% an image with a greater blur distance.
%
% The format of the LocalContrastImage method is:
%
% Image *LocalContrastImage(const Image *image, const double radius,
% const double strength,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian blur, in percentage with 100%
% resulting in a blur radius of 20% of largest dimension.
%
% o strength: the strength of the blur mask in percentage.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *LocalContrastImage(const Image *image,const double radius,
  const double strength,ExceptionInfo *exception)
{
#define LocalContrastImageTag "LocalContrast/Image"

  CacheView
    *image_view,
    *contrast_view;

  float
    *interImage,
    *scanLinePixels,
    totalWeight;

  Image
    *contrast_image;

  MagickBooleanType
    status;

  MemoryInfo
    *scanLinePixels_info,
    *interImage_info;

  ssize_t
    scanLineSize,
    width;

  /*
    Initialize contrast image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Prefer the OpenCL-accelerated path when it is available and succeeds. */
  contrast_image=AccelerateLocalContrastImage(image,radius,strength,exception);
  if (contrast_image != (Image *) NULL)
    return(contrast_image);
#endif
  contrast_image=CloneImage(image,0,0,MagickTrue,exception);
  if (contrast_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(contrast_image,DirectClass,exception) == MagickFalse)
    {
      contrast_image=DestroyImage(contrast_image);
      return((Image *) NULL);
    }
  image_view=AcquireVirtualCacheView(image,exception);
  contrast_view=AcquireAuthenticCacheView(contrast_image,exception);
  /*
    The blur half-width is 'radius' percent scaled to 0.2% of the largest
    image dimension per unit; scan lines are padded by 'width' on each end.
  */
  scanLineSize=(ssize_t) MagickMax(image->columns,image->rows);
  width=(ssize_t) scanLineSize*0.002f*fabs(radius);
  scanLineSize+=(2*width);
  /* One padded scan line per OpenMP thread. */
  scanLinePixels_info=AcquireVirtualMemory((size_t) GetOpenMPMaximumThreads()*
    scanLineSize,sizeof(*scanLinePixels));
  if (scanLinePixels_info == (MemoryInfo *) NULL)
    {
      contrast_view=DestroyCacheView(contrast_view);
      image_view=DestroyCacheView(image_view);
      contrast_image=DestroyImage(contrast_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  scanLinePixels=(float *) GetVirtualMemoryBlob(scanLinePixels_info);
  /*
    Create intermediate buffer: holds the vertically blurred luma, one
    horizontally padded row (columns + 2*width) per image row.
  */
  interImage_info=AcquireVirtualMemory(image->rows*(image->columns+(2*width)),
    sizeof(*interImage));
  if (interImage_info == (MemoryInfo *) NULL)
    {
      scanLinePixels_info=RelinquishVirtualMemory(scanLinePixels_info);
      contrast_view=DestroyCacheView(contrast_view);
      image_view=DestroyCacheView(image_view);
      contrast_image=DestroyImage(contrast_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  interImage=(float *) GetVirtualMemoryBlob(interImage_info);
  totalWeight=(float) ((width+1)*(width+1));
  /*
    Vertical pass: blur the luma of each column into interImage.
  */
  status=MagickTrue;
  {
    ssize_t
      x;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(static) \
      magick_number_threads(image,image,image->columns,1)
#endif
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      const int
        id = GetOpenMPThreadId();

      const Quantum
        *magick_restrict p;

      float
        *out,
        *pix,
        *pixels;

      register ssize_t
        y;

      ssize_t
        i;

      if (status == MagickFalse)
        continue;
      /* This thread's private padded scan-line buffer. */
      pixels=scanLinePixels;
      pixels+=id*scanLineSize;
      pix=pixels;
      /* Read the column with 'width' virtual pixels above and below. */
      p=GetCacheViewVirtualPixels(image_view,x,-width,1,image->rows+(2*width),
        exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      for (y=0; y < (ssize_t) image->rows+(2*width); y++)
      {
        *pix++=(float)GetPixelLuma(image,p);
        p+=image->number_channels;
      }
      out=interImage+x+width;
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        float
          sum,
          weight;

        /*
          Triangular-weighted sum over the 2*width-tap window; weights
          ramp up then down so near taps count more.
        */
        weight=1.0f;
        sum=0;
        pix=pixels+y;
        for (i=0; i < width; i++)
        {
          sum+=weight*(*pix++);
          weight+=1.0f;
        }
        for (i=width+1; i < (2*width); i++)
        {
          sum+=weight*(*pix++);
          weight-=1.0f;
        }
        /* write to output */
        *out=sum/totalWeight;
        /* mirror the first/last columns into the horizontal padding */
        if (x <= width && x != 0)
          *(out-(x*2))=*out;
        if ((x > (ssize_t) image->columns-width-2) &&
            (x != (ssize_t) image->columns-1))
          *(out+((image->columns-x-1)*2))=*out;
        out+=image->columns+(width*2);
      }
    }
  }
  /*
    Horizontal pass: blur interImage rows, then sharpen each channel by
    the ratio of source luma to blurred luma.
  */
  {
    ssize_t
      y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(static) \
      magick_number_threads(image,image,image->rows,1)
#endif
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      const int
        id = GetOpenMPThreadId();

      const Quantum
        *magick_restrict p;

      float
        *pix,
        *pixels;

      register Quantum
        *magick_restrict q;

      register ssize_t
        x;

      ssize_t
        i;

      if (status == MagickFalse)
        continue;
      pixels=scanLinePixels;
      pixels+=id*scanLineSize;
      p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
      q=GetCacheViewAuthenticPixels(contrast_view,0,y,image->columns,1,
        exception);
      if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
        {
          status=MagickFalse;
          continue;
        }
      /* Copy this row of the padded intermediate into the scan line. */
      memcpy(pixels,interImage+(y*(image->columns+(2*width))),(image->columns+
        (2*width))*sizeof(float));
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        float
          mult,
          srcVal,
          sum,
          weight;

        PixelTrait
          traits;

        /* Same triangular window as the vertical pass. */
        weight=1.0f;
        sum=0;
        pix=pixels+x;
        for (i=0; i < width; i++)
        {
          sum+=weight*(*pix++);
          weight+=1.0f;
        }
        for (i=width+1; i < (2*width); i++)
        {
          sum+=weight*(*pix++);
          weight-=1.0f;
        }
        /*
          Unsharp-style gain: boost the pixel by 'strength' percent of the
          difference between its luma and the blurred luma.
        */
        srcVal=(float) GetPixelLuma(image,p);
        mult=(srcVal-(sum/totalWeight))*(strength/100.0f);
        mult=(srcVal+mult)/srcVal;
        traits=GetPixelChannelTraits(image,RedPixelChannel);
        if ((traits & UpdatePixelTrait) != 0)
          SetPixelRed(contrast_image,ClampToQuantum(GetPixelRed(image,p)*mult),
            q);
        traits=GetPixelChannelTraits(image,GreenPixelChannel);
        if ((traits & UpdatePixelTrait) != 0)
          SetPixelGreen(contrast_image,ClampToQuantum(GetPixelGreen(image,p)*
            mult),q);
        traits=GetPixelChannelTraits(image,BluePixelChannel);
        if ((traits & UpdatePixelTrait) != 0)
          SetPixelBlue(contrast_image,ClampToQuantum(GetPixelBlue(image,p)*
            mult),q);
        p+=image->number_channels;
        q+=contrast_image->number_channels;
      }
      if (SyncCacheViewAuthenticPixels(contrast_view,exception) == MagickFalse)
        status=MagickFalse;
    }
  }
  scanLinePixels_info=RelinquishVirtualMemory(scanLinePixels_info);
  interImage_info=RelinquishVirtualMemory(interImage_info);
  contrast_view=DestroyCacheView(contrast_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    contrast_image=DestroyImage(contrast_image);
  return(contrast_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o t i o n B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MotionBlurImage() simulates motion blur. We convolve the image with a
% Gaussian operator of the given radius and standard deviation (sigma).
% For reasonable results, radius should be larger than sigma. Use a
% radius of 0 and MotionBlurImage() selects a suitable radius for you.
% Angle gives the angle of the blurring motion.
%
% Andrew Protano contributed this effect.
%
% The format of the MotionBlurImage method is:
%
% Image *MotionBlurImage(const Image *image,const double radius,
% const double sigma,const double angle,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian, in pixels, not counting
% the center pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o angle: Apply the effect along this angle.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Build a one-sided 1-D Gaussian convolution kernel of 'width' taps,
  normalized so the taps sum to one.  Returns NULL on allocation failure;
  the caller owns the result (release with RelinquishAlignedMemory).
*/
static MagickRealType *GetMotionBlurKernel(const size_t width,
  const double sigma)
{
  MagickRealType
    *kernel,
    sum;

  register ssize_t
    n;

  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  kernel=(MagickRealType *) MagickAssumeAligned(AcquireAlignedMemory((size_t)
    width,sizeof(*kernel)));
  if (kernel == (MagickRealType *) NULL)
    return(kernel);
  sum=0.0;
  for (n=0; n < (ssize_t) width; n++)
  {
    kernel[n]=(MagickRealType) (exp((-((double) n*n)/(double) (2.0*MagickSigma*
      MagickSigma)))/(MagickSQ2PI*MagickSigma));
    sum+=kernel[n];
  }
  for (n=0; n < (ssize_t) width; n++)
    kernel[n]/=sum;
  return(kernel);
}
MagickExport Image *MotionBlurImage(const Image *image,const double radius,
  const double sigma,const double angle,ExceptionInfo *exception)
{
#define BlurImageTag "Blur/Image"

  CacheView
    *blur_view,
    *image_view,
    *motion_view;

  Image
    *blur_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickRealType
    *kernel;

  OffsetInfo
    *offset;

  PointInfo
    point;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  width=GetOptimalKernelWidth1D(radius,sigma);
  kernel=GetMotionBlurKernel(width,sigma);
  if (kernel == (MagickRealType *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  offset=(OffsetInfo *) AcquireQuantumMemory(width,sizeof(*offset));
  if (offset == (OffsetInfo *) NULL)
    {
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Precompute the per-tap pixel offsets along the blur direction: tap i
    lies i steps along the line at 'angle' from each source pixel.
  */
  point.x=(double) width*sin(DegreesToRadians(angle));
  point.y=(double) width*cos(DegreesToRadians(angle));
  for (i=0; i < (ssize_t) width; i++)
  {
    offset[i].x=(ssize_t) ceil((double) (i*point.y)/hypot(point.x,point.y)-0.5);
    offset[i].y=(ssize_t) ceil((double) (i*point.x)/hypot(point.x,point.y)-0.5);
  }
  /*
    Motion blur image.
  */
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Prefer the OpenCL-accelerated path when it is available and succeeds. */
  blur_image=AccelerateMotionBlurImage(image,kernel,width,offset,exception);
  if (blur_image != (Image *) NULL)
    {
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      offset=(OffsetInfo *) RelinquishMagickMemory(offset);
      return(blur_image);
    }
#endif
  blur_image=CloneImage(image,0,0,MagickTrue,exception);
  if (blur_image == (Image *) NULL)
    {
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      offset=(OffsetInfo *) RelinquishMagickMemory(offset);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse)
    {
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      offset=(OffsetInfo *) RelinquishMagickMemory(offset);
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  motion_view=AcquireVirtualCacheView(image,exception);
  blur_view=AcquireAuthenticCacheView(blur_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,blur_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          alpha,
          gamma,
          pixel;

        PixelChannel
          channel;

        PixelTrait
          blur_traits,
          traits;

        register const Quantum
          *magick_restrict r;

        register MagickRealType
          *magick_restrict k;

        register ssize_t
          j;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        blur_traits=GetPixelChannelTraits(blur_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (blur_traits == UndefinedPixelTrait))
          continue;
        if ((blur_traits & CopyPixelTrait) != 0)
          {
            SetPixelChannel(blur_image,channel,p[i],q);
            continue;
          }
        k=kernel;
        pixel=0.0;
        if ((blur_traits & BlendPixelTrait) == 0)
          {
            /*
              No alpha blending: straight weighted sum of the pixels along
              the blur line.
            */
            for (j=0; j < (ssize_t) width; j++)
            {
              r=GetCacheViewVirtualPixels(motion_view,x+offset[j].x,y+
                offset[j].y,1,1,exception);
              if (r == (const Quantum *) NULL)
                {
                  status=MagickFalse;
                  /* NOTE(review): this continue skips k++, desyncing the
                     kernel pointer from j for the rest of the taps; the
                     row is discarded via status anyway -- confirm. */
                  continue;
                }
              pixel+=(*k)*r[i];
              k++;
            }
            SetPixelChannel(blur_image,channel,ClampToQuantum(pixel),q);
            continue;
          }
        /*
          Alpha-blended path: weight each tap by its alpha and renormalize
          by the accumulated alpha-weight (gamma).
        */
        alpha=0.0;
        gamma=0.0;
        for (j=0; j < (ssize_t) width; j++)
        {
          r=GetCacheViewVirtualPixels(motion_view,x+offset[j].x,y+offset[j].y,1,
            1,exception);
          if (r == (const Quantum *) NULL)
            {
              status=MagickFalse;
              continue;
            }
          alpha=(double) (QuantumScale*GetPixelAlpha(image,r));
          pixel+=(*k)*alpha*r[i];
          gamma+=(*k)*alpha;
          k++;
        }
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(blur_image);
    }
    if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,BlurImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  blur_view=DestroyCacheView(blur_view);
  motion_view=DestroyCacheView(motion_view);
  image_view=DestroyCacheView(image_view);
  kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
  offset=(OffsetInfo *) RelinquishMagickMemory(offset);
  if (status == MagickFalse)
    blur_image=DestroyImage(blur_image);
  return(blur_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P r e v i e w I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PreviewImage() tiles 9 thumbnails of the specified image with an image
% processing operation applied with varying parameters. This may be helpful
% in pin-pointing an appropriate parameter for a particular image processing
% operation.
%
% The format of the PreviewImages method is:
%
% Image *PreviewImages(const Image *image,const PreviewType preview,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o preview: the image processing operation.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *PreviewImage(const Image *image,const PreviewType preview,
  ExceptionInfo *exception)
{
#define NumberTiles  9
#define PreviewImageTag  "Preview/Image"
#define DefaultPreviewGeometry  "204x204+10+10"

  char
    factor[MagickPathExtent],
    label[MagickPathExtent];

  double
    degrees,
    gamma,
    percentage,
    radius,
    sigma,
    threshold;

  extern const char
    DefaultTileFrame[];

  Image
    *images,
    *montage_image,
    *preview_image,
    *thumbnail;

  ImageInfo
    *preview_info;

  MagickBooleanType
    proceed;

  MontageInfo
    *montage_info;

  QuantizeInfo
    quantize_info;

  RectangleInfo
    geometry;

  register ssize_t
    i,
    x;

  size_t
    colors;

  ssize_t
    y;

  /*
    Build one NumberTiles-tile montage: each tile is a thumbnail of the
    source image with the requested operation applied at an increasing
    parameter value; the center tile is left untouched for reference.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  colors=2;
  degrees=0.0;
  gamma=(-0.2f);  /* first GammaPreview tile uses gamma+0.4 == 0.2 */
  preview_info=AcquireImageInfo();
  SetGeometry(image,&geometry);
  (void) ParseMetaGeometry(DefaultPreviewGeometry,&geometry.x,&geometry.y,
    &geometry.width,&geometry.height);
  images=NewImageList();
  percentage=12.5;
  GetQuantizeInfo(&quantize_info);
  radius=0.0;
  sigma=1.0;
  threshold=0.0;
  x=0;
  y=0;
  for (i=0; i < NumberTiles; i++)
  {
    thumbnail=ThumbnailImage(image,geometry.width,geometry.height,exception);
    if (thumbnail == (Image *) NULL)
      break;
    (void) SetImageProgressMonitor(thumbnail,(MagickProgressMonitor) NULL,
      (void *) NULL);
    (void) SetImageProperty(thumbnail,"label",DefaultTileLabel,exception);
    if (i == (NumberTiles/2))
      {
        /*
          Center tile: the original thumbnail, highlighted via matte color.
        */
        (void) QueryColorCompliance("#dfdfdf",AllCompliance,
          &thumbnail->matte_color,exception);
        AppendImageToList(&images,thumbnail);
        continue;
      }
    switch (preview)
    {
      case RotatePreview:
      {
        degrees+=45.0;
        preview_image=RotateImage(thumbnail,degrees,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"rotate %g",degrees);
        break;
      }
      case ShearPreview:
      {
        degrees+=5.0;
        preview_image=ShearImage(thumbnail,degrees,degrees,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"shear %gx%g",degrees,
          2.0*degrees);
        break;
      }
      case RollPreview:
      {
        x=(ssize_t) ((i+1)*thumbnail->columns)/NumberTiles;
        y=(ssize_t) ((i+1)*thumbnail->rows)/NumberTiles;
        preview_image=RollImage(thumbnail,x,y,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"roll %+.20gx%+.20g",
          (double) x,(double) y);
        break;
      }
      case HuePreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) FormatLocaleString(factor,MagickPathExtent,"100,100,%g",2.0*
          percentage);
        (void) ModulateImage(preview_image,factor,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"modulate %s",factor);
        break;
      }
      case SaturationPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) FormatLocaleString(factor,MagickPathExtent,"100,%g",2.0*
          percentage);
        (void) ModulateImage(preview_image,factor,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"modulate %s",factor);
        break;
      }
      case BrightnessPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) FormatLocaleString(factor,MagickPathExtent,"%g",2.0*percentage);
        (void) ModulateImage(preview_image,factor,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"modulate %s",factor);
        break;
      }
      case GammaPreview:
      default:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        gamma+=0.4f;
        (void) GammaImage(preview_image,gamma,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"gamma %g",gamma);
        break;
      }
      case SpiffPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image != (Image *) NULL)
          for (x=0; x < i; x++)
            (void) ContrastImage(preview_image,MagickTrue,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"contrast (%.20g)",
          (double) i+1);
        break;
      }
      case DullPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        for (x=0; x < i; x++)
          (void) ContrastImage(preview_image,MagickFalse,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"+contrast (%.20g)",
          (double) i+1);
        break;
      }
      case GrayscalePreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        colors<<=1;
        quantize_info.number_colors=colors;
        quantize_info.colorspace=GRAYColorspace;
        (void) QuantizeImage(&quantize_info,preview_image,exception);
        (void) FormatLocaleString(label,MagickPathExtent,
          "-colorspace gray -colors %.20g",(double) colors);
        break;
      }
      case QuantizePreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        colors<<=1;
        quantize_info.number_colors=colors;
        (void) QuantizeImage(&quantize_info,preview_image,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"colors %.20g",
          (double) colors);
        break;
      }
      case DespecklePreview:
      {
        /*
          Apply despeckle i times by repeatedly replacing the thumbnail.
        */
        for (x=0; x < (i-1); x++)
        {
          preview_image=DespeckleImage(thumbnail,exception);
          if (preview_image == (Image *) NULL)
            break;
          thumbnail=DestroyImage(thumbnail);
          thumbnail=preview_image;
        }
        preview_image=DespeckleImage(thumbnail,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) FormatLocaleString(label,MagickPathExtent,"despeckle (%.20g)",
          (double) i+1);
        break;
      }
      case ReduceNoisePreview:
      {
        preview_image=StatisticImage(thumbnail,NonpeakStatistic,(size_t)
          radius,(size_t) radius,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"noise %g",radius);
        break;
      }
      case AddNoisePreview:
      {
        switch ((int) i)
        {
          case 0:
          {
            (void) CopyMagickString(factor,"uniform",MagickPathExtent);
            break;
          }
          case 1:
          {
            (void) CopyMagickString(factor,"gaussian",MagickPathExtent);
            break;
          }
          case 2:
          {
            (void) CopyMagickString(factor,"multiplicative",MagickPathExtent);
            break;
          }
          case 3:
          {
            (void) CopyMagickString(factor,"impulse",MagickPathExtent);
            break;
          }
          case 5:
          {
            (void) CopyMagickString(factor,"laplacian",MagickPathExtent);
            break;
          }
          case 6:
          {
            (void) CopyMagickString(factor,"Poisson",MagickPathExtent);
            break;
          }
          default:
          {
            (void) CopyMagickString(thumbnail->magick,"NULL",MagickPathExtent);
            break;
          }
        }
        preview_image=StatisticImage(thumbnail,NonpeakStatistic,(size_t) i,
          (size_t) i,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"+noise %s",factor);
        break;
      }
      case SharpenPreview:
      {
        preview_image=SharpenImage(thumbnail,radius,sigma,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"sharpen %gx%g",
          radius,sigma);
        break;
      }
      case BlurPreview:
      {
        preview_image=BlurImage(thumbnail,radius,sigma,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"blur %gx%g",radius,
          sigma);
        break;
      }
      case ThresholdPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        /*
          Threshold the clone, not the thumbnail: thresholding the thumbnail
          discarded the result since preview_image was an unmodified copy.
        */
        (void) BilevelImage(preview_image,(double) (percentage*((double)
          QuantumRange+1.0))/100.0,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"threshold %g",
          (double) (percentage*((double) QuantumRange+1.0))/100.0);
        break;
      }
      case EdgeDetectPreview:
      {
        preview_image=EdgeImage(thumbnail,radius,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"edge %g",radius);
        break;
      }
      case SpreadPreview:
      {
        preview_image=SpreadImage(thumbnail,image->interpolate,radius,
          exception);
        (void) FormatLocaleString(label,MagickPathExtent,"spread %g",
          radius+0.5);
        break;
      }
      case SolarizePreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) SolarizeImage(preview_image,(double) QuantumRange*percentage/
          100.0,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"solarize %g",
          (QuantumRange*percentage)/100.0);
        break;
      }
      case ShadePreview:
      {
        degrees+=10.0;
        preview_image=ShadeImage(thumbnail,MagickTrue,degrees,degrees,
          exception);
        (void) FormatLocaleString(label,MagickPathExtent,"shade %gx%g",degrees,
          degrees);
        break;
      }
      case RaisePreview:
      {
        RectangleInfo
          raise;

        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        raise.width=(size_t) (2*i+2);
        raise.height=(size_t) (2*i+2);
        raise.x=(i-1)/2;
        raise.y=(i-1)/2;
        (void) RaiseImage(preview_image,&raise,MagickTrue,exception);
        (void) FormatLocaleString(label,MagickPathExtent,
          "raise %.20gx%.20g%+.20g%+.20g",(double) raise.width,(double)
          raise.height,(double) raise.x,(double) raise.y);
        break;
      }
      case SegmentPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        threshold+=0.4f;
        (void) SegmentImage(preview_image,sRGBColorspace,MagickFalse,threshold,
          threshold,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"segment %gx%g",
          threshold,threshold);
        break;
      }
      case SwirlPreview:
      {
        preview_image=SwirlImage(thumbnail,degrees,image->interpolate,
          exception);
        (void) FormatLocaleString(label,MagickPathExtent,"swirl %g",degrees);
        degrees+=45.0;
        break;
      }
      case ImplodePreview:
      {
        degrees+=0.1f;
        preview_image=ImplodeImage(thumbnail,degrees,image->interpolate,
          exception);
        (void) FormatLocaleString(label,MagickPathExtent,"implode %g",degrees);
        break;
      }
      case WavePreview:
      {
        degrees+=5.0f;
        preview_image=WaveImage(thumbnail,0.5*degrees,2.0*degrees,
          image->interpolate,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"wave %gx%g",0.5*
          degrees,2.0*degrees);
        break;
      }
      case OilPaintPreview:
      {
        preview_image=OilPaintImage(thumbnail,(double) radius,(double) sigma,
          exception);
        /*
          Label matches the operation; the previous "charcoal" label was a
          copy-paste from CharcoalDrawingPreview.
        */
        (void) FormatLocaleString(label,MagickPathExtent,"paint %gx%g",
          radius,sigma);
        break;
      }
      case CharcoalDrawingPreview:
      {
        preview_image=CharcoalImage(thumbnail,(double) radius,(double) sigma,
          exception);
        (void) FormatLocaleString(label,MagickPathExtent,"charcoal %gx%g",
          radius,sigma);
        break;
      }
      case JPEGPreview:
      {
        char
          filename[MagickPathExtent];

        int
          file;

        MagickBooleanType
          status;

        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        preview_info->quality=(size_t) percentage;
        (void) FormatLocaleString(factor,MagickPathExtent,"%.20g",(double)
          preview_info->quality);
        file=AcquireUniqueFileResource(filename);
        if (file != -1)
          file=close(file)-1;
        (void) FormatLocaleString(preview_image->filename,MagickPathExtent,
          "jpeg:%s",filename);
        status=WriteImage(preview_info,preview_image,exception);
        if (status != MagickFalse)
          {
            Image
              *quality_image;

            /*
              Round-trip through the JPEG coder so the tile shows the
              compression artifacts at this quality setting.
            */
            (void) CopyMagickString(preview_info->filename,
              preview_image->filename,MagickPathExtent);
            quality_image=ReadImage(preview_info,exception);
            if (quality_image != (Image *) NULL)
              {
                preview_image=DestroyImage(preview_image);
                preview_image=quality_image;
              }
          }
        /*
          Release the temporary file by the path it was acquired under;
          preview_image->filename carries the "jpeg:" magick prefix and
          would not match the resource.
        */
        (void) RelinquishUniqueFileResource(filename);
        if ((GetBlobSize(preview_image)/1024) >= 1024)
          (void) FormatLocaleString(label,MagickPathExtent,"quality %s\n%gmb ",
            factor,(double) ((MagickOffsetType) GetBlobSize(preview_image))/
            1024.0/1024.0);
        else
          if (GetBlobSize(preview_image) >= 1024)
            (void) FormatLocaleString(label,MagickPathExtent,
              "quality %s\n%gkb ",factor,(double) ((MagickOffsetType)
              GetBlobSize(preview_image))/1024.0);
          else
            (void) FormatLocaleString(label,MagickPathExtent,
              "quality %s\n%.20gb ",factor,(double) ((MagickOffsetType)
              GetBlobSize(preview_image)));  /* report tile, not thumbnail */
        break;
      }
    }
    thumbnail=DestroyImage(thumbnail);
    percentage+=12.5;
    radius+=0.5;
    sigma+=0.25;
    if (preview_image == (Image *) NULL)
      break;
    (void) DeleteImageProperty(preview_image,"label");
    (void) SetImageProperty(preview_image,"label",label,exception);
    AppendImageToList(&images,preview_image);
    proceed=SetImageProgress(image,PreviewImageTag,(MagickOffsetType) i,
      NumberTiles);
    if (proceed == MagickFalse)
      break;
  }
  if (images == (Image *) NULL)
    {
      preview_info=DestroyImageInfo(preview_info);
      return((Image *) NULL);
    }
  /*
    Create the montage.
  */
  montage_info=CloneMontageInfo(preview_info,(MontageInfo *) NULL);
  (void) CopyMagickString(montage_info->filename,image->filename,
    MagickPathExtent);
  montage_info->shadow=MagickTrue;
  (void) CloneString(&montage_info->tile,"3x3");
  (void) CloneString(&montage_info->geometry,DefaultPreviewGeometry);
  (void) CloneString(&montage_info->frame,DefaultTileFrame);
  montage_image=MontageImages(images,montage_info,exception);
  montage_info=DestroyMontageInfo(montage_info);
  images=DestroyImageList(images);
  if (montage_image == (Image *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  if (montage_image->montage != (char *) NULL)
    {
      /*
        Free image directory.
      */
      montage_image->montage=(char *) RelinquishMagickMemory(
        montage_image->montage);
      if (image->directory != (char *) NULL)
        montage_image->directory=(char *) RelinquishMagickMemory(
          montage_image->directory);
    }
  preview_info=DestroyImageInfo(preview_info);
  return(montage_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R o t a t i o n a l B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RotationalBlurImage() applies a radial blur to the image.
%
% Andrew Protano contributed this effect.
%
% The format of the RotationalBlurImage method is:
%
% Image *RotationalBlurImage(const Image *image,const double angle,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
%    o angle: the angle of the rotational blur, in degrees.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *RotationalBlurImage(const Image *image,const double angle,
  ExceptionInfo *exception)
{
  CacheView
    *blur_view,
    *image_view,
    *radial_view;

  double
    blur_radius,
    *cos_theta,   /* precomputed per-sample rotation table */
    offset,
    *sin_theta,   /* precomputed per-sample rotation table */
    theta;

  Image
    *blur_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PointInfo
    blur_center;  /* center of rotation: middle of the image */

  register ssize_t
    i;

  size_t
    n;  /* number of angular samples along the blur arc */

  ssize_t
    y;

  /*
    Allocate blur image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Try the OpenCL-accelerated path first; fall through on failure. */
  blur_image=AccelerateRotationalBlurImage(image,angle,exception);
  if (blur_image != (Image *) NULL)
    return(blur_image);
#endif
  blur_image=CloneImage(image,0,0,MagickTrue,exception);
  if (blur_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse)
    {
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  blur_center.x=(double) (image->columns-1)/2.0;
  blur_center.y=(double) (image->rows-1)/2.0;
  blur_radius=hypot(blur_center.x,blur_center.y);
  /*
    Sample count grows with angle and image radius; the +2 keeps n >= 2 so
    the theta division by (n-1) below is safe.
  */
  n=(size_t) fabs(4.0*DegreesToRadians(angle)*sqrt((double) blur_radius)+2UL);
  theta=DegreesToRadians(angle)/(double) (n-1);
  cos_theta=(double *) AcquireQuantumMemory((size_t) n,
    sizeof(*cos_theta));
  sin_theta=(double *) AcquireQuantumMemory((size_t) n,
    sizeof(*sin_theta));
  if ((cos_theta == (double *) NULL) ||
      (sin_theta == (double *) NULL))
    {
      if (cos_theta != (double *) NULL)
        cos_theta=(double *) RelinquishMagickMemory(cos_theta);
      if (sin_theta != (double *) NULL)
        sin_theta=(double *) RelinquishMagickMemory(sin_theta);
      blur_image=DestroyImage(blur_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /* Center the sampled arc on the source pixel's angular position. */
  offset=theta*(double) (n-1)/2.0;
  for (i=0; i < (ssize_t) n; i++)
  {
    cos_theta[i]=cos((double) (theta*i-offset));
    sin_theta[i]=sin((double) (theta*i-offset));
  }
  /*
    Radial blur image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  radial_view=AcquireVirtualCacheView(image,exception);
  blur_view=AcquireAuthenticCacheView(blur_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,blur_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;  /* another thread failed; skip remaining rows */
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        radius;

      PointInfo
        center;

      register ssize_t
        i;

      size_t
        step;

      /* Pixel position relative to the rotation center. */
      center.x=(double) x-blur_center.x;
      center.y=(double) y-blur_center.y;
      radius=hypot((double) center.x,center.y);
      /*
        Pixels near the center traverse a shorter arc, so sample more
        coarsely (larger step) the closer to the center we are.
      */
      if (radius == 0)
        step=1;
      else
        {
          step=(size_t) (blur_radius/radius);
          if (step == 0)
            step=1;
          else
            if (step >= n)
              step=n-1;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          gamma,
          pixel;

        PixelChannel
          channel;

        PixelTrait
          blur_traits,
          traits;

        register const Quantum
          *magick_restrict r;

        register ssize_t
          j;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        blur_traits=GetPixelChannelTraits(blur_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (blur_traits == UndefinedPixelTrait))
          continue;
        if ((blur_traits & CopyPixelTrait) != 0)
          {
            /* Channel is copy-only: pass the source value through. */
            SetPixelChannel(blur_image,channel,p[i],q);
            continue;
          }
        gamma=0.0;
        pixel=0.0;
        if ((GetPixelChannelTraits(image,AlphaPixelChannel) == UndefinedPixelTrait) ||
            (channel == AlphaPixelChannel))
          {
            /*
              Fast path: no alpha weighting — a plain average of the
              samples along the arc.
            */
            for (j=0; j < (ssize_t) n; j+=(ssize_t) step)
            {
              r=GetCacheViewVirtualPixels(radial_view, (ssize_t) (blur_center.x+
                center.x*cos_theta[j]-center.y*sin_theta[j]+0.5),(ssize_t)
                (blur_center.y+center.x*sin_theta[j]+center.y*cos_theta[j]+0.5),
                1,1,exception);
              if (r == (const Quantum *) NULL)
                {
                  status=MagickFalse;
                  continue;
                }
              pixel+=r[i];
              gamma++;
            }
            gamma=PerceptibleReciprocal(gamma);
            SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
            continue;
          }
        /*
          Alpha-weighted path: samples contribute in proportion to their
          normalized alpha so transparent pixels do not darken the result.
        */
        for (j=0; j < (ssize_t) n; j+=(ssize_t) step)
        {
          double
            alpha;

          r=GetCacheViewVirtualPixels(radial_view, (ssize_t) (blur_center.x+
            center.x*cos_theta[j]-center.y*sin_theta[j]+0.5),(ssize_t)
            (blur_center.y+center.x*sin_theta[j]+center.y*cos_theta[j]+0.5),
            1,1,exception);
          if (r == (const Quantum *) NULL)
            {
              status=MagickFalse;
              continue;
            }
          alpha=(double) QuantumScale*GetPixelAlpha(image,r);
          pixel+=alpha*r[i];
          gamma+=alpha;
        }
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(blur_image);
    }
    if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,BlurImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  blur_view=DestroyCacheView(blur_view);
  radial_view=DestroyCacheView(radial_view);
  image_view=DestroyCacheView(image_view);
  cos_theta=(double *) RelinquishMagickMemory(cos_theta);
  sin_theta=(double *) RelinquishMagickMemory(sin_theta);
  if (status == MagickFalse)
    blur_image=DestroyImage(blur_image);
  return(blur_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e l e c t i v e B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SelectiveBlurImage() selectively blur pixels within a contrast threshold.
% It is similar to the unsharpen mask that sharpens everything with contrast
% above a certain threshold.
%
% The format of the SelectiveBlurImage method is:
%
% Image *SelectiveBlurImage(const Image *image,const double radius,
% const double sigma,const double threshold,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o threshold: only pixels within this contrast threshold are included
% in the blur operation.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SelectiveBlurImage(const Image *image,const double radius,
  const double sigma,const double threshold,ExceptionInfo *exception)
{
#define SelectiveBlurImageTag  "SelectiveBlur/Image"

  CacheView
    *blur_view,
    *image_view,
    *luminance_view;

  Image
    *blur_image,
    *luminance_image;  /* grayscale copy used for the contrast test */

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickRealType
    *kernel;  /* width x width Gaussian weights, row-major */

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    center,  /* offset of the window's center pixel within row buffer p */
    j,
    u,
    v,
    y;

  /*
    Initialize blur image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  width=GetOptimalKernelWidth1D(radius,sigma);
  kernel=(MagickRealType *) MagickAssumeAligned(AcquireAlignedMemory((size_t)
    width,width*sizeof(*kernel)));
  if (kernel == (MagickRealType *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  /* Fill the 2D Gaussian kernel centered at (0,0). */
  j=(ssize_t) (width-1)/2;
  i=0;
  for (v=(-j); v <= j; v++)
  {
    for (u=(-j); u <= j; u++)
      kernel[i++]=(MagickRealType) (exp(-((double) u*u+v*v)/(2.0*MagickSigma*
        MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
  }
  if (image->debug != MagickFalse)
    {
      /*
        Debug aid: log the kernel one row per line.
      */
      char
        format[MagickPathExtent],
        *message;

      register const MagickRealType
        *k;

      ssize_t
        u,
        v;

      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  SelectiveBlurImage with %.20gx%.20g kernel:",(double) width,(double)
        width);
      message=AcquireString("");
      k=kernel;
      for (v=0; v < (ssize_t) width; v++)
      {
        *message='\0';
        (void) FormatLocaleString(format,MagickPathExtent,"%.20g: ",(double) v);
        (void) ConcatenateString(&message,format);
        for (u=0; u < (ssize_t) width; u++)
        {
          (void) FormatLocaleString(format,MagickPathExtent,"%+f ",(double)
            *k++);
          (void) ConcatenateString(&message,format);
        }
        (void) LogMagickEvent(TransformEvent,GetMagickModule(),"%s",message);
      }
      message=DestroyString(message);
    }
  blur_image=CloneImage(image,0,0,MagickTrue,exception);
  if (blur_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse)
    {
      blur_image=DestroyImage(blur_image);
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      return((Image *) NULL);
    }
  luminance_image=CloneImage(image,0,0,MagickTrue,exception);
  if (luminance_image == (Image *) NULL)
    {
      blur_image=DestroyImage(blur_image);
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      return((Image *) NULL);
    }
  status=TransformImageColorspace(luminance_image,GRAYColorspace,exception);
  if (status == MagickFalse)
    {
      luminance_image=DestroyImage(luminance_image);
      blur_image=DestroyImage(blur_image);
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      return((Image *) NULL);
    }
  /*
    Threshold blur image.
  */
  status=MagickTrue;
  progress=0;
  center=(ssize_t) (GetPixelChannels(image)*(image->columns+width)*
    ((width-1)/2L)+GetPixelChannels(image)*((width-1)/2L));
  image_view=AcquireVirtualCacheView(image,exception);
  luminance_view=AcquireVirtualCacheView(luminance_image,exception);
  blur_view=AcquireAuthenticCacheView(blur_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,blur_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      contrast;

    MagickBooleanType
      sync;

    register const Quantum
      *magick_restrict l,
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;  /* another thread failed; skip remaining rows */
    /* Fetch a (columns+width) x width window centered on row y. */
    p=GetCacheViewVirtualPixels(image_view,-((ssize_t) (width-1)/2L),y-(ssize_t)
      ((width-1)/2L),image->columns+width,width,exception);
    l=GetCacheViewVirtualPixels(luminance_view,-((ssize_t) (width-1)/2L),y-
      (ssize_t) ((width-1)/2L),luminance_image->columns+width,width,exception);
    q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (l == (const Quantum *) NULL) ||
        (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        intensity;

      register ssize_t
        i;

      /* Reference intensity of the center pixel for the contrast test. */
      intensity=GetPixelIntensity(image,p+center);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          alpha,
          gamma,
          pixel;

        PixelChannel
          channel;

        PixelTrait
          blur_traits,
          traits;

        register const MagickRealType
          *magick_restrict k;

        register const Quantum
          *magick_restrict luminance_pixels,
          *magick_restrict pixels;

        register ssize_t
          u;

        ssize_t
          v;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        blur_traits=GetPixelChannelTraits(blur_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (blur_traits == UndefinedPixelTrait))
          continue;
        if ((blur_traits & CopyPixelTrait) != 0)
          {
            /* Channel is copy-only: pass the source value through. */
            SetPixelChannel(blur_image,channel,p[center+i],q);
            continue;
          }
        k=kernel;
        pixel=0.0;
        pixels=p;
        luminance_pixels=l;
        gamma=0.0;
        if ((blur_traits & BlendPixelTrait) == 0)
          {
            /*
              Non-blended path: only neighbors whose luminance contrast is
              below the threshold contribute; no alpha weighting.
            */
            for (v=0; v < (ssize_t) width; v++)
            {
              for (u=0; u < (ssize_t) width; u++)
              {
                contrast=GetPixelIntensity(luminance_image,luminance_pixels)-
                  intensity;
                if (fabs(contrast) < threshold)
                  {
                    pixel+=(*k)*pixels[i];
                    gamma+=(*k);
                  }
                k++;
                pixels+=GetPixelChannels(image);
                luminance_pixels+=GetPixelChannels(luminance_image);
              }
              /* Advance past the right padding to the next window row. */
              pixels+=GetPixelChannels(image)*image->columns;
              luminance_pixels+=GetPixelChannels(luminance_image)*
                luminance_image->columns;
            }
            if (fabs((double) gamma) < MagickEpsilon)
              {
                /* No neighbor qualified: keep the original pixel. */
                SetPixelChannel(blur_image,channel,p[center+i],q);
                continue;
              }
            gamma=PerceptibleReciprocal(gamma);
            SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
            continue;
          }
        /*
          Blended path: qualifying neighbors are additionally weighted by
          their normalized alpha.
        */
        for (v=0; v < (ssize_t) width; v++)
        {
          for (u=0; u < (ssize_t) width; u++)
          {
            contrast=GetPixelIntensity(image,pixels)-intensity;
            if (fabs(contrast) < threshold)
              {
                alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels));
                pixel+=(*k)*alpha*pixels[i];
                gamma+=(*k)*alpha;
              }
            k++;
            pixels+=GetPixelChannels(image);
            luminance_pixels+=GetPixelChannels(luminance_image);
          }
          pixels+=GetPixelChannels(image)*image->columns;
          luminance_pixels+=GetPixelChannels(luminance_image)*
            luminance_image->columns;
        }
        if (fabs((double) gamma) < MagickEpsilon)
          {
            SetPixelChannel(blur_image,channel,p[center+i],q);
            continue;
          }
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      p+=GetPixelChannels(image);
      l+=GetPixelChannels(luminance_image);
      q+=GetPixelChannels(blur_image);
    }
    sync=SyncCacheViewAuthenticPixels(blur_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SelectiveBlurImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  blur_image->type=image->type;
  blur_view=DestroyCacheView(blur_view);
  luminance_view=DestroyCacheView(luminance_view);
  image_view=DestroyCacheView(image_view);
  luminance_image=DestroyImage(luminance_image);
  kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
  if (status == MagickFalse)
    blur_image=DestroyImage(blur_image);
  return(blur_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h a d e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShadeImage() shines a distant light on an image to create a
% three-dimensional effect. You control the positioning of the light with
% azimuth and elevation; azimuth is measured in degrees off the x axis
% and elevation is measured in pixels above the Z axis.
%
% The format of the ShadeImage method is:
%
% Image *ShadeImage(const Image *image,const MagickBooleanType gray,
% const double azimuth,const double elevation,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o gray: A value other than zero shades the intensity of each pixel.
%
% o azimuth, elevation: Define the light source direction.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ShadeImage(const Image *image,const MagickBooleanType gray,
  const double azimuth,const double elevation,ExceptionInfo *exception)
{
#define GetShadeIntensity(image,pixel) \
  ClampPixel(GetPixelIntensity((image),(pixel)))
#define ShadeImageTag  "Shade/Image"

  CacheView
    *image_view,
    *shade_view;

  Image
    *linear_image,
    *shade_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PrimaryInfo
    light;  /* light direction vector derived from azimuth/elevation */

  ssize_t
    y;

  /*
    Initialize shaded image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  linear_image=CloneImage(image,0,0,MagickTrue,exception);
  shade_image=CloneImage(image,0,0,MagickTrue,exception);
  if ((linear_image == (Image *) NULL) || (shade_image == (Image *) NULL))
    {
      if (linear_image != (Image *) NULL)
        linear_image=DestroyImage(linear_image);
      if (shade_image != (Image *) NULL)
        shade_image=DestroyImage(shade_image);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(shade_image,DirectClass,exception) == MagickFalse)
    {
      linear_image=DestroyImage(linear_image);
      shade_image=DestroyImage(shade_image);
      return((Image *) NULL);
    }
  /*
    Compute the light vector.
  */
  light.x=(double) QuantumRange*cos(DegreesToRadians(azimuth))*
    cos(DegreesToRadians(elevation));
  light.y=(double) QuantumRange*sin(DegreesToRadians(azimuth))*
    cos(DegreesToRadians(elevation));
  light.z=(double) QuantumRange*sin(DegreesToRadians(elevation));
  /*
    Shade image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(linear_image,exception);
  shade_view=AcquireAuthenticCacheView(shade_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(linear_image,shade_image,linear_image->rows,1)
#endif
  for (y=0; y < (ssize_t) linear_image->rows; y++)
  {
    double
      distance,
      normal_distance,
      shade;

    PrimaryInfo
      normal;  /* surface normal estimated from the 3x3 neighborhood */

    register const Quantum
      *magick_restrict center,
      *magick_restrict p,
      *magick_restrict post,
      *magick_restrict pre;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;  /* another thread failed; skip remaining rows */
    /* 3-row window starting one pixel left/above the current row. */
    p=GetCacheViewVirtualPixels(image_view,-1,y-1,linear_image->columns+2,3,
      exception);
    q=QueueCacheViewAuthenticPixels(shade_view,0,y,shade_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /*
      Shade this row of pixels.
    */
    normal.z=2.0*(double) QuantumRange;  /* constant Z of surface normal */
    for (x=0; x < (ssize_t) linear_image->columns; x++)
    {
      register ssize_t
        i;

      /*
        Determine the surface normal and compute shading.
      */
      /* pre/center/post point at (x, y-1), (x, y) and (x, y+1). */
      pre=p+GetPixelChannels(linear_image);
      center=pre+(linear_image->columns+2)*GetPixelChannels(linear_image);
      post=center+(linear_image->columns+2)*GetPixelChannels(linear_image);
      /* Horizontal intensity gradient: left column minus right column. */
      normal.x=(double) (
        GetShadeIntensity(linear_image,pre-GetPixelChannels(linear_image))+
        GetShadeIntensity(linear_image,center-GetPixelChannels(linear_image))+
        GetShadeIntensity(linear_image,post-GetPixelChannels(linear_image))-
        GetShadeIntensity(linear_image,pre+GetPixelChannels(linear_image))-
        GetShadeIntensity(linear_image,center+GetPixelChannels(linear_image))-
        GetShadeIntensity(linear_image,post+GetPixelChannels(linear_image)));
      /* Vertical intensity gradient: bottom row minus top row. */
      normal.y=(double) (
        GetShadeIntensity(linear_image,post-GetPixelChannels(linear_image))+
        GetShadeIntensity(linear_image,post)+
        GetShadeIntensity(linear_image,post+GetPixelChannels(linear_image))-
        GetShadeIntensity(linear_image,pre-GetPixelChannels(linear_image))-
        GetShadeIntensity(linear_image,pre)-
        GetShadeIntensity(linear_image,pre+GetPixelChannels(linear_image)));
      if ((fabs(normal.x) <= MagickEpsilon) &&
          (fabs(normal.y) <= MagickEpsilon))
        shade=light.z;  /* flat surface: lit straight by the Z component */
      else
        {
          shade=0.0;
          /* shade = (N . L) / |N| — Lambertian-style dot product. */
          distance=normal.x*light.x+normal.y*light.y+normal.z*light.z;
          if (distance > MagickEpsilon)
            {
              normal_distance=normal.x*normal.x+normal.y*normal.y+
                normal.z*normal.z;
              if (normal_distance > (MagickEpsilon*MagickEpsilon))
                shade=distance/sqrt((double) normal_distance);
            }
        }
      for (i=0; i < (ssize_t) GetPixelChannels(linear_image); i++)
      {
        PixelChannel
          channel;

        PixelTrait
          shade_traits,
          traits;

        channel=GetPixelChannelChannel(linear_image,i);
        traits=GetPixelChannelTraits(linear_image,channel);
        shade_traits=GetPixelChannelTraits(shade_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (shade_traits == UndefinedPixelTrait))
          continue;
        if ((shade_traits & CopyPixelTrait) != 0)
          {
            /* Channel is copy-only: pass the source value through. */
            SetPixelChannel(shade_image,channel,center[i],q);
            continue;
          }
        if ((traits & UpdatePixelTrait) == 0)
          {
            SetPixelChannel(shade_image,channel,center[i],q);
            continue;
          }
        if (gray != MagickFalse)
          {
            /* Grayscale mode: output the shade value itself. */
            SetPixelChannel(shade_image,channel,ClampToQuantum(shade),q);
            continue;
          }
        /* Color mode: modulate the source channel by the shade. */
        SetPixelChannel(shade_image,channel,ClampToQuantum(QuantumScale*shade*
          center[i]),q);
      }
      p+=GetPixelChannels(linear_image);
      q+=GetPixelChannels(shade_image);
    }
    if (SyncCacheViewAuthenticPixels(shade_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ShadeImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  shade_view=DestroyCacheView(shade_view);
  image_view=DestroyCacheView(image_view);
  linear_image=DestroyImage(linear_image);
  if (status == MagickFalse)
    shade_image=DestroyImage(shade_image);
  return(shade_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h a r p e n I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SharpenImage() sharpens the image. We convolve the image with a Gaussian
% operator of the given radius and standard deviation (sigma). For
% reasonable results, radius should be larger than sigma. Use a radius of 0
% and SharpenImage() selects a suitable radius for you.
%
% Using a separable kernel would be faster, but the negative weights cancel
% out on the corners of the kernel producing often undesirable ringing in the
% filtered result; this can be avoided by using a 2D gaussian shaped image
% sharpening kernel instead.
%
% The format of the SharpenImage method is:
%
% Image *SharpenImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Laplacian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SharpenImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  double
    gamma,
    total;

  Image
    *sharp_image;

  KernelInfo
    *kernel_info;

  size_t
    width;

  ssize_t
    half,
    n,
    u,
    v;

  /*
    Sharpen by convolving with a negated-Gaussian kernel whose center tap
    is boosted so the taps sum to a positive value.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  width=GetOptimalKernelWidth2D(radius,sigma);
  kernel_info=AcquireKernelInfo((const char *) NULL,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  (void) memset(kernel_info,0,sizeof(*kernel_info));
  kernel_info->width=width;
  kernel_info->height=width;
  kernel_info->x=(ssize_t) (width-1)/2;
  kernel_info->y=(ssize_t) (width-1)/2;
  kernel_info->signature=MagickCoreSignature;
  kernel_info->values=(MagickRealType *) MagickAssumeAligned(
    AcquireAlignedMemory(kernel_info->width,kernel_info->height*
    sizeof(*kernel_info->values)));
  if (kernel_info->values == (MagickRealType *) NULL)
    {
      kernel_info=DestroyKernelInfo(kernel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Fill the kernel with negated Gaussian weights, accumulating their sum.
  */
  total=0.0;
  half=(ssize_t) (kernel_info->width-1)/2;
  n=0;
  for (v=(-half); v <= half; v++)
    for (u=(-half); u <= half; u++)
    {
      kernel_info->values[n]=(MagickRealType) (-exp(-((double) u*u+v*v)/(2.0*
        MagickSigma*MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
      total+=kernel_info->values[n];
      n++;
    }
  /*
    Center tap: -2x the (negative) sum, making the overall sum positive.
    After the loops n == width*height, so n/2 indexes the center element.
  */
  kernel_info->values[n/2]=(double) ((-2.0)*total);
  /*
    Normalize so the kernel sums to unity (PerceptibleReciprocal guards
    against a near-zero sum).
  */
  total=0.0;
  for (n=0; n < (ssize_t) (kernel_info->width*kernel_info->height); n++)
    total+=kernel_info->values[n];
  gamma=PerceptibleReciprocal(total);
  for (n=0; n < (ssize_t) (kernel_info->width*kernel_info->height); n++)
    kernel_info->values[n]*=gamma;
  sharp_image=ConvolveImage(image,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  return(sharp_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S p r e a d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SpreadImage() is a special effects method that randomly displaces each
% pixel in a square area defined by the radius parameter.
%
% The format of the SpreadImage method is:
%
% Image *SpreadImage(const Image *image,
% const PixelInterpolateMethod method,const double radius,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o method: interpolation method.
%
% o radius: choose a random pixel in a neighborhood of this extent.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SpreadImage(const Image *image,
  const PixelInterpolateMethod method,const double radius,
  ExceptionInfo *exception)
{
#define SpreadImageTag "Spread/Image"

  CacheView
    *image_view,
    *spread_view;

  Image
    *spread_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RandomInfo
    **magick_restrict random_info;

  size_t
    width;

  ssize_t
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  /*
    Initialize spread image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  spread_image=CloneImage(image,0,0,MagickTrue,exception);
  if (spread_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(spread_image,DirectClass,exception) == MagickFalse)
    {
      spread_image=DestroyImage(spread_image);
      return((Image *) NULL);
    }
  /*
    Spread image: each destination pixel samples the source at a random
    offset within a square neighborhood sized by `radius`.
  */
  status=MagickTrue;
  progress=0;
  /* Neighborhood extent: width of a 1D kernel for the given radius. */
  width=GetOptimalKernelWidth1D(radius,0.5);
  /* One RandomInfo per OpenMP thread so random draws need no locking. */
  random_info=AcquireRandomInfoThreadSet();
  image_view=AcquireVirtualCacheView(image,exception);
  spread_view=AcquireAuthenticCacheView(spread_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  key=GetRandomSecretKey(random_info[0]);
  /* NOTE(review): threads are enabled only when key == ~0UL — presumably a
     user-seeded (reproducible) random sequence forces serial execution so
     results are deterministic; confirm against GetRandomSecretKey docs. */
#pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,spread_image,image->rows,key == ~0UL)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    /* Another row already failed; skip remaining work for this row. */
    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(spread_view,0,y,spread_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      PointInfo
        point;

      point.x=GetPseudoRandomValue(random_info[id]);
      point.y=GetPseudoRandomValue(random_info[id]);
      /* Sample the source at a random offset within +/- width/2 of (x,y). */
      status=InterpolatePixelChannels(image,image_view,spread_image,method,
        (double) x+width*(point.x-0.5),(double) y+width*(point.y-0.5),q,
        exception);
      if (status == MagickFalse)
        break;
      q+=GetPixelChannels(spread_image);
    }
    if (SyncCacheViewAuthenticPixels(spread_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SpreadImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  spread_view=DestroyCacheView(spread_view);
  image_view=DestroyCacheView(image_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  /* On any row failure, destroy the partial result and return NULL. */
  if (status == MagickFalse)
    spread_image=DestroyImage(spread_image);
  return(spread_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n s h a r p M a s k I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnsharpMaskImage() sharpens one or more image channels. We convolve the
% image with a Gaussian operator of the given radius and standard deviation
% (sigma). For reasonable results, radius should be larger than sigma. Use a
% radius of 0 and UnsharpMaskImage() selects a suitable radius for you.
%
% The format of the UnsharpMaskImage method is:
%
% Image *UnsharpMaskImage(const Image *image,const double radius,
% const double sigma,const double gain,const double threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o gain: the percentage of the difference between the original and the
% blur image that is added back into the original.
%
% o threshold: the threshold in pixels needed to apply the difference gain.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *UnsharpMaskImage(const Image *image,const double radius,
  const double sigma,const double gain,const double threshold,
  ExceptionInfo *exception)
{
#define SharpenImageTag "Sharpen/Image"

  CacheView
    *image_view,
    *unsharp_view;

  Image
    *unsharp_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  double
    quantum_threshold;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  /* NOTE(review): sibling functions also assert
     exception->signature == MagickCoreSignature; absent here — confirm. */
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Try the OpenCL-accelerated path first; fall through on failure. */
  unsharp_image=AccelerateUnsharpMaskImage(image,radius,sigma,gain,threshold,
    exception);
  if (unsharp_image != (Image *) NULL)
    return(unsharp_image);
#endif
  /*
    Start from a Gaussian-blurred copy; the loop below adds the detail
    signal (original minus blur) back, scaled by gain.
  */
  unsharp_image=BlurImage(image,radius,sigma,exception);
  if (unsharp_image == (Image *) NULL)
    return((Image *) NULL);
  /* threshold is a normalized fraction; scale it to quantum units. */
  quantum_threshold=(double) QuantumRange*threshold;
  /*
    Unsharp-mask image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  unsharp_view=AcquireAuthenticCacheView(unsharp_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,unsharp_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    /* Another row already failed; skip remaining work for this row. */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(unsharp_view,0,y,unsharp_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          pixel;

        PixelChannel
          channel;

        PixelTrait
          traits,
          unsharp_traits;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        unsharp_traits=GetPixelChannelTraits(unsharp_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (unsharp_traits == UndefinedPixelTrait))
          continue;
        if ((unsharp_traits & CopyPixelTrait) != 0)
          {
            /* Copy-only channel: take the source value verbatim. */
            SetPixelChannel(unsharp_image,channel,p[i],q);
            continue;
          }
        /* Detail signal: original pixel minus its blurred counterpart
           (q currently holds the blurred image). */
        pixel=p[i]-(double) GetPixelChannel(unsharp_image,channel,q);
        if (fabs(2.0*pixel) < quantum_threshold)
          pixel=(double) p[i];             /* below threshold: keep original */
        else
          pixel=(double) p[i]+gain*pixel;  /* add scaled detail back */
        SetPixelChannel(unsharp_image,channel,ClampToQuantum(pixel),q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(unsharp_image);
    }
    if (SyncCacheViewAuthenticPixels(unsharp_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SharpenImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  unsharp_image->type=image->type;
  unsharp_view=DestroyCacheView(unsharp_view);
  image_view=DestroyCacheView(image_view);
  /* On any row failure, destroy the partial result and return NULL. */
  if (status == MagickFalse)
    unsharp_image=DestroyImage(unsharp_image);
  return(unsharp_image);
}
|
affinity.c | #include <stdio.h>
#include <stdlib.h>
#include <mpi.h>
#include <math.h>
#define LEN 10000000
int main(int argc, char** argv)
{
int rank, size;
int provided;
double *input, *output;
double t0, t1;
MPI_Init_thread(&argc, &argv, 0, &provided);
MPI_Comm_size(MPI_COMM_WORLD, &size);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
input = (double *) malloc(LEN * sizeof(double));
output = (double *) malloc(LEN * sizeof(double));
// Initialize
for (int i=0; i < LEN; i++)
input[i] = (rank+1)*i;
t0 = MPI_Wtime();
// Compute
#pragma omp parallel for
for (int i=0; i < LEN; i++)
output[i] = sqrt(input[i]) * 12.4*pow(i, 2.3);
t1 = MPI_Wtime();
printf("Time: %.5f\n", t1-t0);
free(input);
free(output);
MPI_Finalize();
}
|
omptests.c | #include <stdlib.h>
#include <stdio.h>
#pragma omp requires unified_shared_memory
/// Two methods store in different compilation units, results in a compile-time
// error. Duplicate .omp_offloading.entry
void test_comp_unit_1(const int niters, double* a);
void test_comp_unit_2(const int niters, double* a);
/*
  Driver: maps `a` to the device, fills it from two separate compilation
  units (intentionally provoking a duplicate .omp_offloading.entry at link
  time), then checks the host-side sum 0+1+...+9 == 45 twice == 90.
*/
int main()
{
  const int niters = 10;
  double* a = (double*)malloc(sizeof(double)*niters);
  /* Fix: malloc result was unchecked before being mapped/dereferenced. */
  if (a == NULL)
  {
    fprintf(stderr, "allocation failure\n");
    return 1;
  }
#pragma omp target data map(from:a[:niters])
  {
    test_comp_unit_1(niters, a);
    test_comp_unit_2(niters, a);
  }
  double res = 0.0;
  for(int ii = 0; ii < niters; ++ii)
  {
    res += a[ii];
  }
  printf("--> %s <--\n",(res < 90.001 && res > 89.999) ? "success" : "error");
  free(a);  /* fix: `a` previously leaked */
  return 0;
}
/// Presumably creates an .omp_offloading.entry
/// Fills a[0..niters) with the index values inside a target region;
/// presumably creates an .omp_offloading.entry for this compilation unit.
void test_comp_unit_1(const int niters, double* a)
{
#pragma omp target
  {
    for (int i = 0; i < niters; ++i)
      a[i] = (double)i;
  }
}
|
draw.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD RRRR AAA W W %
% D D R R A A W W %
% D D RRRR AAAAA W W W %
% D D R R A A WW WW %
% DDDD R R A A W W %
% %
% %
% MagickCore Image Drawing Methods %
% %
% %
% Software Design %
% Cristy %
% July 1998 %
% %
% %
% Copyright 1999-2014 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Bill Radcliffe of Corbis (www.corbis.com) contributed the polygon
% rendering code based on Paul Heckbert's "Concave Polygon Scan Conversion",
% Graphics Gems, 1990. Leonard Rosenthal and David Harr of Appligent
% (www.appligent.com) contributed the dash pattern, linecap stroking
% algorithm, and minor rendering improvements.
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/annotate.h"
#include "MagickCore/artifact.h"
#include "MagickCore/blob.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/draw.h"
#include "MagickCore/draw-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/paint.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/property.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/transform.h"
#include "MagickCore/utility.h"
/*
Define declarations.
*/
#define BezierQuantum 200
/*
Typedef declarations.
*/
/*
  One y-monotonic edge of a polygon: a run of points stored top-to-bottom
  (ConvertPathToPolygon reverses upward runs), ready for scan conversion.
*/
typedef struct _EdgeInfo
{
  SegmentInfo
    bounds;          /* x/y extent of this edge's points */

  double
    scanline;        /* initialized to -1.0 before rendering */

  PointInfo
    *points;         /* vertex list owned by this edge */

  size_t
    number_points;

  ssize_t
    direction;       /* 1 when the original run went downward, else 0 */

  MagickBooleanType
    ghostline;       /* implicit closing edge; logged as "transparent" */

  size_t
    highwater;       /* initialized to 0; presumably a resume-index hint
                        for the renderer — confirm against DrawPolygon */
} EdgeInfo;

/*
  Parameters of an ellipse-like element: center (cx,cy), axis lengths and
  rotation angle.  Not referenced in this chunk — semantics presumed.
*/
typedef struct _ElementInfo
{
  double
    cx,
    cy,
    major,
    minor,
    angle;
} ElementInfo;

/* The sorted-edge form produced by ConvertPathToPolygon(). */
typedef struct _PolygonInfo
{
  EdgeInfo
    *edges;

  size_t
    number_edges;
} PolygonInfo;

/* Opcode attached to each vector-path point. */
typedef enum
{
  MoveToCode,      /* begin a closed subpath */
  OpenCode,        /* retroactive mark: subpath turned out to be open */
  GhostlineCode,   /* moveto starting the implicit closing line */
  LineToCode,
  EndCode          /* terminator element */
} PathInfoCode;

/* A single vector-path element: a point plus its opcode. */
typedef struct _PathInfo
{
  PointInfo
    point;

  PathInfoCode
    code;
} PathInfo;
/*
Forward declarations.
*/
static MagickBooleanType
DrawStrokePolygon(Image *,const DrawInfo *,const PrimitiveInfo *,
ExceptionInfo *);
static PrimitiveInfo
*TraceStrokePolygon(const DrawInfo *,const PrimitiveInfo *);
static size_t
TracePath(PrimitiveInfo *,const char *);
static void
TraceArc(PrimitiveInfo *,const PointInfo,const PointInfo,const PointInfo),
TraceArcPath(PrimitiveInfo *,const PointInfo,const PointInfo,const PointInfo,
const double,const MagickBooleanType,const MagickBooleanType),
TraceBezier(PrimitiveInfo *,const size_t),
TraceCircle(PrimitiveInfo *,const PointInfo,const PointInfo),
TraceEllipse(PrimitiveInfo *,const PointInfo,const PointInfo,const PointInfo),
TraceLine(PrimitiveInfo *,const PointInfo,const PointInfo),
TraceRectangle(PrimitiveInfo *,const PointInfo,const PointInfo),
TraceRoundRectangle(PrimitiveInfo *,const PointInfo,const PointInfo,
PointInfo),
TraceSquareLinecap(PrimitiveInfo *,const size_t,const double);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireDrawInfo() returns a DrawInfo structure properly initialized.
%
% The format of the AcquireDrawInfo method is:
%
% DrawInfo *AcquireDrawInfo(void)
%
*/
MagickExport DrawInfo *AcquireDrawInfo(void)
{
  DrawInfo
    *draw_info;

  /*
    Allocate a DrawInfo and populate it with library defaults; allocation
    failure is fatal by contract.
  */
  draw_info=(DrawInfo *) AcquireMagickMemory(sizeof(*draw_info));
  if (draw_info == (DrawInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  /* NULL image_info selects the built-in defaults. */
  GetDrawInfo((ImageInfo *) NULL,draw_info);
  return(draw_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneDrawInfo() makes a copy of the given draw_info structure. If NULL
% is specified, a new DrawInfo structure is created initialized to default
% values.
%
% The format of the CloneDrawInfo method is:
%
% DrawInfo *CloneDrawInfo(const ImageInfo *image_info,
% const DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o draw_info: the draw info.
%
*/
MagickExport DrawInfo *CloneDrawInfo(const ImageInfo *image_info,
  const DrawInfo *draw_info)
{
  DrawInfo
    *clone_info;

  ExceptionInfo
    *exception;

  /*
    Allocate the clone and seed it with defaults derived from image_info;
    a NULL draw_info means "just return the defaults".
  */
  clone_info=(DrawInfo *) AcquireMagickMemory(sizeof(*clone_info));
  if (clone_info == (DrawInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  GetDrawInfo(image_info,clone_info);
  if (draw_info == (DrawInfo *) NULL)
    return(clone_info);
  exception=AcquireExceptionInfo();
  /*
    Deep-copy strings; scalar attributes are copied by assignment.
  */
  (void) CloneString(&clone_info->primitive,draw_info->primitive);
  (void) CloneString(&clone_info->geometry,draw_info->geometry);
  clone_info->viewbox=draw_info->viewbox;
  clone_info->affine=draw_info->affine;
  clone_info->gravity=draw_info->gravity;
  clone_info->fill=draw_info->fill;
  clone_info->stroke=draw_info->stroke;
  clone_info->stroke_width=draw_info->stroke_width;
  /* Pattern images are deep-cloned so the clone owns its own copies. */
  if (draw_info->fill_pattern != (Image *) NULL)
    clone_info->fill_pattern=CloneImage(draw_info->fill_pattern,0,0,MagickTrue,
      exception);
  if (draw_info->stroke_pattern != (Image *) NULL)
    clone_info->stroke_pattern=CloneImage(draw_info->stroke_pattern,0,0,
      MagickTrue,exception);
  clone_info->stroke_antialias=draw_info->stroke_antialias;
  clone_info->text_antialias=draw_info->text_antialias;
  clone_info->fill_rule=draw_info->fill_rule;
  clone_info->linecap=draw_info->linecap;
  clone_info->linejoin=draw_info->linejoin;
  clone_info->miterlimit=draw_info->miterlimit;
  clone_info->dash_offset=draw_info->dash_offset;
  clone_info->decorate=draw_info->decorate;
  clone_info->compose=draw_info->compose;
  (void) CloneString(&clone_info->text,draw_info->text);
  (void) CloneString(&clone_info->font,draw_info->font);
  (void) CloneString(&clone_info->metrics,draw_info->metrics);
  (void) CloneString(&clone_info->family,draw_info->family);
  clone_info->style=draw_info->style;
  clone_info->stretch=draw_info->stretch;
  clone_info->weight=draw_info->weight;
  (void) CloneString(&clone_info->encoding,draw_info->encoding);
  clone_info->pointsize=draw_info->pointsize;
  clone_info->kerning=draw_info->kerning;
  clone_info->interline_spacing=draw_info->interline_spacing;
  clone_info->interword_spacing=draw_info->interword_spacing;
  clone_info->direction=draw_info->direction;
  (void) CloneString(&clone_info->density,draw_info->density);
  clone_info->align=draw_info->align;
  clone_info->undercolor=draw_info->undercolor;
  clone_info->border_color=draw_info->border_color;
  (void) CloneString(&clone_info->server_name,draw_info->server_name);
  if (draw_info->dash_pattern != (double *) NULL)
    {
      register ssize_t
        x;

      /* The dash pattern is a 0.0-terminated array; count, then copy
         including the terminator. */
      for (x=0; draw_info->dash_pattern[x] != 0.0; x++) ;
      clone_info->dash_pattern=(double *) AcquireQuantumMemory((size_t) x+1UL,
        sizeof(*clone_info->dash_pattern));
      if (clone_info->dash_pattern == (double *) NULL)
        ThrowFatalException(ResourceLimitFatalError,
          "UnableToAllocateDashPattern");
      (void) CopyMagickMemory(clone_info->dash_pattern,draw_info->dash_pattern,
        (size_t) (x+1)*sizeof(*clone_info->dash_pattern));
    }
  clone_info->gradient=draw_info->gradient;
  if (draw_info->gradient.stops != (StopInfo *) NULL)
    {
      size_t
        number_stops;

      number_stops=clone_info->gradient.number_stops;
      clone_info->gradient.stops=(StopInfo *) AcquireQuantumMemory((size_t)
        number_stops,sizeof(*clone_info->gradient.stops));
      if (clone_info->gradient.stops == (StopInfo *) NULL)
        ThrowFatalException(ResourceLimitFatalError,
          "MemoryAllocationFailed");  /* fix: previously threw the
            copy-pasted "UnableToAllocateDashPattern" message here */
      (void) CopyMagickMemory(clone_info->gradient.stops,
        draw_info->gradient.stops,(size_t) number_stops*
        sizeof(*clone_info->gradient.stops));
    }
  (void) CloneString(&clone_info->clip_mask,draw_info->clip_mask);
  clone_info->bounds=draw_info->bounds;
  clone_info->clip_units=draw_info->clip_units;
  clone_info->render=draw_info->render;
  clone_info->alpha=draw_info->alpha;
  clone_info->element_reference=draw_info->element_reference;
  clone_info->debug=IsEventLogging();
  exception=DestroyExceptionInfo(exception);
  return(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n v e r t P a t h T o P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConvertPathToPolygon() converts a path to the more efficient sorted
% rendering form.
%
% The format of the ConvertPathToPolygon method is:
%
% PolygonInfo *ConvertPathToPolygon(const DrawInfo *draw_info,
% const PathInfo *path_info)
%
% A description of each parameter follows:
%
% o Method ConvertPathToPolygon returns the path in a more efficient sorted
% rendering form of type PolygonInfo.
%
% o draw_info: Specifies a pointer to an DrawInfo structure.
%
% o path_info: Specifies a pointer to an PathInfo structure.
%
%
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static int CompareEdges(const void *x,const void *y)
{
register const EdgeInfo
*p,
*q;
/*
Compare two edges.
*/
p=(const EdgeInfo *) x;
q=(const EdgeInfo *) y;
if ((p->points[0].y-MagickEpsilon) > q->points[0].y)
return(1);
if ((p->points[0].y+MagickEpsilon) < q->points[0].y)
return(-1);
if ((p->points[0].x-MagickEpsilon) > q->points[0].x)
return(1);
if ((p->points[0].x+MagickEpsilon) < q->points[0].x)
return(-1);
if (((p->points[1].x-p->points[0].x)*(q->points[1].y-q->points[0].y)-
(p->points[1].y-p->points[0].y)*(q->points[1].x-q->points[0].x)) > 0.0)
return(1);
return(-1);
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
static void LogPolygonInfo(const PolygonInfo *polygon_info)
{
register EdgeInfo
*p;
register ssize_t
i,
j;
(void) LogMagickEvent(DrawEvent,GetMagickModule()," begin active-edge");
p=polygon_info->edges;
for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
{
(void) LogMagickEvent(DrawEvent,GetMagickModule()," edge %.20g:",
(double) i);
(void) LogMagickEvent(DrawEvent,GetMagickModule()," direction: %s",
p->direction != MagickFalse ? "down" : "up");
(void) LogMagickEvent(DrawEvent,GetMagickModule()," ghostline: %s",
p->ghostline != MagickFalse ? "transparent" : "opaque");
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" bounds: %g %g - %g %g",p->bounds.x1,p->bounds.y1,
p->bounds.x2,p->bounds.y2);
for (j=0; j < (ssize_t) p->number_points; j++)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %g %g",
p->points[j].x,p->points[j].y);
p++;
}
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end active-edge");
}
static void ReversePoints(PointInfo *points,const size_t number_points)
{
  ssize_t
    lo,
    hi;

  /*
    Reverse the point array in place by swapping symmetric pairs moving
    inward from both ends.
  */
  lo=0;
  hi=(ssize_t) number_points-1;
  while (lo < hi)
  {
    PointInfo
      swap;

    swap=points[lo];
    points[lo]=points[hi];
    points[hi]=swap;
    lo++;
    hi--;
  }
}
static PolygonInfo *ConvertPathToPolygon(
  const DrawInfo *magick_unused(draw_info),const PathInfo *path_info)
{
  long
    direction,        /* -1/0/1: y-direction of the run being accumulated */
    next_direction;

  PointInfo
    point,            /* most recently consumed path point */
    *points;          /* points of the edge currently being accumulated */

  PolygonInfo
    *polygon_info;

  SegmentInfo
    bounds;           /* x extent of the current edge; y filled at flush */

  register ssize_t
    i,
    n;                /* number of points accumulated so far */

  MagickBooleanType
    ghostline;

  size_t
    edge,             /* next free slot in polygon_info->edges */
    number_edges,     /* allocated capacity of the edge array */
    number_points;    /* allocated capacity of the points array */

  /*
    Convert a path to the more efficient sorted rendering form: split the
    path into y-monotonic edges (flushing a finished run whenever the y
    direction flips or a new subpath starts), then sort the edges.
  */
  polygon_info=(PolygonInfo *) AcquireMagickMemory(sizeof(*polygon_info));
  if (polygon_info == (PolygonInfo *) NULL)
    return((PolygonInfo *) NULL);
  number_edges=16;
  polygon_info->edges=(EdgeInfo *) AcquireQuantumMemory((size_t) number_edges,
    sizeof(*polygon_info->edges));
  if (polygon_info->edges == (EdgeInfo *) NULL)
    return((PolygonInfo *) NULL);  /* NOTE(review): leaks polygon_info */
  direction=0;
  edge=0;
  ghostline=MagickFalse;
  n=0;
  number_points=0;
  points=(PointInfo *) NULL;
  (void) ResetMagickMemory(&point,0,sizeof(point));
  (void) ResetMagickMemory(&bounds,0,sizeof(bounds));
  for (i=0; path_info[i].code != EndCode; i++)
  {
    if ((path_info[i].code == MoveToCode) || (path_info[i].code == OpenCode) ||
        (path_info[i].code == GhostlineCode))
      {
        /*
          Move to.
        */
        if ((points != (PointInfo *) NULL) && (n >= 2))
          {
            /* Flush the accumulated run as a finished edge. */
            if (edge == number_edges)
              {
                /* Grow the edge array geometrically. */
                number_edges<<=1;
                polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
                  polygon_info->edges,(size_t) number_edges,
                  sizeof(*polygon_info->edges));
                if (polygon_info->edges == (EdgeInfo *) NULL)
                  return((PolygonInfo *) NULL);
                  /* NOTE(review): leaks polygon_info, points, prior edges */
              }
            polygon_info->edges[edge].number_points=(size_t) n;
            polygon_info->edges[edge].scanline=(-1.0);
            polygon_info->edges[edge].highwater=0;
            polygon_info->edges[edge].ghostline=ghostline;
            polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
            if (direction < 0)
              ReversePoints(points,(size_t) n);  /* store edges top-down */
            polygon_info->edges[edge].points=points;
            polygon_info->edges[edge].bounds=bounds;
            polygon_info->edges[edge].bounds.y1=points[0].y;
            polygon_info->edges[edge].bounds.y2=points[n-1].y;
            points=(PointInfo *) NULL;  /* ownership moved to the edge */
            ghostline=MagickFalse;
            edge++;
          }
        if (points == (PointInfo *) NULL)
          {
            number_points=16;
            points=(PointInfo *) AcquireQuantumMemory((size_t) number_points,
              sizeof(*points));
            if (points == (PointInfo *) NULL)
              return((PolygonInfo *) NULL);  /* NOTE(review): leaks */
          }
        ghostline=path_info[i].code == GhostlineCode ? MagickTrue : MagickFalse;
        point=path_info[i].point;
        points[0]=point;
        bounds.x1=point.x;
        bounds.x2=point.x;
        direction=0;
        n=1;
        continue;
      }
    /*
      Line to.  Direction ranks points by y, then x, matching CompareEdges.
    */
    next_direction=((path_info[i].point.y > point.y) ||
      ((path_info[i].point.y == point.y) &&
       (path_info[i].point.x > point.x))) ? 1 : -1;
    if ((points != (PointInfo *) NULL) && (direction != 0) &&
        (direction != next_direction))
      {
        /*
          New edge: the y direction flipped, so flush the current run and
          start the next one from its last point.
        */
        point=points[n-1];
        if (edge == number_edges)
          {
            number_edges<<=1;
            polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
              polygon_info->edges,(size_t) number_edges,
              sizeof(*polygon_info->edges));
            if (polygon_info->edges == (EdgeInfo *) NULL)
              return((PolygonInfo *) NULL);  /* NOTE(review): leaks */
          }
        polygon_info->edges[edge].number_points=(size_t) n;
        polygon_info->edges[edge].scanline=(-1.0);
        polygon_info->edges[edge].highwater=0;
        polygon_info->edges[edge].ghostline=ghostline;
        polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
        if (direction < 0)
          ReversePoints(points,(size_t) n);
        polygon_info->edges[edge].points=points;
        polygon_info->edges[edge].bounds=bounds;
        polygon_info->edges[edge].bounds.y1=points[0].y;
        polygon_info->edges[edge].bounds.y2=points[n-1].y;
        number_points=16;
        points=(PointInfo *) AcquireQuantumMemory((size_t) number_points,
          sizeof(*points));
        if (points == (PointInfo *) NULL)
          return((PolygonInfo *) NULL);  /* NOTE(review): leaks */
        n=1;
        ghostline=MagickFalse;
        points[0]=point;   /* new run begins at the flushed run's endpoint */
        bounds.x1=point.x;
        bounds.x2=point.x;
        edge++;
      }
    direction=next_direction;
    if (points == (PointInfo *) NULL)
      continue;
    if (n == (ssize_t) number_points)
      {
        /* Grow the point array geometrically. */
        number_points<<=1;
        points=(PointInfo *) ResizeQuantumMemory(points,(size_t) number_points,
          sizeof(*points));
        if (points == (PointInfo *) NULL)
          return((PolygonInfo *) NULL);  /* NOTE(review): leaks */
      }
    point=path_info[i].point;
    points[n]=point;
    if (point.x < bounds.x1)
      bounds.x1=point.x;
    if (point.x > bounds.x2)
      bounds.x2=point.x;
    n++;
  }
  if (points != (PointInfo *) NULL)
    {
      /* Flush the final run (or discard it if degenerate). */
      if (n < 2)
        points=(PointInfo *) RelinquishMagickMemory(points);
      else
        {
          if (edge == number_edges)
            {
              number_edges<<=1;
              polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
                polygon_info->edges,(size_t) number_edges,
                sizeof(*polygon_info->edges));
              if (polygon_info->edges == (EdgeInfo *) NULL)
                return((PolygonInfo *) NULL);  /* NOTE(review): leaks */
            }
          polygon_info->edges[edge].number_points=(size_t) n;
          polygon_info->edges[edge].scanline=(-1.0);
          polygon_info->edges[edge].highwater=0;
          polygon_info->edges[edge].ghostline=ghostline;
          polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
          if (direction < 0)
            ReversePoints(points,(size_t) n);
          polygon_info->edges[edge].points=points;
          polygon_info->edges[edge].bounds=bounds;
          polygon_info->edges[edge].bounds.y1=points[0].y;
          polygon_info->edges[edge].bounds.y2=points[n-1].y;
          ghostline=MagickFalse;
          edge++;
        }
    }
  polygon_info->number_edges=edge;
  /* Sort edges top-to-bottom, left-to-right for scanline rendering. */
  qsort(polygon_info->edges,(size_t) polygon_info->number_edges,
    sizeof(*polygon_info->edges),CompareEdges);
  if (IsEventLogging() != MagickFalse)
    LogPolygonInfo(polygon_info);
  return(polygon_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n v e r t P r i m i t i v e T o P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConvertPrimitiveToPath() converts a PrimitiveInfo structure into a vector
% path structure.
%
% The format of the ConvertPrimitiveToPath method is:
%
% PathInfo *ConvertPrimitiveToPath(const DrawInfo *draw_info,
% const PrimitiveInfo *primitive_info)
%
% A description of each parameter follows:
%
% o Method ConvertPrimitiveToPath returns a vector path structure of type
% PathInfo.
%
% o draw_info: a structure of type DrawInfo.
%
% o primitive_info: Specifies a pointer to an PrimitiveInfo structure.
%
%
*/
static void LogPathInfo(const PathInfo *path_info)
{
  register const PathInfo
    *p;

  /*
    Dump each path element — point and opcode label — to the draw-event log.
  */
  (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin vector-path");
  for (p=path_info; p->code != EndCode; p++)
  {
    const char
      *label;

    switch (p->code)
    {
      case GhostlineCode: label="moveto ghostline"; break;
      case OpenCode: label="moveto open"; break;
      case MoveToCode: label="moveto"; break;
      case LineToCode: label="lineto"; break;
      default: label="?"; break;
    }
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " %g %g %s",p->point.x,p->point.y,label);
  }
  (void) LogMagickEvent(DrawEvent,GetMagickModule()," end vector-path");
}
static PathInfo *ConvertPrimitiveToPath(
  const DrawInfo *magick_unused(draw_info),const PrimitiveInfo *primitive_info)
{
  PathInfo
    *path_info;

  PathInfoCode
    code;

  PointInfo
    p,     /* first point of the current subpath */
    q;     /* most recently emitted point (for duplicate elimination) */

  register ssize_t
    i,
    n;     /* number of path elements emitted so far */

  ssize_t
    coordinates,   /* points remaining in the current subpath */
    start;         /* index of the current subpath's moveto element */

  /*
    Converts a PrimitiveInfo structure into a vector path structure.
  */
  switch (primitive_info->primitive)
  {
    /* These primitives have no vector-path representation. */
    case PointPrimitive:
    case ColorPrimitive:
    case MattePrimitive:
    case TextPrimitive:
    case ImagePrimitive:
      return((PathInfo *) NULL);
    default:
      break;
  }
  /* Count the primitives; worst case each point yields 2 elements plus the
     ghostline/lineto/end trailer, hence 2*i+3. */
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
  path_info=(PathInfo *) AcquireQuantumMemory((size_t) (2UL*i+3UL),
    sizeof(*path_info));
  if (path_info == (PathInfo *) NULL)
    return((PathInfo *) NULL);
  coordinates=0;
  n=0;
  p.x=(-1.0);
  p.y=(-1.0);
  q.x=(-1.0);
  q.y=(-1.0);
  start=0;
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
  {
    code=LineToCode;
    if (coordinates <= 0)
      {
        /* Beginning of a new subpath: remember its start point/index. */
        coordinates=(ssize_t) primitive_info[i].coordinates;
        p=primitive_info[i].point;
        start=n;
        code=MoveToCode;
      }
    coordinates--;
    /*
      Eliminate duplicate points.
    */
    if ((i == 0) || (fabs(q.x-primitive_info[i].point.x) >= MagickEpsilon) ||
        (fabs(q.y-primitive_info[i].point.y) >= MagickEpsilon))
      {
        path_info[n].code=code;
        path_info[n].point=primitive_info[i].point;
        q=primitive_info[i].point;
        n++;
      }
    if (coordinates > 0)
      continue;  /* subpath not yet complete */
    if ((fabs(p.x-primitive_info[i].point.x) < MagickEpsilon) &&
        (fabs(p.y-primitive_info[i].point.y) < MagickEpsilon))
      continue;  /* subpath is closed: last point coincides with the first */
    /*
      Mark the p point as open if it does not match the q: retroactively
      tag the subpath's moveto as Open and append a ghostline back to p.
    */
    path_info[start].code=OpenCode;
    path_info[n].code=GhostlineCode;
    path_info[n].point=primitive_info[i].point;
    n++;
    path_info[n].code=LineToCode;
    path_info[n].point=p;
    n++;
  }
  /* Terminator element. */
  path_info[n].code=EndCode;
  path_info[n].point.x=0.0;
  path_info[n].point.y=0.0;
  if (IsEventLogging() != MagickFalse)
    LogPathInfo(path_info);
  return(path_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyDrawInfo() deallocates memory associated with a DrawInfo
% structure.
%
% The format of the DestroyDrawInfo method is:
%
% DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o draw_info: the draw info.
%
*/
MagickExport DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
{
  /*
    Validate the pointer before any member access; previously
    draw_info->debug was read before asserting draw_info was non-NULL.
  */
  assert(draw_info != (DrawInfo *) NULL);
  assert(draw_info->signature == MagickSignature);
  if (draw_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  /*
    Release every owned member: strings, pattern images, dash pattern,
    gradient stops, and the clip mask name.
  */
  if (draw_info->primitive != (char *) NULL)
    draw_info->primitive=DestroyString(draw_info->primitive);
  if (draw_info->text != (char *) NULL)
    draw_info->text=DestroyString(draw_info->text);
  if (draw_info->geometry != (char *) NULL)
    draw_info->geometry=DestroyString(draw_info->geometry);
  if (draw_info->fill_pattern != (Image *) NULL)
    draw_info->fill_pattern=DestroyImage(draw_info->fill_pattern);
  if (draw_info->stroke_pattern != (Image *) NULL)
    draw_info->stroke_pattern=DestroyImage(draw_info->stroke_pattern);
  if (draw_info->font != (char *) NULL)
    draw_info->font=DestroyString(draw_info->font);
  if (draw_info->metrics != (char *) NULL)
    draw_info->metrics=DestroyString(draw_info->metrics);
  if (draw_info->family != (char *) NULL)
    draw_info->family=DestroyString(draw_info->family);
  if (draw_info->encoding != (char *) NULL)
    draw_info->encoding=DestroyString(draw_info->encoding);
  if (draw_info->density != (char *) NULL)
    draw_info->density=DestroyString(draw_info->density);
  if (draw_info->server_name != (char *) NULL)
    draw_info->server_name=(char *)
      RelinquishMagickMemory(draw_info->server_name);
  if (draw_info->dash_pattern != (double *) NULL)
    draw_info->dash_pattern=(double *) RelinquishMagickMemory(
      draw_info->dash_pattern);
  if (draw_info->gradient.stops != (StopInfo *) NULL)
    draw_info->gradient.stops=(StopInfo *) RelinquishMagickMemory(
      draw_info->gradient.stops);
  if (draw_info->clip_mask != (char *) NULL)
    draw_info->clip_mask=DestroyString(draw_info->clip_mask);
  /*
    Invalidate the signature to catch use-after-free, then free the
    structure itself; the return value is always NULL.
  */
  draw_info->signature=(~MagickSignature);
  draw_info=(DrawInfo *) RelinquishMagickMemory(draw_info);
  return(draw_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y E d g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyEdge() destroys the specified polygon edge.
%
% The format of the DestroyEdge method is:
%
% ssize_t DestroyEdge(PolygonInfo *polygon_info,const int edge)
%
% A description of each parameter follows:
%
% o polygon_info: Specifies a pointer to a PolygonInfo structure.
%
% o edge: the polygon edge number to destroy.
%
*/
/*
  DestroyEdge() frees the point list of the given edge and removes the edge
  from the polygon's edge array, returning the new edge count.
*/
static size_t DestroyEdge(PolygonInfo *polygon_info,
  const size_t edge)
{
  assert(edge < polygon_info->number_edges);
  polygon_info->edges[edge].points=(PointInfo *) RelinquishMagickMemory(
    polygon_info->edges[edge].points);
  polygon_info->number_edges--;
  /*
    Close the gap by shifting the remaining edges down one slot.  Source and
    destination overlap, so a memmove-style copy is required; a memcpy-style
    copy (CopyMagickMemory) on overlapping regions is undefined behavior.
  */
  if (edge < polygon_info->number_edges)
    (void) MoveMagickMemory(polygon_info->edges+edge,polygon_info->edges+edge+1,
      (size_t) (polygon_info->number_edges-edge)*sizeof(*polygon_info->edges));
  return(polygon_info->number_edges);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y P o l y g o n I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyPolygonInfo() destroys the PolygonInfo data structure.
%
% The format of the DestroyPolygonInfo method is:
%
% PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info)
%
% A description of each parameter follows:
%
% o polygon_info: Specifies a pointer to an PolygonInfo structure.
%
*/
/*
  DestroyPolygonInfo() releases each edge's point list, then the edge array,
  then the PolygonInfo structure itself; it always returns NULL.
*/
static PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info)
{
  register EdgeInfo
    *edge;

  register ssize_t
    j;

  edge=polygon_info->edges;
  for (j=0; j < (ssize_t) polygon_info->number_edges; j++, edge++)
    edge->points=(PointInfo *) RelinquishMagickMemory(edge->points);
  polygon_info->edges=(EdgeInfo *) RelinquishMagickMemory(polygon_info->edges);
  return((PolygonInfo *) RelinquishMagickMemory(polygon_info));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w A f f i n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawAffineImage() composites the source over the destination image as
% dictated by the affine transform.
%
% The format of the DrawAffineImage method is:
%
% MagickBooleanType DrawAffineImage(Image *image,const Image *source,
% const AffineMatrix *affine,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o source: the source image.
%
% o affine: the affine transform.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  AffineEdge() clips the horizontal span *edge at scanline y against the
  source image extent mapped through the affine transform, returning the
  clipped span.  When the scanline lies entirely outside the mapped extent
  the returned span is degenerate (x2 < x1).
*/
static SegmentInfo AffineEdge(const Image *image,const AffineMatrix *affine,
  const double y,const SegmentInfo *edge)
{
  double
    intercept,
    z;

  register double
    x;

  SegmentInfo
    inverse_edge;

  /*
    Determine left and right edges.
  */
  inverse_edge.x1=edge->x1;
  inverse_edge.y1=edge->y1;
  inverse_edge.x2=edge->x2;
  inverse_edge.y2=edge->y2;
  z=affine->ry*y+affine->tx;
  if (affine->sx >= MagickEpsilon)
    {
      /* positive x-scale: left column bounds x1, right column bounds x2 */
      intercept=(-z/affine->sx);
      x=intercept;
      if (x > inverse_edge.x1)
        inverse_edge.x1=x;
      intercept=(-z+(double) image->columns)/affine->sx;
      x=intercept;
      if (x < inverse_edge.x2)
        inverse_edge.x2=x;
    }
  else
    if (affine->sx < -MagickEpsilon)
      {
        /* negative x-scale: the column intercepts swap roles */
        intercept=(-z+(double) image->columns)/affine->sx;
        x=intercept;
        if (x > inverse_edge.x1)
          inverse_edge.x1=x;
        intercept=(-z/affine->sx);
        x=intercept;
        if (x < inverse_edge.x2)
          inverse_edge.x2=x;
      }
    else
      /* x-scale ~ 0: the whole scanline maps to one column; reject if the
         column is outside the image */
      if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->columns))
        {
          inverse_edge.x2=edge->x1;
          return(inverse_edge);
        }
  /*
    Determine top and bottom edges.
  */
  z=affine->sy*y+affine->ty;
  if (affine->rx >= MagickEpsilon)
    {
      intercept=(-z/affine->rx);
      x=intercept;
      if (x > inverse_edge.x1)
        inverse_edge.x1=x;
      intercept=(-z+(double) image->rows)/affine->rx;
      x=intercept;
      if (x < inverse_edge.x2)
        inverse_edge.x2=x;
    }
  else
    if (affine->rx < -MagickEpsilon)
      {
        intercept=(-z+(double) image->rows)/affine->rx;
        x=intercept;
        if (x > inverse_edge.x1)
          inverse_edge.x1=x;
        intercept=(-z/affine->rx);
        x=intercept;
        if (x < inverse_edge.x2)
          inverse_edge.x2=x;
      }
    else
      /* rx ~ 0: scanline maps to a single row; reject if outside the image */
      if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->rows))
        {
          inverse_edge.x2=edge->x2;
          return(inverse_edge);
        }
  return(inverse_edge);
}
/*
  InverseAffineMatrix() returns the inverse of the given affine transform:
  the 2x2 linear part is inverted through its determinant and the translation
  is mapped through the inverted linear part.
*/
static AffineMatrix InverseAffineMatrix(const AffineMatrix *affine)
{
  AffineMatrix
    inverse_affine;

  double
    det;

  det=PerceptibleReciprocal(affine->sx*affine->sy-affine->rx*
    affine->ry);
  inverse_affine.sx=det*affine->sy;
  inverse_affine.sy=det*affine->sx;
  inverse_affine.rx=det*(-affine->rx);
  inverse_affine.ry=det*(-affine->ry);
  inverse_affine.tx=(-affine->tx)*inverse_affine.sx-affine->ty*
    inverse_affine.ry;
  inverse_affine.ty=(-affine->tx)*inverse_affine.rx-affine->ty*
    inverse_affine.sy;
  return(inverse_affine);
}
/*
  MagickAbsoluteValue() returns the absolute value of a signed size.
*/
static inline ssize_t MagickAbsoluteValue(const ssize_t x)
{
  return(x >= 0 ? x : -x);
}
/*
  MagickMax() returns the larger of two doubles (y when neither compares
  greater, matching the original branch order).
*/
static inline double MagickMax(const double x,const double y)
{
  return((x > y) ? x : y);
}
/*
  MagickMin() returns the smaller of two doubles (y when neither compares
  less, matching the original branch order).
*/
static inline double MagickMin(const double x,const double y)
{
  return((x < y) ? x : y);
}
MagickExport MagickBooleanType DrawAffineImage(Image *image,
  const Image *source,const AffineMatrix *affine,ExceptionInfo *exception)
{
  AffineMatrix
    inverse_affine;

  CacheView
    *image_view,
    *source_view;

  MagickBooleanType
    status;

  PixelInfo
    zero;

  PointInfo
    extent[4],  /* the four source corners mapped through the affine */
    min,
    max,
    point;

  register ssize_t
    i;

  SegmentInfo
    edge;

  ssize_t
    start,
    stop,
    y;

  /*
    Determine bounding box.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(source != (const Image *) NULL);
  assert(source->signature == MagickSignature);
  assert(affine != (AffineMatrix *) NULL);
  /*
    Map the four corners of the source image through the forward transform.
  */
  extent[0].x=0.0;
  extent[0].y=0.0;
  extent[1].x=(double) source->columns-1.0;
  extent[1].y=0.0;
  extent[2].x=(double) source->columns-1.0;
  extent[2].y=(double) source->rows-1.0;
  extent[3].x=0.0;
  extent[3].y=(double) source->rows-1.0;
  for (i=0; i < 4; i++)
  {
    point=extent[i];
    extent[i].x=point.x*affine->sx+point.y*affine->ry+affine->tx;
    extent[i].y=point.x*affine->rx+point.y*affine->sy+affine->ty;
  }
  /*
    Take the axis-aligned bounding box of the mapped corners.
  */
  min=extent[0];
  max=extent[0];
  for (i=1; i < 4; i++)
  {
    if (min.x > extent[i].x)
      min.x=extent[i].x;
    if (min.y > extent[i].y)
      min.y=extent[i].y;
    if (max.x < extent[i].x)
      max.x=extent[i].x;
    if (max.y < extent[i].y)
      max.y=extent[i].y;
  }
  /*
    Affine transform image.
  */
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  /*
    Clamp the bounding box to the destination image and iterate its scanlines,
    sampling the source through the inverse transform.
  */
  edge.x1=MagickMax(min.x,0.0);
  edge.y1=MagickMax(min.y,0.0);
  edge.x2=MagickMin(max.x,(double) image->columns-1.0);
  edge.y2=MagickMin(max.y,(double) image->rows-1.0);
  inverse_affine=InverseAffineMatrix(affine);
  GetPixelInfo(image,&zero);
  start=(ssize_t) ceil(edge.y1-0.5);
  stop=(ssize_t) floor(edge.y2+0.5);
  source_view=AcquireVirtualCacheView(source,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(source,image,1,1)
#endif
  for (y=start; y <= stop; y++)
  {
    PixelInfo
      composite,
      pixel;

    PointInfo
      point;

    register ssize_t
      x;

    register Quantum
      *restrict q;

    SegmentInfo
      inverse_edge;

    ssize_t
      x_offset;

    /*
      Clip this scanline's span to the mapped source extent; skip rows the
      transform does not cover.
    */
    inverse_edge=AffineEdge(source,&inverse_affine,(double) y,&edge);
    if (inverse_edge.x2 < inverse_edge.x1)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,(ssize_t) ceil(inverse_edge.x1-
      0.5),y,(size_t) (floor(inverse_edge.x2+0.5)-ceil(inverse_edge.x1-0.5)+1),
      1,exception);
    if (q == (Quantum *) NULL)
      continue;
    pixel=zero;
    composite=zero;
    x_offset=0;  /* NOTE(review): incremented but never read — candidate for removal */
    for (x=(ssize_t) ceil(inverse_edge.x1-0.5); x <= (ssize_t) floor(inverse_edge.x2+0.5); x++)
    {
      /*
        Map the destination pixel back into source coordinates, interpolate
        the source there, and composite it over the destination.
      */
      point.x=(double) x*inverse_affine.sx+y*inverse_affine.ry+
        inverse_affine.tx;
      point.y=(double) x*inverse_affine.rx+y*inverse_affine.sy+
        inverse_affine.ty;
      (void) InterpolatePixelInfo(source,source_view,UndefinedInterpolatePixel,
        point.x,point.y,&pixel,exception);
      GetPixelInfoPixel(image,q,&composite);
      CompositePixelInfoOver(&pixel,pixel.alpha,&composite,composite.alpha,
        &composite);
      SetPixelInfoPixel(image,&composite,q);
      x_offset++;
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  source_view=DestroyCacheView(source_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w B o u n d i n g R e c t a n g l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawBoundingRectangles() draws the bounding rectangles on the image. This
% is only useful for developers debugging the rendering algorithm.
%
% The format of the DrawBoundingRectangles method is:
%
% void DrawBoundingRectangles(Image *image,const DrawInfo *draw_info,
% PolygonInfo *polygon_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o polygon_info: Specifies a pointer to a PolygonInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
static void DrawBoundingRectangles(Image *image,const DrawInfo *draw_info,
  const PolygonInfo *polygon_info,ExceptionInfo *exception)
{
  DrawInfo
    *clone_info;

  double
    mid;  /* half the rendered stroke width, used to pad each rectangle */

  PointInfo
    end,
    resolution,
    start;

  PrimitiveInfo
    primitive_info[6];

  register ssize_t
    i;

  SegmentInfo
    bounds;

  ssize_t
    coordinates;

  /*
    Clone the draw info with an empty (fully transparent) fill so only the
    rectangle outlines are rendered.
  */
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  (void) QueryColorCompliance("#0000",AllCompliance,&clone_info->fill,
    exception);
  /*
    Resolve the rendering resolution from the density string, defaulting the
    vertical value to the horizontal one when only rho is given.
  */
  resolution.x=DefaultResolution;
  resolution.y=DefaultResolution;
  if (clone_info->density != (char *) NULL)
    {
      GeometryInfo
        geometry_info;

      MagickStatusType
        flags;

      flags=ParseGeometry(clone_info->density,&geometry_info);
      resolution.x=geometry_info.rho;
      resolution.y=geometry_info.sigma;
      if ((flags & SigmaValue) == MagickFalse)
        resolution.y=resolution.x;
    }
  mid=(resolution.x/72.0)*ExpandAffine(&clone_info->affine)*
    clone_info->stroke_width/2.0;
  bounds.x1=0.0;
  bounds.y1=0.0;
  bounds.x2=0.0;
  bounds.y2=0.0;
  if (polygon_info != (PolygonInfo *) NULL)
    {
      /*
        Union of all edge bounding boxes, padded by mid and clamped to the
        image extent.
      */
      bounds=polygon_info->edges[0].bounds;
      for (i=1; i < (ssize_t) polygon_info->number_edges; i++)
      {
        if (polygon_info->edges[i].bounds.x1 < (double) bounds.x1)
          bounds.x1=polygon_info->edges[i].bounds.x1;
        if (polygon_info->edges[i].bounds.y1 < (double) bounds.y1)
          bounds.y1=polygon_info->edges[i].bounds.y1;
        if (polygon_info->edges[i].bounds.x2 > (double) bounds.x2)
          bounds.x2=polygon_info->edges[i].bounds.x2;
        if (polygon_info->edges[i].bounds.y2 > (double) bounds.y2)
          bounds.y2=polygon_info->edges[i].bounds.y2;
      }
      bounds.x1-=mid;
      bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double)
        image->columns ? (double) image->columns-1 : bounds.x1;
      bounds.y1-=mid;
      bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double)
        image->rows ? (double) image->rows-1 : bounds.y1;
      bounds.x2+=mid;
      bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double)
        image->columns ? (double) image->columns-1 : bounds.x2;
      bounds.y2+=mid;
      bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double)
        image->rows ? (double) image->rows-1 : bounds.y2;
      /*
        Draw one rectangle per edge: red for directed edges, green otherwise.
      */
      for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
      {
        if (polygon_info->edges[i].direction != 0)
          (void) QueryColorCompliance("red",AllCompliance,&clone_info->stroke,
            exception);
        else
          (void) QueryColorCompliance("green",AllCompliance,&clone_info->stroke,
            exception);
        start.x=(double) (polygon_info->edges[i].bounds.x1-mid);
        start.y=(double) (polygon_info->edges[i].bounds.y1-mid);
        end.x=(double) (polygon_info->edges[i].bounds.x2+mid);
        end.y=(double) (polygon_info->edges[i].bounds.y2+mid);
        primitive_info[0].primitive=RectanglePrimitive;
        TraceRectangle(primitive_info,start,end);
        primitive_info[0].method=ReplaceMethod;
        coordinates=(ssize_t) primitive_info[0].coordinates;
        primitive_info[coordinates].primitive=UndefinedPrimitive;
        (void) DrawPrimitive(image,clone_info,primitive_info,exception);
      }
    }
  /*
    Draw the overall bounding rectangle in blue.
  */
  (void) QueryColorCompliance("blue",AllCompliance,&clone_info->stroke,
    exception);
  start.x=(double) (bounds.x1-mid);
  start.y=(double) (bounds.y1-mid);
  end.x=(double) (bounds.x2+mid);
  end.y=(double) (bounds.y2+mid);
  primitive_info[0].primitive=RectanglePrimitive;
  TraceRectangle(primitive_info,start,end);
  primitive_info[0].method=ReplaceMethod;
  coordinates=(ssize_t) primitive_info[0].coordinates;
  primitive_info[coordinates].primitive=UndefinedPrimitive;
  (void) DrawPrimitive(image,clone_info,primitive_info,exception);
  clone_info=DestroyDrawInfo(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w C l i p P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawClipPath() draws the clip path on the image mask.
%
% The format of the DrawClipPath method is:
%
% MagickBooleanType DrawClipPath(Image *image,const DrawInfo *draw_info,
% const char *name,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o name: the name of the clip path.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType DrawClipPath(Image *image,
  const DrawInfo *draw_info,const char *name,ExceptionInfo *exception)
{
  char
    filename[MaxTextExtent];

  Image
    *clip_mask;

  const char
    *value;

  DrawInfo
    *clone_info;

  MagickStatusType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  /*
    Look up the named clip path stored as an image artifact; nothing to draw
    if it is not registered.
  */
  (void) FormatLocaleString(filename,MaxTextExtent,"%s",name);
  value=GetImageArtifact(image,filename);
  if (value == (const char *) NULL)
    return(MagickFalse);
  /*
    Build a fully transparent mask image matching the target image.
  */
  clip_mask=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (clip_mask == (Image *) NULL)
    return(MagickFalse);
  (void) QueryColorCompliance("#0000",AllCompliance,
    &clip_mask->background_color,exception);
  clip_mask->background_color.alpha=(Quantum) TransparentAlpha;
  (void) SetImageBackgroundColor(clip_mask,exception);
  /* NOTE(review): logs draw_info->clip_mask, which may differ from `name`
     (and could be NULL) — confirm the intended value */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin clip-path %s",
      draw_info->clip_mask);
  /*
    Render the clip-path primitive in white on the mask (clip_mask cleared on
    the clone to avoid recursive clipping), then install the negated mask.
  */
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  (void) CloneString(&clone_info->primitive,value);
  (void) QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill,
    exception);
  clone_info->clip_mask=(char *) NULL;
  status=NegateImage(clip_mask,MagickFalse,exception);
  (void) SetImageMask(image,clip_mask,exception);
  clip_mask=DestroyImage(clip_mask);
  status&=DrawImage(image,clone_info,exception);
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end clip-path");
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w D a s h P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawDashPolygon() draws a dashed polygon (line, rectangle, ellipse) on the
% image while respecting the dash offset and dash pattern attributes.
%
% The format of the DrawDashPolygon method is:
%
% MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
% const PrimitiveInfo *primitive_info,Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
  const PrimitiveInfo *primitive_info,Image *image,ExceptionInfo *exception)
{
  DrawInfo
    *clone_info;

  double
    length,          /* remaining length of the current dash segment */
    maximum_length,  /* length of the current polygon segment */
    offset,          /* dash offset still to be consumed */
    scale,
    total_length;    /* distance covered so far along the current segment */

  MagickStatusType
    status;

  PrimitiveInfo
    *dash_polygon;

  register ssize_t
    i;

  register double
    dx,
    dy;

  size_t
    number_vertices;

  ssize_t
    j,  /* next write index into dash_polygon */
    n;  /* current index into the dash pattern */

  assert(draw_info != (const DrawInfo *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-dash");
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  clone_info->miterlimit=0;
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
  number_vertices=(size_t) i;
  dash_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
    (2UL*number_vertices+1UL),sizeof(*dash_polygon));
  if (dash_polygon == (PrimitiveInfo *) NULL)
    {
      /*
        Fix: release the cloned draw info on allocation failure; previously
        it leaked on this path.
      */
      clone_info=DestroyDrawInfo(clone_info);
      return(MagickFalse);
    }
  dash_polygon[0]=primitive_info[0];
  scale=ExpandAffine(&draw_info->affine);
  length=scale*(draw_info->dash_pattern[0]-0.5);
  offset=draw_info->dash_offset != 0.0 ? scale*draw_info->dash_offset : 0.0;
  j=1;
  /*
    Consume the dash offset by walking the dash pattern until it is spent.
  */
  for (n=0; offset > 0.0; j=0)
  {
    if (draw_info->dash_pattern[n] <= 0.0)
      break;
    length=scale*(draw_info->dash_pattern[n]+(n == 0 ? -0.5 : 0.5));
    if (offset > length)
      {
        offset-=length;
        n++;
        length=scale*(draw_info->dash_pattern[n]+0.5);
        continue;
      }
    if (offset < length)
      {
        length-=offset;
        offset=0.0;
        break;
      }
    offset=0.0;
    n++;
  }
  status=MagickTrue;
  maximum_length=0.0;
  total_length=0.0;
  /*
    Walk each polygon segment, emitting a stroked sub-polygon for every "on"
    dash interval (even pattern index) and skipping "off" intervals (odd).
  */
  for (i=1; i < (ssize_t) number_vertices; i++)
  {
    dx=primitive_info[i].point.x-primitive_info[i-1].point.x;
    dy=primitive_info[i].point.y-primitive_info[i-1].point.y;
    maximum_length=hypot((double) dx,dy);
    if (length == 0.0)
      {
        n++;
        if (draw_info->dash_pattern[n] == 0.0)
          n=0;
        length=scale*(draw_info->dash_pattern[n]+(n == 0 ? -0.5 : 0.5));
      }
    for (total_length=0.0; (total_length+length) <= maximum_length; )
    {
      total_length+=length;
      if ((n & 0x01) != 0)
        {
          /*
            Gap interval: restart the dash sub-polygon at the interpolated
            point along the segment.
          */
          dash_polygon[0]=primitive_info[0];
          dash_polygon[0].point.x=(double) (primitive_info[i-1].point.x+dx*
            total_length/maximum_length);
          dash_polygon[0].point.y=(double) (primitive_info[i-1].point.y+dy*
            total_length/maximum_length);
          j=1;
        }
      else
        {
          /*
            Dash interval: close the current sub-polygon at the interpolated
            point and stroke it.
          */
          if ((j+1) > (ssize_t) (2*number_vertices))
            break;
          dash_polygon[j]=primitive_info[i-1];
          dash_polygon[j].point.x=(double) (primitive_info[i-1].point.x+dx*
            total_length/maximum_length);
          dash_polygon[j].point.y=(double) (primitive_info[i-1].point.y+dy*
            total_length/maximum_length);
          dash_polygon[j].coordinates=1;
          j++;
          dash_polygon[0].coordinates=(size_t) j;
          dash_polygon[j].primitive=UndefinedPrimitive;
          status&=DrawStrokePolygon(image,clone_info,dash_polygon,exception);
        }
      n++;
      if (draw_info->dash_pattern[n] == 0.0)
        n=0;
      length=scale*(draw_info->dash_pattern[n]+(n == 0 ? -0.5 : 0.5));
    }
    /*
      Carry the unused remainder of the dash interval into the next segment.
    */
    length-=(maximum_length-total_length);
    if ((n & 0x01) != 0)
      continue;
    dash_polygon[j]=primitive_info[i];
    dash_polygon[j].coordinates=1;
    j++;
  }
  /*
    Stroke any residual dash that extends past the final vertex.
  */
  if ((total_length <= maximum_length) && ((n & 0x01) == 0) && (j > 1))
    {
      dash_polygon[j]=primitive_info[i-1];
      dash_polygon[j].point.x+=MagickEpsilon;
      dash_polygon[j].point.y+=MagickEpsilon;
      dash_polygon[j].coordinates=1;
      j++;
      dash_polygon[0].coordinates=(size_t) j;
      dash_polygon[j].primitive=UndefinedPrimitive;
      status&=DrawStrokePolygon(image,clone_info,dash_polygon,exception);
    }
  dash_polygon=(PrimitiveInfo *) RelinquishMagickMemory(dash_polygon);
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-dash");
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawImage() draws a graphic primitive on your image. The primitive
% may be represented as a string or filename. Precede the filename with an
% "at" sign (@) and the contents of the file are drawn on the image. You
% can affect how text is drawn by setting one or more members of the draw
% info structure.
%
% The format of the DrawImage method is:
%
% MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  IsPoint() returns MagickTrue if the string parses as a number: either the
  parse consumed at least one character or it produced a non-zero value.
*/
static inline MagickBooleanType IsPoint(const char *point)
{
  char
    *end;

  double
    value;

  value=StringToDouble(point,&end);
  if ((value == 0.0) && (end == point))
    return(MagickFalse);
  return(MagickTrue);
}
/*
  TracePoint() records a single-coordinate primitive at the given point.
*/
static inline void TracePoint(PrimitiveInfo *primitive_info,
  const PointInfo point)
{
  primitive_info->point=point;
  primitive_info->coordinates=1;
}
MagickExport MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info,
ExceptionInfo *exception)
{
#define RenderImageTag "Render/Image"
AffineMatrix
affine,
current;
char
key[2*MaxTextExtent],
keyword[MaxTextExtent],
geometry[MaxTextExtent],
name[MaxTextExtent],
pattern[MaxTextExtent],
*primitive,
*token;
const char
*q;
DrawInfo
**graphic_context;
MagickBooleanType
proceed;
MagickStatusType
status;
double
angle,
factor,
primitive_extent;
PointInfo
point;
PixelInfo
start_color;
PrimitiveInfo
*primitive_info;
PrimitiveType
primitive_type;
register const char
*p;
register ssize_t
i,
x;
SegmentInfo
bounds;
size_t
length,
number_points;
ssize_t
j,
k,
n;
/*
Ensure the annotation info is valid.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (DrawInfo *) NULL);
assert(draw_info->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
if ((draw_info->primitive == (char *) NULL) ||
(*draw_info->primitive == '\0'))
return(MagickFalse);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"begin draw-image");
if (*draw_info->primitive != '@')
primitive=AcquireString(draw_info->primitive);
else
primitive=FileToString(draw_info->primitive+1,~0UL,exception);
if (primitive == (char *) NULL)
return(MagickFalse);
primitive_extent=(double) strlen(primitive);
(void) SetImageArtifact(image,"MVG",primitive);
n=0;
/*
Allocate primitive info memory.
*/
graphic_context=(DrawInfo **) AcquireMagickMemory(
sizeof(*graphic_context));
if (graphic_context == (DrawInfo **) NULL)
{
primitive=DestroyString(primitive);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
number_points=6553;
primitive_info=(PrimitiveInfo *) AcquireQuantumMemory((size_t) number_points,
sizeof(*primitive_info));
if (primitive_info == (PrimitiveInfo *) NULL)
{
primitive=DestroyString(primitive);
for ( ; n >= 0; n--)
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL,draw_info);
graphic_context[n]->viewbox=image->page;
if ((image->page.width == 0) || (image->page.height == 0))
{
graphic_context[n]->viewbox.width=image->columns;
graphic_context[n]->viewbox.height=image->rows;
}
token=AcquireString(primitive);
(void) QueryColorCompliance("#000000",AllCompliance,&start_color,
exception);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
status=MagickTrue;
for (q=primitive; *q != '\0'; )
{
/*
Interpret graphic primitive.
*/
GetMagickToken(q,&q,keyword);
if (*keyword == '\0')
break;
if (*keyword == '#')
{
/*
Comment.
*/
while ((*q != '\n') && (*q != '\0'))
q++;
continue;
}
p=q-strlen(keyword)-1;
primitive_type=UndefinedPrimitive;
current=graphic_context[n]->affine;
GetAffineMatrix(&affine);
switch (*keyword)
{
case ';':
break;
case 'a':
case 'A':
{
if (LocaleCompare("affine",keyword) == 0)
{
GetMagickToken(q,&q,token);
affine.sx=StringToDouble(token,(char **) NULL);
GetMagickToken(q,&q,token);
if (*token == ',')
GetMagickToken(q,&q,token);
affine.rx=StringToDouble(token,(char **) NULL);
GetMagickToken(q,&q,token);
if (*token == ',')
GetMagickToken(q,&q,token);
affine.ry=StringToDouble(token,(char **) NULL);
GetMagickToken(q,&q,token);
if (*token == ',')
GetMagickToken(q,&q,token);
affine.sy=StringToDouble(token,(char **) NULL);
GetMagickToken(q,&q,token);
if (*token == ',')
GetMagickToken(q,&q,token);
affine.tx=StringToDouble(token,(char **) NULL);
GetMagickToken(q,&q,token);
if (*token == ',')
GetMagickToken(q,&q,token);
affine.ty=StringToDouble(token,(char **) NULL);
break;
}
if (LocaleCompare("arc",keyword) == 0)
{
primitive_type=ArcPrimitive;
break;
}
status=MagickFalse;
break;
}
case 'b':
case 'B':
{
if (LocaleCompare("bezier",keyword) == 0)
{
primitive_type=BezierPrimitive;
break;
}
if (LocaleCompare("border-color",keyword) == 0)
{
GetMagickToken(q,&q,token);
(void) QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->border_color,exception);
break;
}
status=MagickFalse;
break;
}
case 'c':
case 'C':
{
if (LocaleCompare("clip-path",keyword) == 0)
{
/*
Create clip mask.
*/
GetMagickToken(q,&q,token);
(void) CloneString(&graphic_context[n]->clip_mask,token);
(void) DrawClipPath(image,graphic_context[n],
graphic_context[n]->clip_mask,exception);
break;
}
if (LocaleCompare("clip-rule",keyword) == 0)
{
ssize_t
fill_rule;
GetMagickToken(q,&q,token);
fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse,
token);
if (fill_rule == -1)
status=MagickFalse;
else
graphic_context[n]->fill_rule=(FillRule) fill_rule;
break;
}
if (LocaleCompare("clip-units",keyword) == 0)
{
ssize_t
clip_units;
GetMagickToken(q,&q,token);
clip_units=ParseCommandOption(MagickClipPathOptions,MagickFalse,
token);
if (clip_units == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->clip_units=(ClipPathUnits) clip_units;
if (clip_units == ObjectBoundingBox)
{
GetAffineMatrix(¤t);
affine.sx=draw_info->bounds.x2;
affine.sy=draw_info->bounds.y2;
affine.tx=draw_info->bounds.x1;
affine.ty=draw_info->bounds.y1;
break;
}
break;
}
if (LocaleCompare("circle",keyword) == 0)
{
primitive_type=CirclePrimitive;
break;
}
if (LocaleCompare("color",keyword) == 0)
{
primitive_type=ColorPrimitive;
break;
}
status=MagickFalse;
break;
}
case 'd':
case 'D':
{
if (LocaleCompare("decorate",keyword) == 0)
{
ssize_t
decorate;
GetMagickToken(q,&q,token);
decorate=ParseCommandOption(MagickDecorateOptions,MagickFalse,
token);
if (decorate == -1)
status=MagickFalse;
else
graphic_context[n]->decorate=(DecorationType) decorate;
break;
}
if (LocaleCompare("direction",keyword) == 0)
{
ssize_t
direction;
GetMagickToken(q,&q,token);
direction=ParseCommandOption(MagickDirectionOptions,MagickFalse,
token);
if (direction == -1)
status=MagickFalse;
else
graphic_context[n]->direction=(DirectionType) direction;
break;
}
status=MagickFalse;
break;
}
case 'e':
case 'E':
{
if (LocaleCompare("ellipse",keyword) == 0)
{
primitive_type=EllipsePrimitive;
break;
}
if (LocaleCompare("encoding",keyword) == 0)
{
GetMagickToken(q,&q,token);
(void) CloneString(&graphic_context[n]->encoding,token);
break;
}
status=MagickFalse;
break;
}
case 'f':
case 'F':
{
if (LocaleCompare("fill",keyword) == 0)
{
GetMagickToken(q,&q,token);
(void) FormatLocaleString(pattern,MaxTextExtent,"%s",token);
if (GetImageArtifact(image,pattern) != (const char *) NULL)
(void) DrawPatternPath(image,draw_info,token,
&graphic_context[n]->fill_pattern,exception);
else
{
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->fill,exception);
if (status == MagickFalse)
{
ImageInfo
*pattern_info;
pattern_info=AcquireImageInfo();
(void) CopyMagickString(pattern_info->filename,token,
MaxTextExtent);
graphic_context[n]->fill_pattern=ReadImage(pattern_info,
exception);
CatchException(exception);
pattern_info=DestroyImageInfo(pattern_info);
}
}
break;
}
if (LocaleCompare("fill-alpha",keyword) == 0)
{
GetMagickToken(q,&q,token);
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
graphic_context[n]->fill.alpha=(double) QuantumRange*
factor*StringToDouble(token,(char **) NULL);
break;
}
if (LocaleCompare("fill-rule",keyword) == 0)
{
ssize_t
fill_rule;
GetMagickToken(q,&q,token);
fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse,
token);
if (fill_rule == -1)
status=MagickFalse;
else
graphic_context[n]->fill_rule=(FillRule) fill_rule;
break;
}
if (LocaleCompare("font",keyword) == 0)
{
GetMagickToken(q,&q,token);
(void) CloneString(&graphic_context[n]->font,token);
if (LocaleCompare("none",token) == 0)
graphic_context[n]->font=(char *)
RelinquishMagickMemory(graphic_context[n]->font);
break;
}
if (LocaleCompare("font-family",keyword) == 0)
{
GetMagickToken(q,&q,token);
(void) CloneString(&graphic_context[n]->family,token);
break;
}
if (LocaleCompare("font-size",keyword) == 0)
{
GetMagickToken(q,&q,token);
graphic_context[n]->pointsize=StringToDouble(token,(char **) NULL);
break;
}
if (LocaleCompare("font-stretch",keyword) == 0)
{
ssize_t
stretch;
GetMagickToken(q,&q,token);
stretch=ParseCommandOption(MagickStretchOptions,MagickFalse,token);
if (stretch == -1)
status=MagickFalse;
else
graphic_context[n]->stretch=(StretchType) stretch;
break;
}
if (LocaleCompare("font-style",keyword) == 0)
{
ssize_t
style;
GetMagickToken(q,&q,token);
style=ParseCommandOption(MagickStyleOptions,MagickFalse,token);
if (style == -1)
status=MagickFalse;
else
graphic_context[n]->style=(StyleType) style;
break;
}
if (LocaleCompare("font-weight",keyword) == 0)
{
GetMagickToken(q,&q,token);
graphic_context[n]->weight=StringToUnsignedLong(token);
if (LocaleCompare(token,"all") == 0)
graphic_context[n]->weight=0;
if (LocaleCompare(token,"bold") == 0)
graphic_context[n]->weight=700;
if (LocaleCompare(token,"bolder") == 0)
if (graphic_context[n]->weight <= 800)
graphic_context[n]->weight+=100;
if (LocaleCompare(token,"lighter") == 0)
if (graphic_context[n]->weight >= 100)
graphic_context[n]->weight-=100;
if (LocaleCompare(token,"normal") == 0)
graphic_context[n]->weight=400;
break;
}
status=MagickFalse;
break;
}
case 'g':
case 'G':
{
if (LocaleCompare("gradient-units",keyword) == 0)
{
GetMagickToken(q,&q,token);
break;
}
if (LocaleCompare("gravity",keyword) == 0)
{
ssize_t
gravity;
GetMagickToken(q,&q,token);
gravity=ParseCommandOption(MagickGravityOptions,MagickFalse,token);
if (gravity == -1)
status=MagickFalse;
else
graphic_context[n]->gravity=(GravityType) gravity;
break;
}
status=MagickFalse;
break;
}
case 'i':
case 'I':
{
if (LocaleCompare("image",keyword) == 0)
{
ssize_t
compose;
primitive_type=ImagePrimitive;
GetMagickToken(q,&q,token);
compose=ParseCommandOption(MagickComposeOptions,MagickFalse,token);
if (compose == -1)
status=MagickFalse;
else
graphic_context[n]->compose=(CompositeOperator) compose;
break;
}
if (LocaleCompare("interline-spacing",keyword) == 0)
{
GetMagickToken(q,&q,token);
graphic_context[n]->interline_spacing=StringToDouble(token,
(char **) NULL);
break;
}
if (LocaleCompare("interword-spacing",keyword) == 0)
{
GetMagickToken(q,&q,token);
graphic_context[n]->interword_spacing=StringToDouble(token,
(char **) NULL);
break;
}
status=MagickFalse;
break;
}
case 'k':
case 'K':
{
if (LocaleCompare("kerning",keyword) == 0)
{
GetMagickToken(q,&q,token);
graphic_context[n]->kerning=StringToDouble(token,(char **) NULL);
break;
}
status=MagickFalse;
break;
}
case 'l':
case 'L':
{
if (LocaleCompare("line",keyword) == 0)
primitive_type=LinePrimitive;
else
status=MagickFalse;
break;
}
case 'm':
case 'M':
{
if (LocaleCompare("matte",keyword) == 0)
primitive_type=MattePrimitive;
else
status=MagickFalse;
break;
}
case 'o':
case 'O':
{
if (LocaleCompare("offset",keyword) == 0)
{
GetMagickToken(q,&q,token);
break;
}
if (LocaleCompare("opacity",keyword) == 0)
{
GetMagickToken(q,&q,token);
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
graphic_context[n]->alpha=ClampToQuantum(QuantumRange*(1.0-((1.0-
QuantumScale*graphic_context[n]->alpha)*factor*
StringToDouble(token,(char **) NULL))));
graphic_context[n]->fill.alpha=(double) graphic_context[n]->alpha;
graphic_context[n]->stroke.alpha=(double) graphic_context[n]->alpha;
break;
}
status=MagickFalse;
break;
}
case 'p':
case 'P':
{
if (LocaleCompare("path",keyword) == 0)
{
primitive_type=PathPrimitive;
break;
}
if (LocaleCompare("point",keyword) == 0)
{
primitive_type=PointPrimitive;
break;
}
if (LocaleCompare("polyline",keyword) == 0)
{
primitive_type=PolylinePrimitive;
break;
}
if (LocaleCompare("polygon",keyword) == 0)
{
primitive_type=PolygonPrimitive;
break;
}
if (LocaleCompare("pop",keyword) == 0)
{
GetMagickToken(q,&q,token);
if (LocaleCompare("clip-path",token) == 0)
break;
if (LocaleCompare("defs",token) == 0)
break;
if (LocaleCompare("gradient",token) == 0)
break;
if (LocaleCompare("graphic-context",token) == 0)
{
if (n <= 0)
{
(void) ThrowMagickException(exception,GetMagickModule(),
DrawError,"UnbalancedGraphicContextPushPop","`%s'",token);
n=0;
break;
}
if (graphic_context[n]->clip_mask != (char *) NULL)
if (LocaleCompare(graphic_context[n]->clip_mask,
graphic_context[n-1]->clip_mask) != 0)
(void) SetImageMask(image,(Image *) NULL,exception);
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
n--;
break;
}
if (LocaleCompare("pattern",token) == 0)
break;
status=MagickFalse;
break;
}
if (LocaleCompare("push",keyword) == 0)
{
GetMagickToken(q,&q,token);
if (LocaleCompare("clip-path",token) == 0)
{
char
name[MaxTextExtent];
GetMagickToken(q,&q,token);
(void) FormatLocaleString(name,MaxTextExtent,"%s",token);
for (p=q; *q != '\0'; )
{
GetMagickToken(q,&q,token);
if (LocaleCompare(token,"pop") != 0)
continue;
GetMagickToken(q,(const char **) NULL,token);
if (LocaleCompare(token,"clip-path") != 0)
continue;
break;
}
(void) CopyMagickString(token,p,(size_t) (q-p-4+1));
(void) SetImageArtifact(image,name,token);
GetMagickToken(q,&q,token);
break;
}
if (LocaleCompare("gradient",token) == 0)
{
char
key[2*MaxTextExtent],
name[MaxTextExtent],
type[MaxTextExtent];
SegmentInfo
segment;
GetMagickToken(q,&q,token);
(void) CopyMagickString(name,token,MaxTextExtent);
GetMagickToken(q,&q,token);
(void) CopyMagickString(type,token,MaxTextExtent);
GetMagickToken(q,&q,token);
segment.x1=StringToDouble(token,(char **) NULL);
GetMagickToken(q,&q,token);
if (*token == ',')
GetMagickToken(q,&q,token);
segment.y1=StringToDouble(token,(char **) NULL);
GetMagickToken(q,&q,token);
if (*token == ',')
GetMagickToken(q,&q,token);
segment.x2=StringToDouble(token,(char **) NULL);
GetMagickToken(q,&q,token);
if (*token == ',')
GetMagickToken(q,&q,token);
segment.y2=StringToDouble(token,(char **) NULL);
if (LocaleCompare(type,"radial") == 0)
{
GetMagickToken(q,&q,token);
if (*token == ',')
GetMagickToken(q,&q,token);
}
for (p=q; *q != '\0'; )
{
GetMagickToken(q,&q,token);
if (LocaleCompare(token,"pop") != 0)
continue;
GetMagickToken(q,(const char **) NULL,token);
if (LocaleCompare(token,"gradient") != 0)
continue;
break;
}
(void) CopyMagickString(token,p,(size_t) (q-p-4+1));
bounds.x1=graphic_context[n]->affine.sx*segment.x1+
graphic_context[n]->affine.ry*segment.y1+
graphic_context[n]->affine.tx;
bounds.y1=graphic_context[n]->affine.rx*segment.x1+
graphic_context[n]->affine.sy*segment.y1+
graphic_context[n]->affine.ty;
bounds.x2=graphic_context[n]->affine.sx*segment.x2+
graphic_context[n]->affine.ry*segment.y2+
graphic_context[n]->affine.tx;
bounds.y2=graphic_context[n]->affine.rx*segment.x2+
graphic_context[n]->affine.sy*segment.y2+
graphic_context[n]->affine.ty;
(void) FormatLocaleString(key,MaxTextExtent,"%s",name);
(void) SetImageArtifact(image,key,token);
(void) FormatLocaleString(key,MaxTextExtent,"%s-geometry",name);
(void) FormatLocaleString(geometry,MaxTextExtent,
"%gx%g%+.15g%+.15g",
MagickMax(fabs(bounds.x2-bounds.x1+1.0),1.0),
MagickMax(fabs(bounds.y2-bounds.y1+1.0),1.0),
bounds.x1,bounds.y1);
(void) SetImageArtifact(image,key,geometry);
GetMagickToken(q,&q,token);
break;
}
if (LocaleCompare("pattern",token) == 0)
{
RectangleInfo
bounds;
GetMagickToken(q,&q,token);
(void) CopyMagickString(name,token,MaxTextExtent);
GetMagickToken(q,&q,token);
bounds.x=(ssize_t) ceil(StringToDouble(token,(char **) NULL)-
0.5);
GetMagickToken(q,&q,token);
if (*token == ',')
GetMagickToken(q,&q,token);
bounds.y=(ssize_t) ceil(StringToDouble(token,(char **) NULL)-
0.5);
GetMagickToken(q,&q,token);
if (*token == ',')
GetMagickToken(q,&q,token);
bounds.width=(size_t) floor(StringToDouble(token,
(char **) NULL)+0.5);
GetMagickToken(q,&q,token);
if (*token == ',')
GetMagickToken(q,&q,token);
bounds.height=(size_t) floor(StringToDouble(token,
(char **) NULL)+0.5);
for (p=q; *q != '\0'; )
{
GetMagickToken(q,&q,token);
if (LocaleCompare(token,"pop") != 0)
continue;
GetMagickToken(q,(const char **) NULL,token);
if (LocaleCompare(token,"pattern") != 0)
continue;
break;
}
(void) CopyMagickString(token,p,(size_t) (q-p-4+1));
(void) FormatLocaleString(key,MaxTextExtent,"%s",name);
(void) SetImageArtifact(image,key,token);
(void) FormatLocaleString(key,MaxTextExtent,"%s-geometry",name);
(void) FormatLocaleString(geometry,MaxTextExtent,
"%.20gx%.20g%+.20g%+.20g",(double) bounds.width,(double)
bounds.height,(double) bounds.x,(double) bounds.y);
(void) SetImageArtifact(image,key,geometry);
GetMagickToken(q,&q,token);
break;
}
if (LocaleCompare("graphic-context",token) == 0)
{
n++;
graphic_context=(DrawInfo **) ResizeQuantumMemory(
graphic_context,(size_t) (n+1),sizeof(*graphic_context));
if (graphic_context == (DrawInfo **) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
break;
}
graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL,
graphic_context[n-1]);
break;
}
if (LocaleCompare("defs",token) == 0)
break;
status=MagickFalse;
break;
}
status=MagickFalse;
break;
}
case 'r':
case 'R':
{
if (LocaleCompare("rectangle",keyword) == 0)
{
primitive_type=RectanglePrimitive;
break;
}
if (LocaleCompare("rotate",keyword) == 0)
{
GetMagickToken(q,&q,token);
angle=StringToDouble(token,(char **) NULL);
affine.sx=cos(DegreesToRadians(fmod((double) angle,360.0)));
affine.rx=sin(DegreesToRadians(fmod((double) angle,360.0)));
affine.ry=(-sin(DegreesToRadians(fmod((double) angle,360.0))));
affine.sy=cos(DegreesToRadians(fmod((double) angle,360.0)));
break;
}
if (LocaleCompare("roundRectangle",keyword) == 0)
{
primitive_type=RoundRectanglePrimitive;
break;
}
status=MagickFalse;
break;
}
case 's':
case 'S':
{
if (LocaleCompare("scale",keyword) == 0)
{
GetMagickToken(q,&q,token);
affine.sx=StringToDouble(token,(char **) NULL);
GetMagickToken(q,&q,token);
if (*token == ',')
GetMagickToken(q,&q,token);
affine.sy=StringToDouble(token,(char **) NULL);
break;
}
if (LocaleCompare("skewX",keyword) == 0)
{
GetMagickToken(q,&q,token);
angle=StringToDouble(token,(char **) NULL);
affine.ry=sin(DegreesToRadians(angle));
break;
}
if (LocaleCompare("skewY",keyword) == 0)
{
GetMagickToken(q,&q,token);
angle=StringToDouble(token,(char **) NULL);
affine.rx=(-tan(DegreesToRadians(angle)/2.0));
break;
}
if (LocaleCompare("stop-color",keyword) == 0)
{
PixelInfo
stop_color;
GetMagickToken(q,&q,token);
(void) QueryColorCompliance(token,AllCompliance,&stop_color,
exception);
(void) GradientImage(image,LinearGradient,ReflectSpread,
&start_color,&stop_color,exception);
start_color=stop_color;
GetMagickToken(q,&q,token);
break;
}
if (LocaleCompare("stroke",keyword) == 0)
{
GetMagickToken(q,&q,token);
(void) FormatLocaleString(pattern,MaxTextExtent,"%s",token);
if (GetImageArtifact(image,pattern) != (const char *) NULL)
(void) DrawPatternPath(image,draw_info,token,
&graphic_context[n]->stroke_pattern,exception);
else
{
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->stroke,exception);
if (status == MagickFalse)
{
ImageInfo
*pattern_info;
pattern_info=AcquireImageInfo();
(void) CopyMagickString(pattern_info->filename,token,
MaxTextExtent);
graphic_context[n]->stroke_pattern=ReadImage(pattern_info,
exception);
CatchException(exception);
pattern_info=DestroyImageInfo(pattern_info);
}
}
break;
}
if (LocaleCompare("stroke-antialias",keyword) == 0)
{
GetMagickToken(q,&q,token);
graphic_context[n]->stroke_antialias=
StringToLong(token) != 0 ? MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("stroke-dasharray",keyword) == 0)
{
if (graphic_context[n]->dash_pattern != (double *) NULL)
graphic_context[n]->dash_pattern=(double *)
RelinquishMagickMemory(graphic_context[n]->dash_pattern);
if (IsPoint(q) != MagickFalse)
{
const char
*p;
p=q;
GetMagickToken(p,&p,token);
if (*token == ',')
GetMagickToken(p,&p,token);
for (x=0; IsPoint(token) != MagickFalse; x++)
{
GetMagickToken(p,&p,token);
if (*token == ',')
GetMagickToken(p,&p,token);
}
graphic_context[n]->dash_pattern=(double *)
AcquireQuantumMemory((size_t) (2UL*x+1UL),
sizeof(*graphic_context[n]->dash_pattern));
if (graphic_context[n]->dash_pattern == (double *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
break;
}
for (j=0; j < x; j++)
{
GetMagickToken(q,&q,token);
if (*token == ',')
GetMagickToken(q,&q,token);
graphic_context[n]->dash_pattern[j]=StringToDouble(token,
(char **) NULL);
}
if ((x & 0x01) != 0)
for ( ; j < (2*x); j++)
graphic_context[n]->dash_pattern[j]=
graphic_context[n]->dash_pattern[j-x];
graphic_context[n]->dash_pattern[j]=0.0;
break;
}
GetMagickToken(q,&q,token);
break;
}
if (LocaleCompare("stroke-dashoffset",keyword) == 0)
{
GetMagickToken(q,&q,token);
graphic_context[n]->dash_offset=StringToDouble(token,
(char **) NULL);
break;
}
if (LocaleCompare("stroke-linecap",keyword) == 0)
{
ssize_t
linecap;
GetMagickToken(q,&q,token);
linecap=ParseCommandOption(MagickLineCapOptions,MagickFalse,token);
if (linecap == -1)
status=MagickFalse;
else
graphic_context[n]->linecap=(LineCap) linecap;
break;
}
if (LocaleCompare("stroke-linejoin",keyword) == 0)
{
ssize_t
linejoin;
GetMagickToken(q,&q,token);
linejoin=ParseCommandOption(MagickLineJoinOptions,MagickFalse,
token);
if (linejoin == -1)
status=MagickFalse;
else
graphic_context[n]->linejoin=(LineJoin) linejoin;
break;
}
if (LocaleCompare("stroke-miterlimit",keyword) == 0)
{
GetMagickToken(q,&q,token);
graphic_context[n]->miterlimit=StringToUnsignedLong(token);
break;
}
if (LocaleCompare("stroke-opacity",keyword) == 0)
{
GetMagickToken(q,&q,token);
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
graphic_context[n]->stroke.alpha=(double) QuantumRange*
factor*StringToDouble(token,(char **) NULL);
break;
}
if (LocaleCompare("stroke-width",keyword) == 0)
{
GetMagickToken(q,&q,token);
graphic_context[n]->stroke_width=StringToDouble(token,
(char **) NULL);
break;
}
status=MagickFalse;
break;
}
case 't':
case 'T':
{
if (LocaleCompare("text",keyword) == 0)
{
primitive_type=TextPrimitive;
break;
}
if (LocaleCompare("text-align",keyword) == 0)
{
ssize_t
align;
GetMagickToken(q,&q,token);
align=ParseCommandOption(MagickAlignOptions,MagickFalse,token);
if (align == -1)
status=MagickFalse;
else
graphic_context[n]->align=(AlignType) align;
break;
}
if (LocaleCompare("text-anchor",keyword) == 0)
{
ssize_t
align;
GetMagickToken(q,&q,token);
align=ParseCommandOption(MagickAlignOptions,MagickFalse,token);
if (align == -1)
status=MagickFalse;
else
graphic_context[n]->align=(AlignType) align;
break;
}
if (LocaleCompare("text-antialias",keyword) == 0)
{
GetMagickToken(q,&q,token);
graphic_context[n]->text_antialias=
StringToLong(token) != 0 ? MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("text-undercolor",keyword) == 0)
{
GetMagickToken(q,&q,token);
(void) QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->undercolor,exception);
break;
}
if (LocaleCompare("translate",keyword) == 0)
{
GetMagickToken(q,&q,token);
affine.tx=StringToDouble(token,(char **) NULL);
GetMagickToken(q,&q,token);
if (*token == ',')
GetMagickToken(q,&q,token);
affine.ty=StringToDouble(token,(char **) NULL);
break;
}
status=MagickFalse;
break;
}
case 'v':
case 'V':
{
if (LocaleCompare("viewbox",keyword) == 0)
{
GetMagickToken(q,&q,token);
graphic_context[n]->viewbox.x=(ssize_t) ceil(StringToDouble(token,
(char **) NULL)-0.5);
GetMagickToken(q,&q,token);
if (*token == ',')
GetMagickToken(q,&q,token);
graphic_context[n]->viewbox.y=(ssize_t) ceil(StringToDouble(token,
(char **) NULL)-0.5);
GetMagickToken(q,&q,token);
if (*token == ',')
GetMagickToken(q,&q,token);
graphic_context[n]->viewbox.width=(size_t) floor(StringToDouble(
token,(char **) NULL)+0.5);
GetMagickToken(q,&q,token);
if (*token == ',')
GetMagickToken(q,&q,token);
graphic_context[n]->viewbox.height=(size_t) floor(StringToDouble(
token,(char **) NULL)+0.5);
break;
}
status=MagickFalse;
break;
}
default:
{
status=MagickFalse;
break;
}
}
if (status == MagickFalse)
break;
if ((affine.sx != 1.0) || (affine.rx != 0.0) || (affine.ry != 0.0) ||
(affine.sy != 1.0) || (affine.tx != 0.0) || (affine.ty != 0.0))
{
graphic_context[n]->affine.sx=current.sx*affine.sx+current.ry*affine.rx;
graphic_context[n]->affine.rx=current.rx*affine.sx+current.sy*affine.rx;
graphic_context[n]->affine.ry=current.sx*affine.ry+current.ry*affine.sy;
graphic_context[n]->affine.sy=current.rx*affine.ry+current.sy*affine.sy;
graphic_context[n]->affine.tx=current.sx*affine.tx+current.ry*affine.ty+
current.tx;
graphic_context[n]->affine.ty=current.rx*affine.tx+current.sy*affine.ty+
current.ty;
}
if (primitive_type == UndefinedPrimitive)
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",
(int) (q-p),p);
continue;
}
/*
Parse the primitive attributes.
*/
i=0;
j=0;
primitive_info[0].point.x=0.0;
primitive_info[0].point.y=0.0;
for (x=0; *q != '\0'; x++)
{
/*
Define points.
*/
if (IsPoint(q) == MagickFalse)
break;
GetMagickToken(q,&q,token);
point.x=StringToDouble(token,(char **) NULL);
GetMagickToken(q,&q,token);
if (*token == ',')
GetMagickToken(q,&q,token);
point.y=StringToDouble(token,(char **) NULL);
GetMagickToken(q,(const char **) NULL,token);
if (*token == ',')
GetMagickToken(q,&q,token);
primitive_info[i].primitive=primitive_type;
primitive_info[i].point=point;
primitive_info[i].coordinates=0;
primitive_info[i].method=FloodfillMethod;
i++;
if (i < (ssize_t) number_points)
continue;
number_points<<=1;
primitive_info=(PrimitiveInfo *) ResizeQuantumMemory(primitive_info,
(size_t) number_points,sizeof(*primitive_info));
if (primitive_info == (PrimitiveInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
break;
}
}
primitive_info[j].primitive=primitive_type;
primitive_info[j].coordinates=(size_t) x;
primitive_info[j].method=FloodfillMethod;
primitive_info[j].text=(char *) NULL;
/*
Circumscribe primitive within a circle.
*/
bounds.x1=primitive_info[j].point.x;
bounds.y1=primitive_info[j].point.y;
bounds.x2=primitive_info[j].point.x;
bounds.y2=primitive_info[j].point.y;
for (k=1; k < (ssize_t) primitive_info[j].coordinates; k++)
{
point=primitive_info[j+k].point;
if (point.x < bounds.x1)
bounds.x1=point.x;
if (point.y < bounds.y1)
bounds.y1=point.y;
if (point.x > bounds.x2)
bounds.x2=point.x;
if (point.y > bounds.y2)
bounds.y2=point.y;
}
/*
Speculate how many points our primitive might consume.
*/
length=primitive_info[j].coordinates;
switch (primitive_type)
{
case RectanglePrimitive:
{
length*=5;
break;
}
case RoundRectanglePrimitive:
{
length*=5+8*BezierQuantum;
break;
}
case BezierPrimitive:
{
if (primitive_info[j].coordinates > 107)
(void) ThrowMagickException(exception,GetMagickModule(),DrawError,
"TooManyBezierCoordinates","`%s'",token);
length=BezierQuantum*primitive_info[j].coordinates;
break;
}
case PathPrimitive:
{
char
*s,
*t;
GetMagickToken(q,&q,token);
length=1;
t=token;
for (s=token; *s != '\0'; s=t)
{
double
value;
value=StringToDouble(s,&t);
(void) value;
if (s == t)
{
t++;
continue;
}
length++;
}
length=length*BezierQuantum/2;
break;
}
case CirclePrimitive:
case ArcPrimitive:
case EllipsePrimitive:
{
double
alpha,
beta,
radius;
alpha=bounds.x2-bounds.x1;
beta=bounds.y2-bounds.y1;
radius=hypot((double) alpha,(double) beta);
length=2*((size_t) ceil((double) MagickPI*radius))+6*BezierQuantum+360;
break;
}
default:
break;
}
if ((size_t) (i+length) >= number_points)
{
/*
Resize based on speculative points required by primitive.
*/
number_points+=length+1;
primitive_info=(PrimitiveInfo *) ResizeQuantumMemory(primitive_info,
(size_t) number_points,sizeof(*primitive_info));
if (primitive_info == (PrimitiveInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
break;
}
}
switch (primitive_type)
{
case PointPrimitive:
default:
{
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
TracePoint(primitive_info+j,primitive_info[j].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case LinePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
TraceLine(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case RectanglePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
TraceRectangle(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case RoundRectanglePrimitive:
{
if (primitive_info[j].coordinates != 3)
{
status=MagickFalse;
break;
}
TraceRoundRectangle(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case ArcPrimitive:
{
if (primitive_info[j].coordinates != 3)
{
primitive_type=UndefinedPrimitive;
break;
}
TraceArc(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case EllipsePrimitive:
{
if (primitive_info[j].coordinates != 3)
{
status=MagickFalse;
break;
}
TraceEllipse(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case CirclePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
TraceCircle(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case PolylinePrimitive:
break;
case PolygonPrimitive:
{
primitive_info[i]=primitive_info[j];
primitive_info[i].coordinates=0;
primitive_info[j].coordinates++;
i++;
break;
}
case BezierPrimitive:
{
if (primitive_info[j].coordinates < 3)
{
status=MagickFalse;
break;
}
TraceBezier(primitive_info+j,primitive_info[j].coordinates);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case PathPrimitive:
{
i=(ssize_t) (j+TracePath(primitive_info+j,token));
break;
}
case ColorPrimitive:
case MattePrimitive:
{
ssize_t
method;
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
GetMagickToken(q,&q,token);
method=ParseCommandOption(MagickMethodOptions,MagickFalse,token);
if (method == -1)
status=MagickFalse;
else
primitive_info[j].method=(PaintMethod) method;
break;
}
case TextPrimitive:
{
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
if (*token != ',')
GetMagickToken(q,&q,token);
primitive_info[j].text=AcquireString(token);
break;
}
case ImagePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
GetMagickToken(q,&q,token);
primitive_info[j].text=AcquireString(token);
break;
}
}
if (primitive_info == (PrimitiveInfo *) NULL)
break;
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int) (q-p),p);
if (status == MagickFalse)
break;
primitive_info[i].primitive=UndefinedPrimitive;
if (i == 0)
continue;
/*
Transform points.
*/
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
{
point=primitive_info[i].point;
primitive_info[i].point.x=graphic_context[n]->affine.sx*point.x+
graphic_context[n]->affine.ry*point.y+graphic_context[n]->affine.tx;
primitive_info[i].point.y=graphic_context[n]->affine.rx*point.x+
graphic_context[n]->affine.sy*point.y+graphic_context[n]->affine.ty;
point=primitive_info[i].point;
if (point.x < graphic_context[n]->bounds.x1)
graphic_context[n]->bounds.x1=point.x;
if (point.y < graphic_context[n]->bounds.y1)
graphic_context[n]->bounds.y1=point.y;
if (point.x > graphic_context[n]->bounds.x2)
graphic_context[n]->bounds.x2=point.x;
if (point.y > graphic_context[n]->bounds.y2)
graphic_context[n]->bounds.y2=point.y;
if (primitive_info[i].primitive == ImagePrimitive)
break;
if (i >= (ssize_t) number_points)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
}
if (graphic_context[n]->render != MagickFalse)
{
if ((n != 0) && (graphic_context[n]->clip_mask != (char *) NULL) &&
(LocaleCompare(graphic_context[n]->clip_mask,
graphic_context[n-1]->clip_mask) != 0))
status&=DrawClipPath(image,graphic_context[n],
graphic_context[n]->clip_mask,exception);
status&=DrawPrimitive(image,graphic_context[n],primitive_info,
exception);
}
if (primitive_info->text != (char *) NULL)
primitive_info->text=(char *) RelinquishMagickMemory(
primitive_info->text);
proceed=SetImageProgress(image,RenderImageTag,q-primitive,(MagickSizeType)
primitive_extent);
if (proceed == MagickFalse)
break;
if (status == 0)
break;
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"end draw-image");
/*
Relinquish resources.
*/
token=DestroyString(token);
if (primitive_info != (PrimitiveInfo *) NULL)
primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(primitive_info);
primitive=DestroyString(primitive);
for ( ; n >= 0; n--)
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context);
if (status == MagickFalse)
ThrowBinaryException(DrawError,"NonconformingDrawingPrimitiveDefinition",
keyword);
return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w G r a d i e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawGradientImage() draws a linear or radial gradient on the image.
%
% The format of the DrawGradientImage method is:
%
% MagickBooleanType DrawGradientImage(Image *image,
% const DrawInfo *draw_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double GetStopColorOffset(const GradientInfo *gradient,
const ssize_t x,const ssize_t y)
{
switch (gradient->type)
{
case UndefinedGradient:
case LinearGradient:
{
double
gamma,
length,
offset,
scale;
PointInfo
p,
q;
const SegmentInfo
*gradient_vector;
gradient_vector=(&gradient->gradient_vector);
p.x=gradient_vector->x2-gradient_vector->x1;
p.y=gradient_vector->y2-gradient_vector->y1;
q.x=(double) x-gradient_vector->x1;
q.y=(double) y-gradient_vector->y1;
length=sqrt(q.x*q.x+q.y*q.y);
gamma=sqrt(p.x*p.x+p.y*p.y)*length;
gamma=PerceptibleReciprocal(gamma);
scale=p.x*q.x+p.y*q.y;
offset=gamma*scale*length;
return(offset);
}
case RadialGradient:
{
double
length,
offset;
PointInfo
v;
v.x=(double) x-gradient->center.x;
v.y=(double) y-gradient->center.y;
length=sqrt(v.x*v.x+v.y*v.y);
if (gradient->spread == RepeatSpread)
return(length);
offset=length/gradient->radius;
return(offset);
}
}
return(0.0);
}
MagickExport MagickBooleanType DrawGradientImage(Image *image,
  const DrawInfo *draw_info,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  const GradientInfo
    *gradient;

  const SegmentInfo
    *gradient_vector;

  double
    length;

  MagickBooleanType
    status;

  PixelInfo
    zero;

  PointInfo
    point;

  RectangleInfo
    bounding_box;

  ssize_t
    y;

  /*
    Render draw_info's gradient (linear or radial) over the gradient's
    bounding box, compositing each gradient color over the existing pixel.
    Rows are processed in parallel when OpenMP is available; each row only
    reads shared, immutable gradient state, so no locking is needed beyond
    the shared status flag.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  gradient=(&draw_info->gradient);
  gradient_vector=(&gradient->gradient_vector);
  /*
    length is the magnitude of the gradient vector; linear offsets from
    GetStopColorOffset() are divided by it to normalize into [0,1].
  */
  point.x=gradient_vector->x2-gradient_vector->x1;
  point.y=gradient_vector->y2-gradient_vector->y1;
  length=sqrt(point.x*point.x+point.y*point.y);
  bounding_box=gradient->bounding_box;
  status=MagickTrue;
  GetPixelInfo(image,&zero);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,1,1)
#endif
  /*
    NOTE(review): the loop bounds treat bounding_box.height/width as
    absolute end coordinates rather than y+height/x+width -- confirm the
    convention used when gradient->bounding_box is populated.
  */
  for (y=bounding_box.y; y < (ssize_t) bounding_box.height; y++)
  {
    PixelInfo
      composite,
      pixel;

    double
      alpha,
      offset;

    register Quantum
      *restrict q;

    register ssize_t
      i,
      x;

    ssize_t
      j;

    if (status == MagickFalse)
      continue;  /* another thread already failed; skip remaining rows */
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    pixel=zero;
    composite=zero;
    /*
      Seed the offset from column 0 of this row; inside the loop it is
      recomputed per-pixel except at the gradient origin pixel.
    */
    offset=GetStopColorOffset(gradient,0,y);
    if (gradient->type != RadialGradient)
      offset/=length;  /* normalize linear offset into [0,1] */
    for (x=bounding_box.x; x < (ssize_t) bounding_box.width; x++)
    {
      GetPixelInfoPixel(image,q,&pixel);
      switch (gradient->spread)
      {
        case UndefinedSpread:
        case PadSpread:
        {
          /*
            Pad: offsets outside [0,1] clamp to the first/last stop color.
          */
          if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
              (y != (ssize_t) ceil(gradient_vector->y1-0.5)))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type != RadialGradient)
                offset/=length;
            }
          /*
            Find the first stop beyond this offset; i-1 and i bracket it.
          */
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if ((offset < 0.0) || (i == 0))
            composite=gradient->stops[0].color;
          else
            if ((offset > 1.0) || (i == (ssize_t) gradient->number_stops))
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                j=i;
                i--;
                /* linear interpolation between the bracketing stops */
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
        case ReflectSpread:
        {
          /*
            Reflect: mirror the gradient on every other unit interval so
            adjacent repetitions join without a seam.
          */
          if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
              (y != (ssize_t) ceil(gradient_vector->y1-0.5)))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type != RadialGradient)
                offset/=length;
            }
          if (offset < 0.0)
            offset=(-offset);
          if ((ssize_t) fmod(offset,2.0) == 0)
            offset=fmod(offset,1.0);       /* even interval: forward */
          else
            offset=1.0-fmod(offset,1.0);   /* odd interval: reversed */
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if (i == 0)
            composite=gradient->stops[0].color;
          else
            if (i == (ssize_t) gradient->number_stops)
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
        case RepeatSpread:
        {
          MagickBooleanType
            antialias;

          double
            repeat;

          /*
            Repeat: tile the gradient by folding the offset modulo one
            gradient length (linear) or radius (radial).  antialias marks
            the pixel straddling the seam between repetitions so it can be
            blended between the last and first stop colors.
          */
          antialias=MagickFalse;
          repeat=0.0;
          if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
              (y != (ssize_t) ceil(gradient_vector->y1-0.5)))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type == LinearGradient)
                {
                  repeat=fmod(offset,length);
                  if (repeat < 0.0)
                    repeat=length-fmod(-repeat,length);
                  else
                    repeat=fmod(offset,length);
                  antialias=(repeat < length) && ((repeat+1.0) > length) ?
                    MagickTrue : MagickFalse;
                  offset=repeat/length;
                }
              else
                {
                  repeat=fmod(offset,gradient->radius);
                  if (repeat < 0.0)
                    repeat=gradient->radius-fmod(-repeat,gradient->radius);
                  else
                    repeat=fmod(offset,gradient->radius);
                  antialias=repeat+1.0 > gradient->radius ? MagickTrue :
                    MagickFalse;
                  offset=repeat/gradient->radius;
                }
            }
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if (i == 0)
            composite=gradient->stops[0].color;
          else
            if (i == (ssize_t) gradient->number_stops)
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                if (antialias != MagickFalse)
                  {
                    /* seam pixel: blend last stop into first stop */
                    if (gradient->type == LinearGradient)
                      alpha=length-repeat;
                    else
                      alpha=gradient->radius-repeat;
                    i=0;
                    j=(ssize_t) gradient->number_stops-1L;
                  }
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
      }
      /* composite the gradient color over the existing pixel */
      CompositePixelInfoOver(&composite,composite.alpha,&pixel,pixel.alpha,
        &pixel);
      SetPixelInfoPixel(image,&pixel,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w P a t t e r n P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPatternPath() draws a pattern.
%
% The format of the DrawPatternPath method is:
%
% MagickBooleanType DrawPatternPath(Image *image,const DrawInfo *draw_info,
% const char *name,Image **pattern,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o name: the pattern name.
%
% o pattern: the rendered pattern image is returned in this parameter.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType DrawPatternPath(Image *image,
  const DrawInfo *draw_info,const char *name,Image **pattern,
  ExceptionInfo *exception)
{
  char
    property[MaxTextExtent];

  const char
    *geometry,
    *path;

  DrawInfo
    *clone_info;

  ImageInfo
    *image_info;

  MagickBooleanType
    status;

  /*
    Render the named pattern into *pattern.  The pattern's MVG path is
    looked up in the image artifact "<name>", its canvas geometry in
    "<name>-geometry" (both stored by DrawImage's "push pattern" handler).
    Any previous *pattern image is destroyed and replaced; ownership of
    the new image passes to the caller.  Returns MagickFalse when the
    artifacts are missing or the pattern cannot be rendered.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  assert(name != (const char *) NULL);
  (void) FormatLocaleString(property,MaxTextExtent,"%s",name);
  path=GetImageArtifact(image,property);
  if (path == (const char *) NULL)
    return(MagickFalse);
  (void) FormatLocaleString(property,MaxTextExtent,"%s-geometry",name);
  geometry=GetImageArtifact(image,property);
  if (geometry == (const char *) NULL)
    return(MagickFalse);
  if ((*pattern) != (Image *) NULL)
    *pattern=DestroyImage(*pattern);
  image_info=AcquireImageInfo();
  image_info->size=AcquireString(geometry);
  *pattern=AcquireImage(image_info,exception);
  image_info=DestroyImageInfo(image_info);
  /*
    Guard against allocation failure: without this check the calls below
    would dereference a NULL pattern image.
  */
  if (*pattern == (Image *) NULL)
    return(MagickFalse);
  /* fully transparent canvas so only the drawn path is visible */
  (void) QueryColorCompliance("#000000ff",AllCompliance,
    &(*pattern)->background_color,exception);
  (void) SetImageBackgroundColor(*pattern,exception);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      "begin pattern-path %s %s",name,geometry);
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  /* drop inherited fill/stroke patterns to avoid recursive rendering */
  clone_info->fill_pattern=NewImageList();
  clone_info->stroke_pattern=NewImageList();
  (void) CloneString(&clone_info->primitive,path);
  status=DrawImage(*pattern,clone_info,exception);
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end pattern-path");
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w P o l y g o n P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPolygonPrimitive() draws a polygon on the image.
%
% The format of the DrawPolygonPrimitive method is:
%
% MagickBooleanType DrawPolygonPrimitive(Image *image,
% const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
static PolygonInfo **DestroyPolygonThreadSet(PolygonInfo **polygon_info)
{
  register ssize_t
    n;

  /*
    Release every per-thread polygon structure, then the holder array
    itself; returns the relinquished (null) pointer so callers can clear
    their copy in one assignment.
  */
  assert(polygon_info != (PolygonInfo **) NULL);
  n=0;
  while (n < (ssize_t) GetMagickResourceLimit(ThreadResource))
  {
    if (polygon_info[n] != (PolygonInfo *) NULL)
      polygon_info[n]=DestroyPolygonInfo(polygon_info[n]);
    n++;
  }
  polygon_info=(PolygonInfo **) RelinquishMagickMemory(polygon_info);
  return(polygon_info);
}
static PolygonInfo **AcquirePolygonThreadSet(const DrawInfo *draw_info,
  const PrimitiveInfo *primitive_info)
{
  PathInfo
    *restrict path_info;

  PolygonInfo
    **polygon_info;

  register ssize_t
    i;

  size_t
    number_threads;

  /*
    Build one PolygonInfo (sorted edge list) per worker thread so the
    scanline rasterizer can run in parallel without sharing mutable state.
    Returns NULL on any allocation or conversion failure; partial results
    are released via DestroyPolygonThreadSet().
  */
  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  polygon_info=(PolygonInfo **) AcquireQuantumMemory(number_threads,
    sizeof(*polygon_info));
  if (polygon_info == (PolygonInfo **) NULL)
    return((PolygonInfo **) NULL);
  (void) ResetMagickMemory(polygon_info,0,number_threads*sizeof(*polygon_info));
  path_info=ConvertPrimitiveToPath(draw_info,primitive_info);
  if (path_info == (PathInfo *) NULL)
    return(DestroyPolygonThreadSet(polygon_info));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    polygon_info[i]=ConvertPathToPolygon(draw_info,path_info);
    if (polygon_info[i] == (PolygonInfo *) NULL)
      {
        /*
          Bug fix: the intermediate path representation must be released on
          this error path too; the original code leaked path_info here.
        */
        path_info=(PathInfo *) RelinquishMagickMemory(path_info);
        return(DestroyPolygonThreadSet(polygon_info));
      }
  }
  path_info=(PathInfo *) RelinquishMagickMemory(path_info);
  return(polygon_info);
}
/*
  GetFillAlpha() computes the fill and stroke coverage of point (x,y)
  against the polygon's edge list.  The stroke coverage is returned through
  *stroke_alpha; the fill coverage (0.0 .. 1.0) is the return value.  "mid"
  is half the device-space stroke width; "fill" enables the fill
  computation; "fill_rule" selects non-zero vs. even-odd winding.
*/
static double GetFillAlpha(PolygonInfo *polygon_info,const double mid,
  const MagickBooleanType fill,const FillRule fill_rule,const ssize_t x,
  const ssize_t y,double *stroke_alpha)
{
  double
    alpha,
    beta,
    distance,
    subpath_alpha;

  PointInfo
    delta;

  register const PointInfo
    *q;

  register EdgeInfo
    *p;

  register ssize_t
    i;

  ssize_t
    j,
    winding_number;

  /*
    Compute fill & stroke opacity for this (x,y) point.
  */
  *stroke_alpha=0.0;
  subpath_alpha=0.0;
  p=polygon_info->edges;
  for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++)
  {
    /* Edges whose padded bounds cannot contain the scanline are skipped;
       once an edge starts below the scanline no later edge can match. */
    if ((double) y <= (p->bounds.y1-mid-0.5))
      break;
    if ((double) y > (p->bounds.y2+mid+0.5))
      {
        (void) DestroyEdge(polygon_info,(size_t) j);
        continue;
      }
    if (((double) x <= (p->bounds.x1-mid-0.5)) ||
        ((double) x > (p->bounds.x2+mid+0.5)))
      continue;
    i=(ssize_t) MagickMax((double) p->highwater,1.0);
    for ( ; i < (ssize_t) p->number_points; i++)
    {
      if ((double) y <= (p->points[i-1].y-mid-0.5))
        break;
      if ((double) y > (p->points[i].y+mid+0.5))
        continue;
      if (p->scanline != (double) y)
        {
          p->scanline=(double) y;
          p->highwater=(size_t) i;
        }
      /*
        Compute distance between a point and an edge.
      */
      q=p->points+i-1;
      delta.x=(q+1)->x-q->x;
      delta.y=(q+1)->y-q->y;
      beta=delta.x*(x-q->x)+delta.y*(y-q->y);
      if (beta < 0.0)
        {
          /* Closest to the segment's first endpoint. */
          delta.x=(double) x-q->x;
          delta.y=(double) y-q->y;
          distance=delta.x*delta.x+delta.y*delta.y;
        }
      else
        {
          alpha=delta.x*delta.x+delta.y*delta.y;
          if (beta > alpha)
            {
              /* Closest to the segment's second endpoint. */
              delta.x=(double) x-(q+1)->x;
              delta.y=(double) y-(q+1)->y;
              distance=delta.x*delta.x+delta.y*delta.y;
            }
          else
            {
              /* Perpendicular (squared) distance to the segment interior. */
              alpha=1.0/alpha;
              beta=delta.x*(y-q->y)-delta.y*(x-q->x);
              distance=alpha*beta*beta;
            }
        }
      /*
        Compute stroke & subpath opacity.
      */
      beta=0.0;
      if (p->ghostline == MagickFalse)
        {
          alpha=mid+0.5;
          if ((*stroke_alpha < 1.0) &&
              (distance <= ((alpha+0.25)*(alpha+0.25))))
            {
              alpha=mid-0.5;
              if (distance <= ((alpha+0.25)*(alpha+0.25)))
                *stroke_alpha=1.0;
              else
                {
                  beta=1.0;
                  if (distance != 1.0)
                    beta=sqrt((double) distance);
                  alpha=beta-mid-0.5;
                  if (*stroke_alpha < ((alpha-0.25)*(alpha-0.25)))
                    *stroke_alpha=(alpha-0.25)*(alpha-0.25);
                }
            }
        }
      if ((fill == MagickFalse) || (distance > 1.0) || (subpath_alpha >= 1.0))
        continue;
      if (distance <= 0.0)
        {
          subpath_alpha=1.0;
          continue;
        }
      /*
        Bug fix (dead code removed): a redundant "if (distance > 1.0)
        continue;" stood here, but the guard above already skips that case,
        so distance is guaranteed to be in (0.0, 1.0] at this point.
      */
      if (beta == 0.0)
        {
          beta=1.0;
          if (distance != 1.0)
            beta=sqrt(distance);
        }
      alpha=beta-1.0;
      if (subpath_alpha < (alpha*alpha))
        subpath_alpha=alpha*alpha;
    }
  }
  /*
    Compute fill opacity.
  */
  if (fill == MagickFalse)
    return(0.0);
  if (subpath_alpha >= 1.0)
    return(1.0);
  /*
    Determine winding number.
  */
  winding_number=0;
  p=polygon_info->edges;
  for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++)
  {
    if ((double) y <= p->bounds.y1)
      break;
    if (((double) y > p->bounds.y2) || ((double) x <= p->bounds.x1))
      continue;
    if ((double) x > p->bounds.x2)
      {
        winding_number+=p->direction ? 1 : -1;
        continue;
      }
    i=(ssize_t) MagickMax((double) p->highwater,1.0);
    for ( ; i < (ssize_t) p->number_points; i++)
      if ((double) y <= p->points[i].y)
        break;
    q=p->points+i-1;
    /* Cross-product sign decides which side of the edge (x,y) lies on. */
    if ((((q+1)->x-q->x)*(y-q->y)) <= (((q+1)->y-q->y)*(x-q->x)))
      winding_number+=p->direction ? 1 : -1;
  }
  if (fill_rule != NonZeroRule)
    {
      /* Even-odd rule: inside when the winding count is odd. */
      if ((MagickAbsoluteValue(winding_number) & 0x01) != 0)
        return(1.0);
    }
  else
    if (MagickAbsoluteValue(winding_number) != 0)
      return(1.0);
  return(subpath_alpha);
}
/*
  DrawPolygonPrimitive() rasterizes primitive_info onto image: a single
  point when coordinates == 1, otherwise a filled/stroked polygon or line.
  Scanlines are processed in parallel (one PolygonInfo per OpenMP thread).
  Returns MagickTrue on success.
*/
static MagickBooleanType DrawPolygonPrimitive(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    fill,
    status;

  double
    mid;

  PolygonInfo
    **restrict polygon_info;

  register EdgeInfo
    *p;

  register ssize_t
    i;

  SegmentInfo
    bounds;

  ssize_t
    start,
    stop,
    y;

  /*
    Compute bounding box.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (DrawInfo *) NULL);
  assert(draw_info->signature == MagickSignature);
  assert(primitive_info != (PrimitiveInfo *) NULL);
  if (primitive_info->coordinates == 0)
    return(MagickTrue);
  /* One rasterizer edge-list per worker thread. */
  polygon_info=AcquirePolygonThreadSet(draw_info,primitive_info);
  if (polygon_info == (PolygonInfo **) NULL)
    return(MagickFalse);
DisableMSCWarning(4127)
  if (0)
    /* Debug-only visualization path, compiled but never taken. */
    DrawBoundingRectangles(image,draw_info,polygon_info[0],exception);
RestoreMSCWarning
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-polygon");
  fill=(primitive_info->method == FillToBorderMethod) ||
    (primitive_info->method == FloodfillMethod) ? MagickTrue : MagickFalse;
  /* mid: half the stroke width in device space. */
  mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0;
  /* Union of all edge bounding boxes. */
  bounds=polygon_info[0]->edges[0].bounds;
  for (i=1; i < (ssize_t) polygon_info[0]->number_edges; i++)
  {
    p=polygon_info[0]->edges+i;
    if (p->bounds.x1 < bounds.x1)
      bounds.x1=p->bounds.x1;
    if (p->bounds.y1 < bounds.y1)
      bounds.y1=p->bounds.y1;
    if (p->bounds.x2 > bounds.x2)
      bounds.x2=p->bounds.x2;
    if (p->bounds.y2 > bounds.y2)
      bounds.y2=p->bounds.y2;
  }
  /* Pad by the stroke half-width and clamp to the image frame. */
  bounds.x1-=(mid+1.0);
  bounds.x1=bounds.x1 < 0.0 ? 0.0 : (size_t) ceil(bounds.x1-0.5) >=
    image->columns ? (double) image->columns-1 : bounds.x1;
  bounds.y1-=(mid+1.0);
  bounds.y1=bounds.y1 < 0.0 ? 0.0 : (size_t) ceil(bounds.y1-0.5) >=
    image->rows ? (double) image->rows-1 : bounds.y1;
  bounds.x2+=(mid+1.0);
  bounds.x2=bounds.x2 < 0.0 ? 0.0 : (size_t) floor(bounds.x2+0.5) >=
    image->columns ? (double) image->columns-1 : bounds.x2;
  bounds.y2+=(mid+1.0);
  bounds.y2=bounds.y2 < 0.0 ? 0.0 : (size_t) floor(bounds.y2+0.5) >=
    image->rows ? (double) image->rows-1 : bounds.y2;
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
  if (primitive_info->coordinates == 1)
    {
      /*
        Draw point.
      */
      start=(ssize_t) ceil(bounds.y1-0.5);
      stop=(ssize_t) floor(bounds.y2+0.5);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,1,1)
#endif
      for (y=start; y <= stop; y++)
      {
        MagickBooleanType
          sync;

        PixelInfo
          pixel;

        register ssize_t
          x;

        register Quantum
          *restrict q;

        ssize_t
          start,
          stop;

        if (status == MagickFalse)
          continue;
        /* Per-row x extent (shadows the outer start/stop intentionally). */
        start=(ssize_t) ceil(bounds.x1-0.5);
        stop=(ssize_t) floor(bounds.x2+0.5);
        x=start;
        q=GetCacheViewAuthenticPixels(image_view,x,y,(size_t) (stop-x+1),1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        GetPixelInfo(image,&pixel);
        for ( ; x <= stop; x++)
        {
          /* Only the single pixel matching the point's rounded
             coordinates receives the stroke color. */
          if ((x == (ssize_t) ceil(primitive_info->point.x-0.5)) &&
              (y == (ssize_t) ceil(primitive_info->point.y-0.5)))
            {
              (void) GetStrokeColor(draw_info,x,y,&pixel,exception);
              SetPixelInfoPixel(image,&pixel,q);
            }
          q+=GetPixelChannels(image);
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      polygon_info=DestroyPolygonThreadSet(polygon_info);
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(DrawEvent,GetMagickModule(),
          " end draw-polygon");
      return(status);
    }
  /*
    Draw polygon or line.
  */
  if (image->alpha_trait != BlendPixelTrait)
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
  start=(ssize_t) ceil(bounds.y1-0.5);
  stop=(ssize_t) floor(bounds.y2+0.5);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,1,1)
#endif
  for (y=start; y <= stop; y++)
  {
    const int
      id = GetOpenMPThreadId();

    double
      fill_alpha,
      stroke_alpha;

    PixelInfo
      fill_color,
      stroke_color;

    register Quantum
      *restrict q;

    register ssize_t
      x;

    ssize_t
      start,
      stop;

    if (status == MagickFalse)
      continue;
    /* Per-row x extent (shadows the outer start/stop intentionally). */
    start=(ssize_t) ceil(bounds.x1-0.5);
    stop=(ssize_t) floor(bounds.x2+0.5);
    q=GetCacheViewAuthenticPixels(image_view,start,y,(size_t) (stop-start+1),1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=start; x <= stop; x++)
    {
      /*
        Fill and/or stroke.
      */
      fill_alpha=GetFillAlpha(polygon_info[id],mid,fill,draw_info->fill_rule,
        x,y,&stroke_alpha);
      if (draw_info->stroke_antialias == MagickFalse)
        {
          /* Hard threshold when antialiasing is off. */
          fill_alpha=fill_alpha > 0.25 ? 1.0 : 0.0;
          stroke_alpha=stroke_alpha > 0.25 ? 1.0 : 0.0;
        }
      (void) GetFillColor(draw_info,x,y,&fill_color,exception);
      fill_alpha=fill_alpha*fill_color.alpha;
      CompositePixelOver(image,&fill_color,fill_alpha,q,(double)
        GetPixelAlpha(image,q),q);
      (void) GetStrokeColor(draw_info,x,y,&stroke_color,exception);
      stroke_alpha=stroke_alpha*stroke_color.alpha;
      CompositePixelOver(image,&stroke_color,stroke_alpha,q,(double)
        GetPixelAlpha(image,q),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  polygon_info=DestroyPolygonThreadSet(polygon_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-polygon");
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPrimitive() draws a primitive (line, rectangle, ellipse) on the image.
%
% The format of the DrawPrimitive method is:
%
% MagickBooleanType DrawPrimitive(Image *image,const DrawInfo *draw_info,
% PrimitiveInfo *primitive_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  LogPrimitiveInfo() emits DrawEvent log records describing primitive_info:
  a one-line summary for point/color/matte/text/image primitives, otherwise
  a vertex-by-vertex dump of the path, flagging duplicate points and whether
  each subpath ends open or closed.
*/
static void LogPrimitiveInfo(const PrimitiveInfo *primitive_info)
{
  const char
    *methods[] =
    {
      "point",
      "replace",
      "floodfill",
      "filltoborder",
      "reset",
      "?"
    };

  PointInfo
    p,
    q,
    point;

  register ssize_t
    i,
    x;

  ssize_t
    coordinates,
    y;

  x=(ssize_t) ceil(primitive_info->point.x-0.5);
  y=(ssize_t) ceil(primitive_info->point.y-0.5);
  switch (primitive_info->primitive)
  {
    case PointPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "PointPrimitive %.20g,%.20g %s",(double) x,(double) y,
        methods[primitive_info->method]);
      return;
    }
    case ColorPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "ColorPrimitive %.20g,%.20g %s",(double) x,(double) y,
        methods[primitive_info->method]);
      return;
    }
    case MattePrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "MattePrimitive %.20g,%.20g %s",(double) x,(double) y,
        methods[primitive_info->method]);
      return;
    }
    case TextPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "TextPrimitive %.20g,%.20g",(double) x,(double) y);
      return;
    }
    case ImagePrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "ImagePrimitive %.20g,%.20g",(double) x,(double) y);
      return;
    }
    default:
      break;
  }
  /*
    Path-style primitive: walk the vertex list.  p tracks the first point
    of the current subpath; q tracks the previous point (seeded off-canvas
    so the first vertex never reads as a duplicate).
  */
  coordinates=0;
  p=primitive_info[0].point;
  q.x=(-1.0);
  q.y=(-1.0);
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
  {
    point=primitive_info[i].point;
    if (coordinates <= 0)
      {
        /* A new subpath begins: remember its first point. */
        coordinates=(ssize_t) primitive_info[i].coordinates;
        (void) LogMagickEvent(DrawEvent,GetMagickModule(),
          " begin open (%.20g)",(double) coordinates);
        p=point;
      }
    /*
      Bug fix: the original re-assigned "point=primitive_info[i].point;"
      a second time here; the value is unchanged, so the redundant
      assignment is removed.
    */
    if ((fabs(q.x-point.x) >= MagickEpsilon) ||
        (fabs(q.y-point.y) >= MagickEpsilon))
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        " %.20g: %.18g,%.18g",(double) coordinates,point.x,point.y);
    else
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        " %.20g: %g %g (duplicate)",(double) coordinates,point.x,point.y);
    q=point;
    coordinates--;
    if (coordinates > 0)
      continue;
    /* Subpath complete: report whether it closed back on its start. */
    if ((fabs(p.x-point.x) >= MagickEpsilon) ||
        (fabs(p.y-point.y) >= MagickEpsilon))
      (void) LogMagickEvent(DrawEvent,GetMagickModule()," end last (%.20g)",
        (double) coordinates);
    else
      (void) LogMagickEvent(DrawEvent,GetMagickModule()," end open (%.20g)",
        (double) coordinates);
  }
}
MagickExport MagickBooleanType DrawPrimitive(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickStatusType
    status;

  register ssize_t
    i,
    x;

  ssize_t
    y;

  /*
    Dispatch a single primitive (point, color, matte, text, image, or a
    path-style primitive) onto the image.  Returns MagickTrue when every
    sub-operation succeeded.
  */
  if (image->debug != MagickFalse)
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        " begin draw-primitive");
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        " affine: %g %g %g %g %g %g",draw_info->affine.sx,
        draw_info->affine.rx,draw_info->affine.ry,draw_info->affine.sy,
        draw_info->affine.tx,draw_info->affine.ty);
    }
  /* A non-gray fill or stroke forces a grayscale image into sRGB. */
  if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
      ((IsPixelInfoGray(&draw_info->fill) == MagickFalse) ||
       (IsPixelInfoGray(&draw_info->stroke) == MagickFalse)))
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  status=MagickTrue;
  /* Rounded device coordinates of the primitive's anchor point. */
  x=(ssize_t) ceil(primitive_info->point.x-0.5);
  y=(ssize_t) ceil(primitive_info->point.y-0.5);
  image_view=AcquireAuthenticCacheView(image,exception);
  switch (primitive_info->primitive)
  {
    case PointPrimitive:
    {
      /* Composite the fill color over the single pixel at (x,y). */
      PixelInfo
        fill_color;

      register Quantum
        *q;

      if ((y < 0) || (y >= (ssize_t) image->rows))
        break;
      if ((x < 0) || (x >= (ssize_t) image->columns))
        break;
      q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
      if (q == (Quantum *) NULL)
        break;
      (void) GetFillColor(draw_info,x,y,&fill_color,exception);
      CompositePixelOver(image,&fill_color,(double) fill_color.alpha,q,
        (double) GetPixelAlpha(image,q),q);
      (void) SyncCacheViewAuthenticPixels(image_view,exception);
      break;
    }
    case ColorPrimitive:
    {
      /* Recolor pixels according to the primitive's paint method. */
      switch (primitive_info->method)
      {
        case PointMethod:
        default:
        {
          /* Recolor just the anchor pixel. */
          PixelInfo
            pixel;

          register Quantum
            *q;

          q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
          if (q == (Quantum *) NULL)
            break;
          GetPixelInfo(image,&pixel);
          (void) GetFillColor(draw_info,x,y,&pixel,exception);
          SetPixelInfoPixel(image,&pixel,q);
          (void) SyncCacheViewAuthenticPixels(image_view,exception);
          break;
        }
        case ReplaceMethod:
        {
          /* Recolor every pixel fuzzily matching the anchor's color. */
          MagickBooleanType
            sync;

          PixelInfo
            pixel,
            target;

          (void) GetOneCacheViewVirtualPixelInfo(image_view,x,y,&target,
            exception);
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            register Quantum
              *restrict q;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (Quantum *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              GetPixelInfoPixel(image,q,&pixel);
              if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse)
                {
                  q+=GetPixelChannels(image);
                  continue;
                }
              (void) GetFillColor(draw_info,x,y,&pixel,exception);
              SetPixelInfoPixel(image,&pixel,q);
              q+=GetPixelChannels(image);
            }
            sync=SyncCacheViewAuthenticPixels(image_view,exception);
            if (sync == MagickFalse)
              break;
          }
          break;
        }
        case FloodfillMethod:
        case FillToBorderMethod:
        {
          /* Flood-fill from the anchor; FillToBorder fills up to the
             border color instead of matching the seed color. */
          PixelInfo
            target;

          (void) GetOneVirtualPixelInfo(image,TileVirtualPixelMethod,x,y,
            &target,exception);
          if (primitive_info->method == FillToBorderMethod)
            {
              target.red=(double) draw_info->border_color.red;
              target.green=(double) draw_info->border_color.green;
              target.blue=(double) draw_info->border_color.blue;
            }
          status&=FloodfillPaintImage(image,draw_info,&target,x,y,
            primitive_info->method == FloodfillMethod ? MagickFalse :
            MagickTrue,exception);
          break;
        }
        case ResetMethod:
        {
          /* Recolor every pixel of the image with the fill color. */
          MagickBooleanType
            sync;

          PixelInfo
            pixel;

          GetPixelInfo(image,&pixel);
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            register Quantum
              *restrict q;

            register ssize_t
              x;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (Quantum *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              (void) GetFillColor(draw_info,x,y,&pixel,exception);
              SetPixelInfoPixel(image,&pixel,q);
              q+=GetPixelChannels(image);
            }
            sync=SyncCacheViewAuthenticPixels(image_view,exception);
            if (sync == MagickFalse)
              break;
          }
          break;
        }
      }
      break;
    }
    case MattePrimitive:
    {
      /* Same paint methods as ColorPrimitive, but only the alpha channel
         is modified. */
      if (image->alpha_trait != BlendPixelTrait)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      switch (primitive_info->method)
      {
        case PointMethod:
        default:
        {
          PixelInfo
            pixel;

          register Quantum
            *q;

          q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
          if (q == (Quantum *) NULL)
            break;
          (void) GetFillColor(draw_info,x,y,&pixel,exception);
          SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
          (void) SyncCacheViewAuthenticPixels(image_view,exception);
          break;
        }
        case ReplaceMethod:
        {
          MagickBooleanType
            sync;

          PixelInfo
            pixel,
            target;

          (void) GetOneCacheViewVirtualPixelInfo(image_view,x,y,&target,
            exception);
          GetPixelInfo(image,&pixel);
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            register Quantum
              *restrict q;

            register ssize_t
              x;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (Quantum *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              GetPixelInfoPixel(image,q,&pixel);
              if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse)
                {
                  q+=GetPixelChannels(image);
                  continue;
                }
              (void) GetFillColor(draw_info,x,y,&pixel,exception);
              SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
              q+=GetPixelChannels(image);
            }
            sync=SyncCacheViewAuthenticPixels(image_view,exception);
            if (sync == MagickFalse)
              break;
          }
          break;
        }
        case FloodfillMethod:
        case FillToBorderMethod:
        {
          ChannelType
            channel_mask;

          PixelInfo
            target;

          (void) GetOneVirtualPixelInfo(image,TileVirtualPixelMethod,x,y,
            &target,exception);
          if (primitive_info->method == FillToBorderMethod)
            {
              target.red=(double) draw_info->border_color.red;
              target.green=(double) draw_info->border_color.green;
              target.blue=(double) draw_info->border_color.blue;
            }
          /* Restrict the flood-fill to the alpha channel only. */
          channel_mask=SetImageChannelMask(image,AlphaChannel);
          status&=FloodfillPaintImage(image,draw_info,&target,x,y,
            primitive_info->method == FloodfillMethod ? MagickFalse :
            MagickTrue,exception);
          (void) SetImageChannelMask(image,channel_mask);
          break;
        }
        case ResetMethod:
        {
          MagickBooleanType
            sync;

          PixelInfo
            pixel;

          for (y=0; y < (ssize_t) image->rows; y++)
          {
            register Quantum
              *restrict q;

            register ssize_t
              x;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (Quantum *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              (void) GetFillColor(draw_info,x,y,&pixel,exception);
              SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
              q+=GetPixelChannels(image);
            }
            sync=SyncCacheViewAuthenticPixels(image_view,exception);
            if (sync == MagickFalse)
              break;
          }
          break;
        }
      }
      break;
    }
    case TextPrimitive:
    {
      /* Render the primitive's text at its anchor via AnnotateImage(). */
      char
        geometry[MaxTextExtent];

      DrawInfo
        *clone_info;

      if (primitive_info->text == (char *) NULL)
        break;
      clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
      (void) CloneString(&clone_info->text,primitive_info->text);
      (void) FormatLocaleString(geometry,MaxTextExtent,"%+f%+f",
        primitive_info->point.x,primitive_info->point.y);
      (void) CloneString(&clone_info->geometry,geometry);
      status&=AnnotateImage(image,clone_info,exception);
      clone_info=DestroyDrawInfo(clone_info);
      break;
    }
    case ImagePrimitive:
    {
      /* Read an image (inline data: URI or filename in ->text) and
         composite it at the anchor, optionally resized. */
      AffineMatrix
        affine;

      char
        composite_geometry[MaxTextExtent];

      Image
        *composite_image;

      ImageInfo
        *clone_info;

      RectangleInfo
        geometry;

      ssize_t
        x1,
        y1;

      if (primitive_info->text == (char *) NULL)
        break;
      clone_info=AcquireImageInfo();
      if (LocaleNCompare(primitive_info->text,"data:",5) == 0)
        composite_image=ReadInlineImage(clone_info,primitive_info->text,
          exception);
      else
        {
          (void) CopyMagickString(clone_info->filename,primitive_info->text,
            MaxTextExtent);
          composite_image=ReadImage(clone_info,exception);
        }
      clone_info=DestroyImageInfo(clone_info);
      if (composite_image == (Image *) NULL)
        break;
      (void) SetImageProgressMonitor(composite_image,(MagickProgressMonitor)
        NULL,(void *) NULL);
      x1=(ssize_t) ceil(primitive_info[1].point.x-0.5);
      y1=(ssize_t) ceil(primitive_info[1].point.y-0.5);
      if (((x1 != 0L) && (x1 != (ssize_t) composite_image->columns)) ||
          ((y1 != 0L) && (y1 != (ssize_t) composite_image->rows)))
        {
          char
            geometry[MaxTextExtent];

          /*
            Resize image.
          */
          (void) FormatLocaleString(geometry,MaxTextExtent,"%gx%g!",
            primitive_info[1].point.x,primitive_info[1].point.y);
          composite_image->filter=image->filter;
          (void) TransformImage(&composite_image,(char *) NULL,geometry,
            exception);
        }
      if (composite_image->alpha_trait != BlendPixelTrait)
        (void) SetImageAlphaChannel(composite_image,OpaqueAlphaChannel,
          exception);
      if (draw_info->alpha != OpaqueAlpha)
        (void) SetImageAlpha(composite_image,draw_info->alpha,exception);
      SetGeometry(image,&geometry);
      image->gravity=draw_info->gravity;
      geometry.x=x;
      geometry.y=y;
      (void) FormatLocaleString(composite_geometry,MaxTextExtent,
        "%.20gx%.20g%+.20g%+.20g",(double) composite_image->columns,(double)
        composite_image->rows,(double) geometry.x,(double) geometry.y);
      (void) ParseGravityGeometry(image,composite_geometry,&geometry,exception);
      affine=draw_info->affine;
      affine.tx=(double) geometry.x;
      affine.ty=(double) geometry.y;
      composite_image->interpolate=image->interpolate;
      /* Over-composite uses the affine path; any other compose operator
         goes through CompositeImage(). */
      if (draw_info->compose == OverCompositeOp)
        (void) DrawAffineImage(image,composite_image,&affine,exception);
      else
        (void) CompositeImage(image,composite_image,draw_info->compose,
          MagickTrue,geometry.x,geometry.y,exception);
      composite_image=DestroyImage(composite_image);
      break;
    }
    default:
    {
      /* Path-style primitive: polygon/line fill and stroke. */
      double
        mid,
        scale;

      DrawInfo
        *clone_info;

      if (IsEventLogging() != MagickFalse)
        LogPrimitiveInfo(primitive_info);
      scale=ExpandAffine(&draw_info->affine);
      if ((draw_info->dash_pattern != (double *) NULL) &&
          (draw_info->dash_pattern[0] != 0.0) &&
          ((scale*draw_info->stroke_width) >= MagickEpsilon) &&
          (draw_info->stroke.alpha != (Quantum) TransparentAlpha))
        {
          /*
            Draw dash polygon.
          */
          clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
          clone_info->stroke_width=0.0;
          clone_info->stroke.alpha=(Quantum) TransparentAlpha;
          /* Fill first with a stroke-less clone, then dash the outline. */
          status&=DrawPolygonPrimitive(image,clone_info,primitive_info,
            exception);
          clone_info=DestroyDrawInfo(clone_info);
          (void) DrawDashPolygon(draw_info,primitive_info,image,exception);
          break;
        }
      mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0;
      if ((mid > 1.0) &&
          (draw_info->stroke.alpha != (Quantum) TransparentAlpha))
        {
          MagickBooleanType
            closed_path;

          /*
            Draw strokes while respecting line cap/join attributes.
          */
          for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
          closed_path=
            (primitive_info[i-1].point.x == primitive_info[0].point.x) &&
            (primitive_info[i-1].point.y == primitive_info[0].point.y) ?
            MagickTrue : MagickFalse;
          i=(ssize_t) primitive_info[0].coordinates;
          if ((((draw_info->linecap == RoundCap) ||
                (closed_path != MagickFalse)) &&
               (draw_info->linejoin == RoundJoin)) ||
              (primitive_info[i].primitive != UndefinedPrimitive))
            {
              /* Simple case: one polygon pass handles fill and stroke. */
              (void) DrawPolygonPrimitive(image,draw_info,primitive_info,
                exception);
              break;
            }
          /* Otherwise fill with a stroke-less clone, then trace the
             stroke outline as its own polygon. */
          clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
          clone_info->stroke_width=0.0;
          clone_info->stroke.alpha=(Quantum) TransparentAlpha;
          status&=DrawPolygonPrimitive(image,clone_info,primitive_info,
            exception);
          clone_info=DestroyDrawInfo(clone_info);
          status&=DrawStrokePolygon(image,draw_info,primitive_info,exception);
          break;
        }
      status&=DrawPolygonPrimitive(image,draw_info,primitive_info,exception);
      break;
    }
  }
  image_view=DestroyCacheView(image_view);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-primitive");
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w S t r o k e P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawStrokePolygon() draws a stroked polygon (line, rectangle, ellipse) on
% the image while respecting the line cap and join attributes.
%
% The format of the DrawStrokePolygon method is:
%
% MagickBooleanType DrawStrokePolygon(Image *image,
% const DrawInfo *draw_info,const PrimitiveInfo *primitive_info)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
%
*/
static void DrawRoundLinecap(Image *image,const DrawInfo *draw_info,
  const PrimitiveInfo *primitive_info,ExceptionInfo *exception)
{
  PrimitiveInfo
    linecap[5];

  register ssize_t
    n;

  /*
    Finish an open stroke endpoint with a round cap: build a degenerate
    four-point polygon around the endpoint and hand it to the polygon
    rasterizer.
  */
  n=0;
  while (n < 4)
  {
    linecap[n]=(*primitive_info);
    n++;
  }
  linecap[0].coordinates=4;
  /* Nudge three of the copies by a few epsilons so the four points form
     a tiny quadrilateral rather than a single coincident point. */
  linecap[1].point.x+=(double) (10.0*MagickEpsilon);
  linecap[2].point.x+=(double) (10.0*MagickEpsilon);
  linecap[2].point.y+=(double) (10.0*MagickEpsilon);
  linecap[3].point.y+=(double) (10.0*MagickEpsilon);
  linecap[4].primitive=UndefinedPrimitive;
  (void) DrawPolygonPrimitive(image,draw_info,linecap,exception);
}
/*
  DrawStrokePolygon() draws the stroke of each subpath of primitive_info as
  its own filled polygon (traced by TraceStrokePolygon), honoring the line
  cap and join attributes.  Returns MagickTrue when every subpath succeeded.
*/
static MagickBooleanType DrawStrokePolygon(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  DrawInfo
    *clone_info;

  MagickBooleanType
    closed_path;

  MagickStatusType
    status;

  PrimitiveInfo
    *stroke_polygon;

  register const PrimitiveInfo
    *p,
    *q;

  /*
    Draw stroked polygon.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " begin draw-stroke-polygon");
  /*
    The stroke outline is filled, not stroked: fill with the stroke color,
    disable the clone's own stroke, and use the non-zero rule.
  */
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  clone_info->fill=draw_info->stroke;
  if (clone_info->fill_pattern != (Image *) NULL)
    clone_info->fill_pattern=DestroyImage(clone_info->fill_pattern);
  if (clone_info->stroke_pattern != (Image *) NULL)
    clone_info->fill_pattern=CloneImage(clone_info->stroke_pattern,0,0,
      MagickTrue,exception);
  clone_info->stroke.alpha=(Quantum) TransparentAlpha;
  clone_info->stroke_width=0.0;
  clone_info->fill_rule=NonZeroRule;
  status=MagickTrue;
  for (p=primitive_info; p->primitive != UndefinedPrimitive; p+=p->coordinates)
  {
    stroke_polygon=TraceStrokePolygon(draw_info,p);
    if (stroke_polygon == (PrimitiveInfo *) NULL)
      {
        /* Robustness fix: the original used the trace result unchecked. */
        status=0;
        break;
      }
    status&=DrawPolygonPrimitive(image,clone_info,stroke_polygon,exception);
    /*
      Bug fix: release the traced polygon BEFORE testing status; the
      original broke out of the loop first and leaked stroke_polygon
      whenever DrawPolygonPrimitive() failed.
    */
    stroke_polygon=(PrimitiveInfo *) RelinquishMagickMemory(stroke_polygon);
    if (status == 0)
      break;
    q=p+p->coordinates-1;
    closed_path=(q->point.x == p->point.x) && (q->point.y == p->point.y) ?
      MagickTrue : MagickFalse;
    if ((draw_info->linecap == RoundCap) && (closed_path == MagickFalse))
      {
        /* Open subpath: round off both loose ends. */
        DrawRoundLinecap(image,draw_info,p,exception);
        DrawRoundLinecap(image,draw_info,q,exception);
      }
  }
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " end draw-stroke-polygon");
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A f f i n e M a t r i x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAffineMatrix() returns an AffineMatrix initialized to the identity
% matrix.
%
% The format of the GetAffineMatrix method is:
%
% void GetAffineMatrix(AffineMatrix *affine_matrix)
%
% A description of each parameter follows:
%
% o affine_matrix: the affine matrix.
%
*/
MagickExport void GetAffineMatrix(AffineMatrix *affine_matrix)
{
  /*
    Reset the affine matrix to the identity transform: zero everything
    (rotation and translation terms), then set unit scale factors.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(affine_matrix != (AffineMatrix *) NULL);
  (void) ResetMagickMemory(affine_matrix,0,sizeof(*affine_matrix));
  affine_matrix->sy=1.0;
  affine_matrix->sx=1.0;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetDrawInfo() initializes draw_info to default values from image_info.
%
% The format of the GetDrawInfo method is:
%
% void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o draw_info: the draw info.
%
*/
MagickExport void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
{
  const char
    *option;

  ExceptionInfo
    *exception;

  /*
    Initialize draw attributes: zero the structure, apply built-in
    defaults, then (if image_info is supplied) override those defaults
    with values from image_info and its generic option table.  The order
    matters: option lookups below must follow the hard-coded defaults.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(draw_info != (DrawInfo *) NULL);
  (void) ResetMagickMemory(draw_info,0,sizeof(*draw_info));
  GetAffineMatrix(&draw_info->affine);
  exception=AcquireExceptionInfo();
  /* default fill is opaque black, default stroke is fully transparent */
  (void) QueryColorCompliance("#000F",AllCompliance,&draw_info->fill,
    exception);
  (void) QueryColorCompliance("#0000",AllCompliance,&draw_info->stroke,
    exception);
  draw_info->stroke_width=1.0;
  draw_info->alpha=OpaqueAlpha;
  draw_info->fill_rule=EvenOddRule;
  draw_info->linecap=ButtCap;
  draw_info->linejoin=MiterJoin;
  draw_info->miterlimit=10;
  draw_info->decorate=NoDecoration;
  draw_info->pointsize=12.0;
  draw_info->undercolor.alpha=(Quantum) TransparentAlpha;
  draw_info->compose=OverCompositeOp;
  draw_info->render=MagickTrue;
  draw_info->debug=IsEventLogging();
  if (image_info != (ImageInfo *) NULL)
    {
      /* copy simple fields carried directly on image_info */
      draw_info->stroke_antialias=image_info->antialias;
      if (image_info->font != (char *) NULL)
        draw_info->font=AcquireString(image_info->font);
      if (image_info->density != (char *) NULL)
        draw_info->density=AcquireString(image_info->density);
      draw_info->text_antialias=image_info->antialias;
      if (image_info->pointsize != 0.0)
        draw_info->pointsize=image_info->pointsize;
      draw_info->border_color=image_info->border_color;
      if (image_info->server_name != (char *) NULL)
        draw_info->server_name=AcquireString(image_info->server_name);
      /* then apply any "-define"/option overrides */
      option=GetImageOption(image_info,"encoding");
      if (option != (const char *) NULL)
        (void) CloneString(&draw_info->encoding,option);
      option=GetImageOption(image_info,"kerning");
      if (option != (const char *) NULL)
        draw_info->kerning=StringToDouble(option,(char **) NULL);
      option=GetImageOption(image_info,"interline-spacing");
      if (option != (const char *) NULL)
        draw_info->interline_spacing=StringToDouble(option,(char **) NULL);
      option=GetImageOption(image_info,"interword-spacing");
      if (option != (const char *) NULL)
        draw_info->interword_spacing=StringToDouble(option,(char **) NULL);
      option=GetImageOption(image_info,"direction");
      if (option != (const char *) NULL)
        draw_info->direction=(DirectionType) ParseCommandOption(
          MagickDirectionOptions,MagickFalse,option);
      else
        draw_info->direction=UndefinedDirection;
      option=GetImageOption(image_info,"fill");
      if (option != (const char *) NULL)
        (void) QueryColorCompliance(option,AllCompliance,&draw_info->fill,
          exception);
      option=GetImageOption(image_info,"stroke");
      if (option != (const char *) NULL)
        (void) QueryColorCompliance(option,AllCompliance,&draw_info->stroke,
          exception);
      option=GetImageOption(image_info,"strokewidth");
      if (option != (const char *) NULL)
        draw_info->stroke_width=StringToDouble(option,(char **) NULL);
      option=GetImageOption(image_info,"undercolor");
      if (option != (const char *) NULL)
        (void) QueryColorCompliance(option,AllCompliance,&draw_info->undercolor,
          exception);
      option=GetImageOption(image_info,"gravity");
      if (option != (const char *) NULL)
        draw_info->gravity=(GravityType) ParseCommandOption(
          MagickGravityOptions,MagickFalse,option);
    }
  exception=DestroyExceptionInfo(exception);
  draw_info->signature=MagickSignature;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P e r m u t a t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Permutate() returns the permutation of (n,k), i.e. the binomial
% coefficient n!/(k!*(n-k)!).
%
% The format of the Permutate method is:
%
% void Permutate(ssize_t n,ssize_t k)
%
% A description of each parameter follows:
%
% o n:
%
% o k:
%
%
*/
static inline double Permutate(const ssize_t n,const ssize_t k)
{
  /*
    Compute the binomial coefficient C(n,k)=n!/(k!*(n-k)!) in floating
    point.  Multiplying the terms of n!/k! first and dividing by (n-k)!
    afterwards keeps the intermediate values far smaller than computing
    the three factorials outright.
  */
  double
    result;

  ssize_t
    term;

  result=1.0;
  for (term=k+1; term <= n; term++)
    result*=term;
  for (term=1; term <= (n-k); term++)
    result/=term;
  return(result);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ T r a c e P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TracePrimitive is a collection of methods for generating graphic
% primitives such as arcs, ellipses, paths, etc.
%
*/
static void TraceArc(PrimitiveInfo *primitive_info,const PointInfo start,
  const PointInfo end,const PointInfo degrees)
{
  /*
    An arc between two corner points is traced as an ellipse centered on
    the midpoint of the segment, with radii equal to half its extent in
    each axis.
  */
  PointInfo
    center,
    radii;

  center.x=0.5*(start.x+end.x);
  center.y=0.5*(start.y+end.y);
  radii.x=fabs(start.x-center.x);
  radii.y=fabs(start.y-center.y);
  TraceEllipse(primitive_info,center,radii,degrees);
}
static void TraceArcPath(PrimitiveInfo *primitive_info,const PointInfo start,
  const PointInfo end,const PointInfo arc,const double angle,
  const MagickBooleanType large_arc,const MagickBooleanType sweep)
{
  double
    alpha,
    beta,
    delta,
    factor,
    gamma,
    theta;

  PointInfo
    center,
    points[3],
    radii;

  register double
    cosine,
    sine;

  register PrimitiveInfo
    *p;

  register ssize_t
    i;

  size_t
    arc_segments;

  /*
    Trace an SVG-style elliptical arc from start to end as a sequence of
    cubic Bezier segments (one per quarter turn, at most).  angle is the
    x-axis rotation in degrees; large_arc and sweep select among the four
    candidate arcs, as in the SVG path 'A' command.
  */
  /* coincident endpoints: the arc degenerates to a single point */
  if ((start.x == end.x) && (start.y == end.y))
    {
      TracePoint(primitive_info,end);
      return;
    }
  radii.x=fabs(arc.x);
  radii.y=fabs(arc.y);
  /* a zero radius degenerates to a straight line */
  if ((radii.x == 0.0) || (radii.y == 0.0))
    {
      TraceLine(primitive_info,start,end);
      return;
    }
  cosine=cos(DegreesToRadians(fmod((double) angle,360.0)));
  sine=sin(DegreesToRadians(fmod((double) angle,360.0)));
  /* half the endpoint delta, rotated into the ellipse's frame */
  center.x=(double) (cosine*(end.x-start.x)/2+sine*(end.y-start.y)/2);
  center.y=(double) (cosine*(end.y-start.y)/2-sine*(end.x-start.x)/2);
  delta=(center.x*center.x)/(radii.x*radii.x)+(center.y*center.y)/
    (radii.y*radii.y);
  if (delta < MagickEpsilon)
    {
      TraceLine(primitive_info,start,end);
      return;
    }
  /* radii too small to span the endpoints: scale them up uniformly */
  if (delta > 1.0)
    {
      radii.x*=sqrt((double) delta);
      radii.y*=sqrt((double) delta);
    }
  /* map both endpoints onto the unit circle of the rotated ellipse */
  points[0].x=(double) (cosine*start.x/radii.x+sine*start.y/radii.x);
  points[0].y=(double) (cosine*start.y/radii.y-sine*start.x/radii.y);
  points[1].x=(double) (cosine*end.x/radii.x+sine*end.y/radii.x);
  points[1].y=(double) (cosine*end.y/radii.y-sine*end.x/radii.y);
  alpha=points[1].x-points[0].x;
  beta=points[1].y-points[0].y;
  factor=PerceptibleReciprocal(alpha*alpha+beta*beta)-0.25;
  if (factor <= 0.0)
    factor=0.0;
  else
    {
      factor=sqrt((double) factor);
      /* flip the center to the other side for the alternate arc */
      if (sweep == large_arc)
        factor=(-factor);
    }
  center.x=(double) ((points[0].x+points[1].x)/2-factor*beta);
  center.y=(double) ((points[0].y+points[1].y)/2+factor*alpha);
  /* alpha is the start angle, theta the signed angular extent */
  alpha=atan2(points[0].y-center.y,points[0].x-center.x);
  theta=atan2(points[1].y-center.y,points[1].x-center.x)-alpha;
  if ((theta < 0.0) && (sweep != MagickFalse))
    theta+=(double) (2.0*MagickPI);
  else
    if ((theta > 0.0) && (sweep == MagickFalse))
      theta-=(double) (2.0*MagickPI);
  /* one Bezier segment per quarter turn of arc */
  arc_segments=(size_t) ceil(fabs((double) (theta/(0.5*MagickPI+
    MagickEpsilon))));
  p=primitive_info;
  for (i=0; i < (ssize_t) arc_segments; i++)
  {
    /* gamma is the standard cubic-Bezier handle length for this span */
    beta=0.5*((alpha+(i+1)*theta/arc_segments)-(alpha+i*theta/arc_segments));
    gamma=(8.0/3.0)*sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))*
      sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))/
      sin(fmod((double) beta,DegreesToRadians(360.0)));
    points[0].x=(double) (center.x+cos(fmod((double) (alpha+(double) i*theta/
      arc_segments),DegreesToRadians(360.0)))-gamma*sin(fmod((double) (alpha+
      (double) i*theta/arc_segments),DegreesToRadians(360.0))));
    points[0].y=(double) (center.y+sin(fmod((double) (alpha+(double) i*theta/
      arc_segments),DegreesToRadians(360.0)))+gamma*cos(fmod((double) (alpha+
      (double) i*theta/arc_segments),DegreesToRadians(360.0))));
    points[2].x=(double) (center.x+cos(fmod((double) (alpha+(double) (i+1)*
      theta/arc_segments),DegreesToRadians(360.0))));
    points[2].y=(double) (center.y+sin(fmod((double) (alpha+(double) (i+1)*
      theta/arc_segments),DegreesToRadians(360.0))));
    points[1].x=(double) (points[2].x+gamma*sin(fmod((double) (alpha+(double)
      (i+1)*theta/arc_segments),DegreesToRadians(360.0))));
    points[1].y=(double) (points[2].y-gamma*cos(fmod((double) (alpha+(double)
      (i+1)*theta/arc_segments),DegreesToRadians(360.0))));
    /* segment start is the previous segment's end (or the path start) */
    p->point.x=(p == primitive_info) ? start.x : (p-1)->point.x;
    p->point.y=(p == primitive_info) ? start.y : (p-1)->point.y;
    /* map the control points back out of the unit-circle frame */
    (p+1)->point.x=(double) (cosine*radii.x*points[0].x-sine*radii.y*
      points[0].y);
    (p+1)->point.y=(double) (sine*radii.x*points[0].x+cosine*radii.y*
      points[0].y);
    (p+2)->point.x=(double) (cosine*radii.x*points[1].x-sine*radii.y*
      points[1].y);
    (p+2)->point.y=(double) (sine*radii.x*points[1].x+cosine*radii.y*
      points[1].y);
    (p+3)->point.x=(double) (cosine*radii.x*points[2].x-sine*radii.y*
      points[2].y);
    (p+3)->point.y=(double) (sine*radii.x*points[2].x+cosine*radii.y*
      points[2].y);
    /* snap the final segment exactly onto the requested endpoint */
    if (i == (ssize_t) (arc_segments-1))
      (p+3)->point=end;
    TraceBezier(p,4);
    p+=p->coordinates;
  }
  primitive_info->coordinates=(size_t) (p-primitive_info);
  /* stamp the primitive type on every generated vertex, walking backward */
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
}
static void TraceBezier(PrimitiveInfo *primitive_info,
  const size_t number_coordinates)
{
  double
    alpha,
    *coefficients,
    weight;

  PointInfo
    end,
    point,
    *points;

  register PrimitiveInfo
    *p;

  register ssize_t
    i,
    j;

  size_t
    control_points,
    quantum;

  /*
    Flatten a Bezier curve of number_coordinates control points (stored
    in primitive_info[0..number_coordinates-1].point) into a polyline of
    control_points vertices written back over primitive_info.
    Allocate coefficients; the sampling density (quantum) scales with the
    bounding extent of the control points, capped at BezierQuantum.
  */
  quantum=number_coordinates;
  for (i=0; i < (ssize_t) number_coordinates; i++)
  {
    for (j=i+1; j < (ssize_t) number_coordinates; j++)
    {
      alpha=fabs(primitive_info[j].point.x-primitive_info[i].point.x);
      if (alpha > (double) quantum)
        quantum=(size_t) alpha;
      alpha=fabs(primitive_info[j].point.y-primitive_info[i].point.y);
      if (alpha > (double) quantum)
        quantum=(size_t) alpha;
    }
  }
  quantum=(size_t) MagickMin((double) quantum/number_coordinates,
    (double) BezierQuantum);
  control_points=quantum*number_coordinates;
  coefficients=(double *) AcquireQuantumMemory((size_t)
    number_coordinates,sizeof(*coefficients));
  points=(PointInfo *) AcquireQuantumMemory((size_t) control_points,
    sizeof(*points));
  if ((coefficients == (double *) NULL) ||
      (points == (PointInfo *) NULL))
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  /*
    Compute bezier points by evaluating the Bernstein form: coefficients
    holds the binomial coefficients C(n-1,i), and alpha accumulates
    (1-w)^(n-1-j)*w^j incrementally across the inner loop.
  */
  end=primitive_info[number_coordinates-1].point;
  for (i=0; i < (ssize_t) number_coordinates; i++)
    coefficients[i]=Permutate((ssize_t) number_coordinates-1,i);
  weight=0.0;
  for (i=0; i < (ssize_t) control_points; i++)
  {
    p=primitive_info;
    point.x=0.0;
    point.y=0.0;
    alpha=pow((double) (1.0-weight),(double) number_coordinates-1.0);
    for (j=0; j < (ssize_t) number_coordinates; j++)
    {
      point.x+=alpha*coefficients[j]*p->point.x;
      point.y+=alpha*coefficients[j]*p->point.y;
      /* weight stays strictly below 1.0, so this never divides by zero */
      alpha*=weight/(1.0-weight);
      p++;
    }
    points[i]=point;
    weight+=1.0/control_points;
  }
  /*
    Bezier curves are just short segmented polys; the exact endpoint is
    appended last so the flattened curve ends where the caller asked.
  */
  p=primitive_info;
  for (i=0; i < (ssize_t) control_points; i++)
  {
    TracePoint(p,points[i]);
    p+=p->coordinates;
  }
  TracePoint(p,end);
  p+=p->coordinates;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  /* stamp the primitive type on every generated vertex, walking backward */
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  points=(PointInfo *) RelinquishMagickMemory(points);
  coefficients=(double *) RelinquishMagickMemory(coefficients);
}
static void TraceCircle(PrimitiveInfo *primitive_info,const PointInfo start,
  const PointInfo end)
{
  /*
    A circle is an ellipse whose x and y radii both equal the distance
    from the center (start) to the perimeter point (end), swept through
    a full 360 degrees.
  */
  double
    radius;

  PointInfo
    degrees,
    offset;

  radius=hypot(end.x-start.x,end.y-start.y);
  offset.x=radius;
  offset.y=radius;
  degrees.x=0.0;
  degrees.y=360.0;
  TraceEllipse(primitive_info,start,offset,degrees);
}
static void TraceEllipse(PrimitiveInfo *primitive_info,const PointInfo start,
  const PointInfo stop,const PointInfo degrees)
{
  double
    delta,
    step,
    y;

  PointInfo
    angle,
    point;

  register PrimitiveInfo
    *p;

  register ssize_t
    i;

  /*
    Ellipses are just short segmented polys: sample the parametric form
    center+(cos(t)*rx, sin(t)*ry) from degrees.x to degrees.y.  start is
    the center, stop holds the x/y radii.
  */
  /* zero radii degenerate to a single point at the center */
  if ((stop.x == 0.0) && (stop.y == 0.0))
    {
      TracePoint(primitive_info,start);
      return;
    }
  /* shrink the angular step for large radii so segments stay short */
  delta=2.0/MagickMax(stop.x,stop.y);
  step=(double) (MagickPI/8.0);
  if ((delta >= 0.0) && (delta < (double) (MagickPI/8.0)))
    step=(double) (MagickPI/(4*(MagickPI/delta/2+0.5)));
  angle.x=DegreesToRadians(degrees.x);
  y=degrees.y;
  /* normalize so the sweep is always forward from the start angle */
  while (y < degrees.x)
    y+=360.0;
  angle.y=(double) DegreesToRadians(y);
  for (p=primitive_info; angle.x < angle.y; angle.x+=step)
  {
    point.x=cos(fmod(angle.x,DegreesToRadians(360.0)))*stop.x+start.x;
    point.y=sin(fmod(angle.x,DegreesToRadians(360.0)))*stop.y+start.y;
    TracePoint(p,point);
    p+=p->coordinates;
  }
  /* emit the exact terminal angle so the arc closes precisely */
  point.x=cos(fmod(angle.y,DegreesToRadians(360.0)))*stop.x+start.x;
  point.y=sin(fmod(angle.y,DegreesToRadians(360.0)))*stop.y+start.y;
  TracePoint(p,point);
  p+=p->coordinates;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  /* stamp the primitive type on every generated vertex, walking backward */
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
}
static void TraceLine(PrimitiveInfo *primitive_info,const PointInfo start,
  const PointInfo end)
{
  /*
    Trace a two-vertex line primitive; endpoints closer than epsilon in
    both axes collapse to a single point primitive.
  */
  TracePoint(primitive_info,start);
  if ((fabs(start.x-end.x) >= MagickEpsilon) ||
      (fabs(start.y-end.y) >= MagickEpsilon))
    {
      TracePoint(primitive_info+1,end);
      (primitive_info+1)->primitive=primitive_info->primitive;
      primitive_info->coordinates=2;
      return;
    }
  primitive_info->primitive=PointPrimitive;
  primitive_info->coordinates=1;
}
static size_t TracePath(PrimitiveInfo *primitive_info,const char *path)
{
  char
    token[MaxTextExtent];

  const char
    *p;

  int
    attribute,
    last_attribute;

  double
    x,
    y;

  PointInfo
    end,
    points[4],
    point,
    start;

  PrimitiveType
    primitive_type;

  register PrimitiveInfo
    *q;

  register ssize_t
    i;

  size_t
    number_coordinates,
    z_count;

  /*
    Convert an SVG-style path string into point primitives, returning the
    total number of coordinates generated.  Uppercase commands take
    absolute coordinates; lowercase commands are relative to the current
    point.
  */
  attribute=0;
  end.x=0.0;
  end.y=0.0;
  point.x=0.0;
  point.y=0.0;
  start.x=0.0;
  start.y=0.0;
  number_coordinates=0;
  z_count=0;
  /*
    Zero the whole control-point array, not just its first element
    (sizeof(*points) covered only points[0]): the 'S'/'T' smooth-curve
    commands read points[1..3] before any other command has necessarily
    written them.
  */
  (void) ResetMagickMemory(points,0,sizeof(points));
  primitive_type=primitive_info->primitive;
  q=primitive_info;
  for (p=path; *p != '\0'; )
  {
    /* skip whitespace between commands */
    while (isspace((int) ((unsigned char) *p)) != 0)
      p++;
    if (*p == '\0')
      break;
    last_attribute=attribute;
    attribute=(int) (*p++);
    switch (attribute)
    {
      case 'a':
      case 'A':
      {
        MagickBooleanType
          large_arc,
          sweep;

        double
          angle;

        PointInfo
          arc;

        /*
          Elliptical arc: rx ry x-rotation large-arc-flag sweep-flag x y.
        */
        do
        {
          GetMagickToken(p,&p,token);
          if (*token == ',')
            GetMagickToken(p,&p,token);
          arc.x=StringToDouble(token,(char **) NULL);
          GetMagickToken(p,&p,token);
          if (*token == ',')
            GetMagickToken(p,&p,token);
          arc.y=StringToDouble(token,(char **) NULL);
          GetMagickToken(p,&p,token);
          if (*token == ',')
            GetMagickToken(p,&p,token);
          angle=StringToDouble(token,(char **) NULL);
          GetMagickToken(p,&p,token);
          if (*token == ',')
            GetMagickToken(p,&p,token);
          large_arc=StringToLong(token) != 0 ? MagickTrue : MagickFalse;
          GetMagickToken(p,&p,token);
          if (*token == ',')
            GetMagickToken(p,&p,token);
          sweep=StringToLong(token) != 0 ? MagickTrue : MagickFalse;
          GetMagickToken(p,&p,token);
          if (*token == ',')
            GetMagickToken(p,&p,token);
          x=StringToDouble(token,(char **) NULL);
          GetMagickToken(p,&p,token);
          if (*token == ',')
            GetMagickToken(p,&p,token);
          y=StringToDouble(token,(char **) NULL);
          end.x=(double) (attribute == (int) 'A' ? x : point.x+x);
          end.y=(double) (attribute == (int) 'A' ? y : point.y+y);
          TraceArcPath(q,point,end,arc,angle,large_arc,sweep);
          q+=q->coordinates;
          point=end;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'c':
      case 'C':
      {
        /*
          Cubic Bezier: two control points followed by the endpoint; the
          current point supplies the first control point.
        */
        do
        {
          points[0]=point;
          for (i=1; i < 4; i++)
          {
            GetMagickToken(p,&p,token);
            if (*token == ',')
              GetMagickToken(p,&p,token);
            x=StringToDouble(token,(char **) NULL);
            GetMagickToken(p,&p,token);
            if (*token == ',')
              GetMagickToken(p,&p,token);
            y=StringToDouble(token,(char **) NULL);
            end.x=(double) (attribute == (int) 'C' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'C' ? y : point.y+y);
            points[i]=end;
          }
          for (i=0; i < 4; i++)
            (q+i)->point=points[i];
          TraceBezier(q,4);
          q+=q->coordinates;
          point=end;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'H':
      case 'h':
      {
        /* horizontal lineto: only x changes */
        do
        {
          GetMagickToken(p,&p,token);
          if (*token == ',')
            GetMagickToken(p,&p,token);
          x=StringToDouble(token,(char **) NULL);
          point.x=(double) (attribute == (int) 'H' ? x: point.x+x);
          TracePoint(q,point);
          q+=q->coordinates;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'l':
      case 'L':
      {
        /* lineto: a sequence of x,y coordinate pairs */
        do
        {
          GetMagickToken(p,&p,token);
          if (*token == ',')
            GetMagickToken(p,&p,token);
          x=StringToDouble(token,(char **) NULL);
          GetMagickToken(p,&p,token);
          if (*token == ',')
            GetMagickToken(p,&p,token);
          y=StringToDouble(token,(char **) NULL);
          point.x=(double) (attribute == (int) 'L' ? x : point.x+x);
          point.y=(double) (attribute == (int) 'L' ? y : point.y+y);
          TracePoint(q,point);
          q+=q->coordinates;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'M':
      case 'm':
      {
        /*
          Moveto: close out the current subpath, then start a new one at
          the first coordinate pair; extra pairs act as implicit linetos.
        */
        if (q != primitive_info)
          {
            primitive_info->coordinates=(size_t) (q-primitive_info);
            number_coordinates+=primitive_info->coordinates;
            primitive_info=q;
          }
        i=0;
        do
        {
          GetMagickToken(p,&p,token);
          if (*token == ',')
            GetMagickToken(p,&p,token);
          x=StringToDouble(token,(char **) NULL);
          GetMagickToken(p,&p,token);
          if (*token == ',')
            GetMagickToken(p,&p,token);
          y=StringToDouble(token,(char **) NULL);
          point.x=(double) (attribute == (int) 'M' ? x : point.x+x);
          point.y=(double) (attribute == (int) 'M' ? y : point.y+y);
          if (i == 0)
            start=point;
          i++;
          TracePoint(q,point);
          q+=q->coordinates;
          /*
            NOTE(review): i was incremented above, so (i != 0) is always
            true here and every absolute-moveto pair is emitted twice --
            confirm this duplication is intended.
          */
          if ((i != 0) && (attribute == (int) 'M'))
            {
              TracePoint(q,point);
              q+=q->coordinates;
            }
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'q':
      case 'Q':
      {
        /*
          Quadratic Bezier: one control point followed by the endpoint.
        */
        do
        {
          points[0]=point;
          for (i=1; i < 3; i++)
          {
            GetMagickToken(p,&p,token);
            if (*token == ',')
              GetMagickToken(p,&p,token);
            x=StringToDouble(token,(char **) NULL);
            GetMagickToken(p,&p,token);
            if (*token == ',')
              GetMagickToken(p,&p,token);
            y=StringToDouble(token,(char **) NULL);
            if (*p == ',')
              p++;
            end.x=(double) (attribute == (int) 'Q' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'Q' ? y : point.y+y);
            points[i]=end;
          }
          for (i=0; i < 3; i++)
            (q+i)->point=points[i];
          TraceBezier(q,3);
          q+=q->coordinates;
          point=end;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 's':
      case 'S':
      {
        /*
          Smooth cubic Bezier: the first control point is the reflection
          of the previous curve's second control point about the current
          point (only when the previous command was also a cubic).
        */
        do
        {
          points[0]=points[3];
          points[1].x=2.0*points[3].x-points[2].x;
          points[1].y=2.0*points[3].y-points[2].y;
          for (i=2; i < 4; i++)
          {
            GetMagickToken(p,&p,token);
            if (*token == ',')
              GetMagickToken(p,&p,token);
            x=StringToDouble(token,(char **) NULL);
            GetMagickToken(p,&p,token);
            if (*token == ',')
              GetMagickToken(p,&p,token);
            y=StringToDouble(token,(char **) NULL);
            if (*p == ',')
              p++;
            end.x=(double) (attribute == (int) 'S' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'S' ? y : point.y+y);
            points[i]=end;
          }
          if (strchr("CcSs",last_attribute) == (char *) NULL)
            {
              points[0]=points[2];
              points[1]=points[3];
            }
          for (i=0; i < 4; i++)
            (q+i)->point=points[i];
          TraceBezier(q,4);
          q+=q->coordinates;
          point=end;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 't':
      case 'T':
      {
        /*
          Smooth quadratic Bezier: the control point is the reflection of
          the previous curve's control point about the current point
          (only when the previous command was also a quadratic).
        */
        do
        {
          points[0]=points[2];
          points[1].x=2.0*points[2].x-points[1].x;
          points[1].y=2.0*points[2].y-points[1].y;
          for (i=2; i < 3; i++)
          {
            GetMagickToken(p,&p,token);
            if (*token == ',')
              GetMagickToken(p,&p,token);
            x=StringToDouble(token,(char **) NULL);
            GetMagickToken(p,&p,token);
            if (*token == ',')
              GetMagickToken(p,&p,token);
            y=StringToDouble(token,(char **) NULL);
            end.x=(double) (attribute == (int) 'T' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'T' ? y : point.y+y);
            points[i]=end;
          }
          if (strchr("QqTt",last_attribute) == (char *) NULL)
            {
              points[0]=points[2];
              points[1]=points[3];
            }
          for (i=0; i < 3; i++)
            (q+i)->point=points[i];
          TraceBezier(q,3);
          q+=q->coordinates;
          point=end;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'v':
      case 'V':
      {
        /* vertical lineto: only y changes */
        do
        {
          GetMagickToken(p,&p,token);
          if (*token == ',')
            GetMagickToken(p,&p,token);
          y=StringToDouble(token,(char **) NULL);
          point.y=(double) (attribute == (int) 'V' ? y : point.y+y);
          TracePoint(q,point);
          q+=q->coordinates;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'z':
      case 'Z':
      {
        /* closepath: return to the subpath start and seal the subpath */
        point=start;
        TracePoint(q,point);
        q+=q->coordinates;
        primitive_info->coordinates=(size_t) (q-primitive_info);
        number_coordinates+=primitive_info->coordinates;
        primitive_info=q;
        z_count++;
        break;
      }
      default:
      {
        if (isalpha((int) ((unsigned char) attribute)) != 0)
          (void) FormatLocaleFile(stderr,"attribute not recognized: %c\n",
            attribute);
        break;
      }
    }
  }
  primitive_info->coordinates=(size_t) (q-primitive_info);
  number_coordinates+=primitive_info->coordinates;
  /*
    Stamp the primitive type on every vertex; multiple closed subpaths
    switch the fill method so interior holes render correctly.
  */
  for (i=0; i < (ssize_t) number_coordinates; i++)
  {
    q--;
    q->primitive=primitive_type;
    if (z_count > 1)
      q->method=FillToBorderMethod;
  }
  q=primitive_info;
  return(number_coordinates);
}
static void TraceRectangle(PrimitiveInfo *primitive_info,const PointInfo start,
  const PointInfo end)
{
  /*
    Trace an axis-aligned rectangle as a closed five-vertex polyline:
    start, (start.x,end.y), end, (end.x,start.y), then back to start.
  */
  PointInfo
    vertices[5];

  register PrimitiveInfo
    *q;

  register ssize_t
    i;

  vertices[0]=start;
  vertices[1].x=start.x;
  vertices[1].y=end.y;
  vertices[2]=end;
  vertices[3].x=end.x;
  vertices[3].y=start.y;
  vertices[4]=start;
  q=primitive_info;
  for (i=0; i < 5; i++)
  {
    TracePoint(q,vertices[i]);
    q+=q->coordinates;
  }
  primitive_info->coordinates=(size_t) (q-primitive_info);
  /* stamp the primitive type on every generated vertex, walking backward */
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    q->primitive=primitive_info->primitive;
    q--;
  }
}
static void TraceRoundRectangle(PrimitiveInfo *primitive_info,
  const PointInfo start,const PointInfo end,PointInfo arc)
{
  /*
    Trace a rounded rectangle as four quarter-ellipse corner arcs joined
    in clockwise order, closed back to the first vertex.
  */
  PointInfo
    degrees,
    extent,
    point;

  register PrimitiveInfo
    *q;

  register ssize_t
    i;

  /* clamp corner radii so opposite corners cannot overlap */
  extent.x=fabs(end.x-start.x);
  extent.y=fabs(end.y-start.y);
  if (arc.x > (0.5*extent.x))
    arc.x=0.5*extent.x;
  if (arc.y > (0.5*extent.y))
    arc.y=0.5*extent.y;
  q=primitive_info;
  /* top-right corner: 270..360 degrees */
  point.x=start.x+extent.x-arc.x;
  point.y=start.y+arc.y;
  degrees.x=270.0;
  degrees.y=360.0;
  TraceEllipse(q,point,arc,degrees);
  q+=q->coordinates;
  /* bottom-right corner: 0..90 degrees */
  point.x=start.x+extent.x-arc.x;
  point.y=start.y+extent.y-arc.y;
  degrees.x=0.0;
  degrees.y=90.0;
  TraceEllipse(q,point,arc,degrees);
  q+=q->coordinates;
  /* bottom-left corner: 90..180 degrees */
  point.x=start.x+arc.x;
  point.y=start.y+extent.y-arc.y;
  degrees.x=90.0;
  degrees.y=180.0;
  TraceEllipse(q,point,arc,degrees);
  q+=q->coordinates;
  /* top-left corner: 180..270 degrees */
  point.x=start.x+arc.x;
  point.y=start.y+arc.y;
  degrees.x=180.0;
  degrees.y=270.0;
  TraceEllipse(q,point,arc,degrees);
  q+=q->coordinates;
  /* close the path back to the first vertex */
  TracePoint(q,primitive_info->point);
  q+=q->coordinates;
  primitive_info->coordinates=(size_t) (q-primitive_info);
  /* stamp the primitive type on every generated vertex, walking backward */
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    q->primitive=primitive_info->primitive;
    q--;
  }
}
static void TraceSquareLinecap(PrimitiveInfo *primitive_info,
  const size_t number_vertices,const double offset)
{
  double
    distance;

  register double
    dx,
    dy;

  register ssize_t
    i;

  ssize_t
    j;

  /*
    Extend both ends of an open polyline outward by `offset` along the
    direction of their terminal segments, producing square line caps.
  */
  dx=0.0;
  dy=0.0;
  /* find the first vertex measurably distinct from vertex 0 */
  for (i=1; i < (ssize_t) number_vertices; i++)
  {
    dx=primitive_info[0].point.x-primitive_info[i].point.x;
    dy=primitive_info[0].point.y-primitive_info[i].point.y;
    if ((fabs((double) dx) >= MagickEpsilon) ||
        (fabs((double) dy) >= MagickEpsilon))
      break;
  }
  if (i == (ssize_t) number_vertices)
    i=(ssize_t) number_vertices-1L;
  /*
    NOTE(review): if every vertex coincides, dx==dy==0 and distance==0,
    so the divisions below produce NaN/Inf -- confirm callers guarantee
    at least two distinct vertices.
  */
  distance=hypot((double) dx,(double) dy);
  /* push the first vertex outward along the first segment's direction */
  primitive_info[0].point.x=(double) (primitive_info[i].point.x+
    dx*(distance+offset)/distance);
  primitive_info[0].point.y=(double) (primitive_info[i].point.y+
    dy*(distance+offset)/distance);
  /* find the last vertex measurably distinct from the final vertex */
  for (j=(ssize_t) number_vertices-2; j >= 0; j--)
  {
    dx=primitive_info[number_vertices-1].point.x-primitive_info[j].point.x;
    dy=primitive_info[number_vertices-1].point.y-primitive_info[j].point.y;
    if ((fabs((double) dx) >= MagickEpsilon) ||
        (fabs((double) dy) >= MagickEpsilon))
      break;
  }
  distance=hypot((double) dx,(double) dy);
  /* push the last vertex outward along the final segment's direction */
  primitive_info[number_vertices-1].point.x=(double) (primitive_info[j].point.x+
    dx*(distance+offset)/distance);
  primitive_info[number_vertices-1].point.y=(double) (primitive_info[j].point.y+
    dy*(distance+offset)/distance);
}
static inline double DrawEpsilonReciprocal(const double x)
{
#define DrawEpsilon  ((double) 1.0e-10)

  /*
    Return 1/x, but clamp the result to +/-(1/DrawEpsilon) when |x| falls
    below DrawEpsilon so callers never divide by (near) zero; the clamp
    preserves the sign of x.
  */
  double
    sign;

  sign=(x < (double) 0.0) ? (double) -1.0 : (double) 1.0;
  if ((sign*x) < DrawEpsilon)
    return(sign*((double) 1.0/DrawEpsilon));
  return((double) 1.0/x);
}
static PrimitiveInfo *TraceStrokePolygon(const DrawInfo *draw_info,
  const PrimitiveInfo *primitive_info)
{
  typedef struct _LineSegment
  {
    double
      p,
      q;
  } LineSegment;

  LineSegment
    dx,
    dy,
    inverse_slope,
    slope,
    theta;

  MagickBooleanType
    closed_path;

  double
    delta_theta,
    dot_product,
    mid,
    miterlimit;

  PointInfo
    box_p[5],
    box_q[5],
    center,
    offset,
    *path_p,
    *path_q;

  PrimitiveInfo
    *polygon_primitive,
    *stroke_polygon;

  register ssize_t
    i;

  size_t
    arc_segments,
    max_strokes,
    number_vertices;

  ssize_t
    j,
    n,
    p,
    q;

  /*
    Build the outline polygon of a stroked path: walk the input polyline
    segment by segment, offsetting each side by half the stroke width
    (path_p on one side, path_q on the other) and inserting bevel, miter,
    or round joins between consecutive segments per draw_info->linejoin.
    Returns a newly allocated polygon (caller frees) or NULL on failure.
    Allocate paths.
  */
  number_vertices=primitive_info->coordinates;
  max_strokes=2*number_vertices+6*BezierQuantum+360;
  path_p=(PointInfo *) AcquireQuantumMemory((size_t) max_strokes,
    sizeof(*path_p));
  path_q=(PointInfo *) AcquireQuantumMemory((size_t) max_strokes,
    sizeof(*path_q));
  polygon_primitive=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
    number_vertices+2UL,sizeof(*polygon_primitive));
  if ((path_p == (PointInfo *) NULL) || (path_q == (PointInfo *) NULL) ||
      (polygon_primitive == (PrimitiveInfo *) NULL))
    return((PrimitiveInfo *) NULL);
  (void) CopyMagickMemory(polygon_primitive,primitive_info,(size_t)
    number_vertices*sizeof(*polygon_primitive));
  closed_path=
    (primitive_info[number_vertices-1].point.x == primitive_info[0].point.x) &&
    (primitive_info[number_vertices-1].point.y == primitive_info[0].point.y) ?
    MagickTrue : MagickFalse;
  /* a closed path needs one extra wrap-around vertex to join the ends */
  if ((draw_info->linejoin == RoundJoin) ||
      ((draw_info->linejoin == MiterJoin) && (closed_path != MagickFalse)))
    {
      polygon_primitive[number_vertices]=primitive_info[1];
      number_vertices++;
    }
  polygon_primitive[number_vertices].primitive=UndefinedPrimitive;
  /*
    Compute the slope for the first line segment, p: skip leading
    vertices that coincide within epsilon.
  */
  dx.p=0.0;
  dy.p=0.0;
  for (n=1; n < (ssize_t) number_vertices; n++)
  {
    dx.p=polygon_primitive[n].point.x-polygon_primitive[0].point.x;
    dy.p=polygon_primitive[n].point.y-polygon_primitive[0].point.y;
    if ((fabs(dx.p) >= MagickEpsilon) || (fabs(dy.p) >= MagickEpsilon))
      break;
  }
  if (n == (ssize_t) number_vertices)
    n=(ssize_t) number_vertices-1L;
  slope.p=DrawEpsilonReciprocal(dx.p)*dy.p;
  inverse_slope.p=(-1.0*DrawEpsilonReciprocal(slope.p));
  /* mid is half the stroke width in device space */
  mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0;
  /* squared miter-length cutoff used by all three join styles below */
  miterlimit=(double) (draw_info->miterlimit*draw_info->miterlimit*
    mid*mid);
  if ((draw_info->linecap == SquareCap) && (closed_path == MagickFalse))
    TraceSquareLinecap(polygon_primitive,number_vertices,mid);
  /* perpendicular offset of the first segment by half the stroke width */
  offset.x=sqrt((double) (mid*mid/(inverse_slope.p*inverse_slope.p+1.0)));
  offset.y=(double) (offset.x*inverse_slope.p);
  if ((dy.p*offset.x-dx.p*offset.y) > 0.0)
    {
      box_p[0].x=polygon_primitive[0].point.x-offset.x;
      box_p[0].y=polygon_primitive[0].point.y-offset.x*inverse_slope.p;
      box_p[1].x=polygon_primitive[n].point.x-offset.x;
      box_p[1].y=polygon_primitive[n].point.y-offset.x*inverse_slope.p;
      box_q[0].x=polygon_primitive[0].point.x+offset.x;
      box_q[0].y=polygon_primitive[0].point.y+offset.x*inverse_slope.p;
      box_q[1].x=polygon_primitive[n].point.x+offset.x;
      box_q[1].y=polygon_primitive[n].point.y+offset.x*inverse_slope.p;
    }
  else
    {
      box_p[0].x=polygon_primitive[0].point.x+offset.x;
      box_p[0].y=polygon_primitive[0].point.y+offset.y;
      box_p[1].x=polygon_primitive[n].point.x+offset.x;
      box_p[1].y=polygon_primitive[n].point.y+offset.y;
      box_q[0].x=polygon_primitive[0].point.x-offset.x;
      box_q[0].y=polygon_primitive[0].point.y-offset.y;
      box_q[1].x=polygon_primitive[n].point.x-offset.x;
      box_q[1].y=polygon_primitive[n].point.y-offset.y;
    }
  /*
    Create strokes for the line join attribute: bevel, miter, round.
    NOTE(review): the two index variables appear swapped in the next two
    statements (p indexes path_q, q indexes path_p), but both are zero
    here so the effect is identical to the intended assignment.
  */
  p=0;
  q=0;
  path_q[p++]=box_q[0];
  path_p[q++]=box_p[0];
  for (i=(ssize_t) n+1; i < (ssize_t) number_vertices; i++)
  {
    /*
      Compute the slope for this line segment, q.
    */
    dx.q=polygon_primitive[i].point.x-polygon_primitive[n].point.x;
    dy.q=polygon_primitive[i].point.y-polygon_primitive[n].point.y;
    dot_product=dx.q*dx.q+dy.q*dy.q;
    /* skip segments shorter than half a pixel */
    if (dot_product < 0.25)
      continue;
    slope.q=DrawEpsilonReciprocal(dx.q)*dy.q;
    inverse_slope.q=(-1.0*DrawEpsilonReciprocal(slope.q));
    offset.x=sqrt((double) (mid*mid/(inverse_slope.q*inverse_slope.q+1.0)));
    offset.y=(double) (offset.x*inverse_slope.q);
    dot_product=dy.q*offset.x-dx.q*offset.y;
    if (dot_product > 0.0)
      {
        box_p[2].x=polygon_primitive[n].point.x-offset.x;
        box_p[2].y=polygon_primitive[n].point.y-offset.y;
        box_p[3].x=polygon_primitive[i].point.x-offset.x;
        box_p[3].y=polygon_primitive[i].point.y-offset.y;
        box_q[2].x=polygon_primitive[n].point.x+offset.x;
        box_q[2].y=polygon_primitive[n].point.y+offset.y;
        box_q[3].x=polygon_primitive[i].point.x+offset.x;
        box_q[3].y=polygon_primitive[i].point.y+offset.y;
      }
    else
      {
        box_p[2].x=polygon_primitive[n].point.x+offset.x;
        box_p[2].y=polygon_primitive[n].point.y+offset.y;
        box_p[3].x=polygon_primitive[i].point.x+offset.x;
        box_p[3].y=polygon_primitive[i].point.y+offset.y;
        box_q[2].x=polygon_primitive[n].point.x-offset.x;
        box_q[2].y=polygon_primitive[n].point.y-offset.y;
        box_q[3].x=polygon_primitive[i].point.x-offset.x;
        box_q[3].y=polygon_primitive[i].point.y-offset.y;
      }
    /* box_[pq][4] is the miter point where the two offset edges meet */
    if (fabs((double) (slope.p-slope.q)) < MagickEpsilon)
      {
        box_p[4]=box_p[1];
        box_q[4]=box_q[1];
      }
    else
      {
        box_p[4].x=(double) ((slope.p*box_p[0].x-box_p[0].y-slope.q*box_p[3].x+
          box_p[3].y)/(slope.p-slope.q));
        box_p[4].y=(double) (slope.p*(box_p[4].x-box_p[0].x)+box_p[0].y);
        box_q[4].x=(double) ((slope.p*box_q[0].x-box_q[0].y-slope.q*box_q[3].x+
          box_q[3].y)/(slope.p-slope.q));
        box_q[4].y=(double) (slope.p*(box_q[4].x-box_q[0].x)+box_q[0].y);
      }
    /* grow the offset-path buffers before a join can overflow them */
    if (q >= (ssize_t) (max_strokes-6*BezierQuantum-360))
      {
        max_strokes+=6*BezierQuantum+360;
        path_p=(PointInfo *) ResizeQuantumMemory(path_p,(size_t) max_strokes,
          sizeof(*path_p));
        path_q=(PointInfo *) ResizeQuantumMemory(path_q,(size_t) max_strokes,
          sizeof(*path_q));
        if ((path_p == (PointInfo *) NULL) || (path_q == (PointInfo *) NULL))
          {
            polygon_primitive=(PrimitiveInfo *)
              RelinquishMagickMemory(polygon_primitive);
            return((PrimitiveInfo *) NULL);
          }
      }
    /* cross product decides which side is convex at this vertex */
    dot_product=dx.q*dy.p-dx.p*dy.q;
    if (dot_product <= 0.0)
      switch (draw_info->linejoin)
      {
        case BevelJoin:
        {
          path_q[q++]=box_q[1];
          path_q[q++]=box_q[2];
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            path_p[p++]=box_p[4];
          else
            {
              path_p[p++]=box_p[1];
              path_p[p++]=box_p[2];
            }
          break;
        }
        case MiterJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          /* miter exceeding the limit falls back to a bevel */
          if (dot_product <= miterlimit)
            {
              path_q[q++]=box_q[4];
              path_p[p++]=box_p[4];
            }
          else
            {
              path_q[q++]=box_q[1];
              path_q[q++]=box_q[2];
              path_p[p++]=box_p[1];
              path_p[p++]=box_p[2];
            }
          break;
        }
        case RoundJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            path_p[p++]=box_p[4];
          else
            {
              path_p[p++]=box_p[1];
              path_p[p++]=box_p[2];
            }
          /* approximate the round join with short arc segments */
          center=polygon_primitive[n].point;
          theta.p=atan2(box_q[1].y-center.y,box_q[1].x-center.x);
          theta.q=atan2(box_q[2].y-center.y,box_q[2].x-center.x);
          if (theta.q < theta.p)
            theta.q+=(double) (2.0*MagickPI);
          arc_segments=(size_t) ceil((double) ((theta.q-theta.p)/
            (2.0*sqrt((double) (1.0/mid)))));
          path_q[q].x=box_q[1].x;
          path_q[q].y=box_q[1].y;
          q++;
          for (j=1; j < (ssize_t) arc_segments; j++)
          {
            delta_theta=(double) (j*(theta.q-theta.p)/arc_segments);
            path_q[q].x=(double) (center.x+mid*cos(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            path_q[q].y=(double) (center.y+mid*sin(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            q++;
          }
          path_q[q++]=box_q[2];
          break;
        }
        default:
          break;
      }
    else
      /* mirrored handling when the turn bends toward the other side */
      switch (draw_info->linejoin)
      {
        case BevelJoin:
        {
          path_p[p++]=box_p[1];
          path_p[p++]=box_p[2];
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            path_q[q++]=box_q[4];
          else
            {
              path_q[q++]=box_q[1];
              path_q[q++]=box_q[2];
            }
          break;
        }
        case MiterJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            {
              path_q[q++]=box_q[4];
              path_p[p++]=box_p[4];
            }
          else
            {
              path_q[q++]=box_q[1];
              path_q[q++]=box_q[2];
              path_p[p++]=box_p[1];
              path_p[p++]=box_p[2];
            }
          break;
        }
        case RoundJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            path_q[q++]=box_q[4];
          else
            {
              path_q[q++]=box_q[1];
              path_q[q++]=box_q[2];
            }
          center=polygon_primitive[n].point;
          theta.p=atan2(box_p[1].y-center.y,box_p[1].x-center.x);
          theta.q=atan2(box_p[2].y-center.y,box_p[2].x-center.x);
          if (theta.p < theta.q)
            theta.p+=(double) (2.0*MagickPI);
          arc_segments=(size_t) ceil((double) ((theta.p-theta.q)/
            (2.0*sqrt((double) (1.0/mid)))));
          path_p[p++]=box_p[1];
          for (j=1; j < (ssize_t) arc_segments; j++)
          {
            delta_theta=(double) (j*(theta.q-theta.p)/arc_segments);
            path_p[p].x=(double) (center.x+mid*cos(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            path_p[p].y=(double) (center.y+mid*sin(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            p++;
          }
          path_p[p++]=box_p[2];
          break;
        }
        default:
          break;
      }
    /* advance: the current segment becomes the previous one */
    slope.p=slope.q;
    inverse_slope.p=inverse_slope.q;
    box_p[0]=box_p[2];
    box_p[1]=box_p[3];
    box_q[0]=box_q[2];
    box_q[1]=box_q[3];
    dx.p=dx.q;
    dy.p=dy.q;
    n=i;
  }
  path_p[p++]=box_p[1];
  path_q[q++]=box_q[1];
  /*
    Trace stroked polygon: emit path_p forward, then path_q in reverse,
    closing back to the first vertex (closed_path contributes 0 or 1 as
    an integer in the size arithmetic below).
  */
  stroke_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
    (p+q+2UL*closed_path+2UL),sizeof(*stroke_polygon));
  if (stroke_polygon != (PrimitiveInfo *) NULL)
    {
      for (i=0; i < (ssize_t) p; i++)
      {
        stroke_polygon[i]=polygon_primitive[0];
        stroke_polygon[i].point=path_p[i];
      }
      if (closed_path != MagickFalse)
        {
          stroke_polygon[i]=polygon_primitive[0];
          stroke_polygon[i].point=stroke_polygon[0].point;
          i++;
        }
      for ( ; i < (ssize_t) (p+q+closed_path); i++)
      {
        stroke_polygon[i]=polygon_primitive[0];
        stroke_polygon[i].point=path_q[p+q+closed_path-(i+1)];
      }
      if (closed_path != MagickFalse)
        {
          stroke_polygon[i]=polygon_primitive[0];
          stroke_polygon[i].point=stroke_polygon[p+closed_path].point;
          i++;
        }
      stroke_polygon[i]=polygon_primitive[0];
      stroke_polygon[i].point=stroke_polygon[0].point;
      i++;
      stroke_polygon[i].primitive=UndefinedPrimitive;
      stroke_polygon[0].coordinates=(size_t) (p+q+2*closed_path+1);
    }
  path_p=(PointInfo *) RelinquishMagickMemory(path_p);
  path_q=(PointInfo *) RelinquishMagickMemory(path_q);
  polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(polygon_primitive);
  return(stroke_polygon);
}
|
Stmt.h | //===- Stmt.h - Classes for representing statements -------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the Stmt interface and subclasses.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_AST_STMT_H
#define LLVM_CLANG_AST_STMT_H
#include "clang/AST/DeclGroup.h"
#include "clang/AST/StmtIterator.h"
#include "clang/Basic/CapturedStmt.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <iterator>
#include <string>
namespace llvm {
class FoldingSetNodeID;
} // namespace llvm
namespace clang {
class ASTContext;
class Attr;
class CapturedDecl;
class Decl;
class Expr;
class LabelDecl;
class ODRHash;
class PrinterHelper;
struct PrintingPolicy;
class RecordDecl;
class SourceManager;
class StringLiteral;
class Token;
class VarDecl;
//===----------------------------------------------------------------------===//
// AST classes for statements.
//===----------------------------------------------------------------------===//
/// Stmt - This represents one statement.
///
class alignas(void *) Stmt {
public:
// Enumeration of every concrete statement/expression node class.
// The enumerators (and the first/last range markers used by classof
// implementations) are generated from clang/AST/StmtNodes.inc.
enum StmtClass {
NoStmtClass = 0,
#define STMT(CLASS, PARENT) CLASS##Class,
#define STMT_RANGE(BASE, FIRST, LAST) \
first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class,
#define LAST_STMT_RANGE(BASE, FIRST, LAST) \
first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class
#define ABSTRACT_STMT(STMT)
#include "clang/AST/StmtNodes.inc"
};
// Make vanilla 'new' and 'delete' illegal for Stmts.
protected:
friend class ASTStmtReader;
friend class ASTStmtWriter;
// Plain operator new/delete abort at runtime: Stmts must be allocated
// through the ASTContext-taking overloads below (or placement new).
void *operator new(size_t bytes) noexcept {
llvm_unreachable("Stmts cannot be allocated with regular 'new'.");
}
void operator delete(void *data) noexcept {
llvm_unreachable("Stmts cannot be released with regular 'delete'.");
}
// The bitfield classes below all overlay each other in the anonymous
// union at the end of this section. Every one starts with an unnamed
// field reserving the low NumStmtBits (or NumExprBits) bits, so the
// statement class (and, for expressions, the common Expr bits) stay
// addressable through any union member.
class StmtBitfields {
friend class Stmt;
/// \brief The statement class.
unsigned sClass : 8;
};
// Number of bits consumed by StmtBitfields; subclass bitfields reserve
// this many low bits before their own fields.
enum { NumStmtBits = 8 };
class CompoundStmtBitfields {
friend class CompoundStmt;
unsigned : NumStmtBits;
unsigned NumStmts : 32 - NumStmtBits;
};
class IfStmtBitfields {
friend class IfStmt;
unsigned : NumStmtBits;
unsigned IsConstexpr : 1;
};
class ExprBitfields {
friend class ASTStmtReader; // deserialization
friend class AtomicExpr; // ctor
friend class BlockDeclRefExpr; // ctor
friend class CallExpr; // ctor
friend class CXXConstructExpr; // ctor
friend class CXXDependentScopeMemberExpr; // ctor
friend class CXXNewExpr; // ctor
friend class CXXUnresolvedConstructExpr; // ctor
friend class DeclRefExpr; // computeDependence
friend class DependentScopeDeclRefExpr; // ctor
friend class DesignatedInitExpr; // ctor
friend class Expr;
friend class InitListExpr; // ctor
friend class ObjCArrayLiteral; // ctor
friend class ObjCDictionaryLiteral; // ctor
friend class ObjCMessageExpr; // ctor
friend class OffsetOfExpr; // ctor
friend class OpaqueValueExpr; // ctor
friend class OverloadExpr; // ctor
friend class ParenListExpr; // ctor
friend class PseudoObjectExpr; // ctor
friend class ShuffleVectorExpr; // ctor
unsigned : NumStmtBits;
unsigned ValueKind : 2;
unsigned ObjectKind : 3;
unsigned TypeDependent : 1;
unsigned ValueDependent : 1;
unsigned InstantiationDependent : 1;
unsigned ContainsUnexpandedParameterPack : 1;
};
// NumStmtBits (8) + the 9 Expr bits above = 17 bits reserved before any
// expression-subclass bitfields.
enum { NumExprBits = 17 };
class CharacterLiteralBitfields {
friend class CharacterLiteral;
unsigned : NumExprBits;
unsigned Kind : 3;
};
enum APFloatSemantics {
IEEEhalf,
IEEEsingle,
IEEEdouble,
x87DoubleExtended,
IEEEquad,
PPCDoubleDouble
};
class FloatingLiteralBitfields {
friend class FloatingLiteral;
unsigned : NumExprBits;
unsigned Semantics : 3; // Provides semantics for APFloat construction
unsigned IsExact : 1;
};
class UnaryExprOrTypeTraitExprBitfields {
friend class UnaryExprOrTypeTraitExpr;
unsigned : NumExprBits;
unsigned Kind : 2;
unsigned IsType : 1; // true if operand is a type, false if an expression.
};
class DeclRefExprBitfields {
friend class ASTStmtReader; // deserialization
friend class DeclRefExpr;
unsigned : NumExprBits;
unsigned HasQualifier : 1;
unsigned HasTemplateKWAndArgsInfo : 1;
unsigned HasFoundDecl : 1;
unsigned HadMultipleCandidates : 1;
unsigned RefersToEnclosingVariableOrCapture : 1;
};
class CastExprBitfields {
friend class CastExpr;
unsigned : NumExprBits;
unsigned Kind : 6;
unsigned BasePathSize : 32 - 6 - NumExprBits;
};
class CallExprBitfields {
friend class CallExpr;
unsigned : NumExprBits;
unsigned NumPreArgs : 1;
};
class ExprWithCleanupsBitfields {
friend class ASTStmtReader; // deserialization
friend class ExprWithCleanups;
unsigned : NumExprBits;
// When false, it must not have side effects.
unsigned CleanupsHaveSideEffects : 1;
unsigned NumObjects : 32 - 1 - NumExprBits;
};
class PseudoObjectExprBitfields {
friend class ASTStmtReader; // deserialization
friend class PseudoObjectExpr;
unsigned : NumExprBits;
// These don't need to be particularly wide, because they're
// strictly limited by the forms of expressions we permit.
unsigned NumSubExprs : 8;
unsigned ResultIndex : 32 - 8 - NumExprBits;
};
class ObjCIndirectCopyRestoreExprBitfields {
friend class ObjCIndirectCopyRestoreExpr;
unsigned : NumExprBits;
unsigned ShouldCopy : 1;
};
class InitListExprBitfields {
friend class InitListExpr;
unsigned : NumExprBits;
/// Whether this initializer list originally had a GNU array-range
/// designator in it. This is a temporary marker used by CodeGen.
unsigned HadArrayRangeDesignator : 1;
};
class TypeTraitExprBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class TypeTraitExpr;
unsigned : NumExprBits;
/// \brief The kind of type trait, which is a value of a TypeTrait enumerator.
unsigned Kind : 8;
/// \brief If this expression is not value-dependent, this indicates whether
/// the trait evaluated true or false.
unsigned Value : 1;
/// \brief The number of arguments to this type trait.
unsigned NumArgs : 32 - 8 - 1 - NumExprBits;
};
class CoawaitExprBitfields {
friend class CoawaitExpr;
unsigned : NumExprBits;
unsigned IsImplicit : 1;
};
// All bitfield structures share this single word of storage; the static
// asserts in the Stmt(StmtClass) constructor keep its size in check.
union {
StmtBitfields StmtBits;
CompoundStmtBitfields CompoundStmtBits;
IfStmtBitfields IfStmtBits;
ExprBitfields ExprBits;
CharacterLiteralBitfields CharacterLiteralBits;
FloatingLiteralBitfields FloatingLiteralBits;
UnaryExprOrTypeTraitExprBitfields UnaryExprOrTypeTraitExprBits;
DeclRefExprBitfields DeclRefExprBits;
CastExprBitfields CastExprBits;
CallExprBitfields CallExprBits;
ExprWithCleanupsBitfields ExprWithCleanupsBits;
PseudoObjectExprBitfields PseudoObjectExprBits;
ObjCIndirectCopyRestoreExprBitfields ObjCIndirectCopyRestoreExprBits;
InitListExprBitfields InitListExprBits;
TypeTraitExprBitfields TypeTraitExprBits;
CoawaitExprBitfields CoawaitBits;
};
public:
// Only allow allocation of Stmts using the allocator in ASTContext
// or by doing a placement new.
void* operator new(size_t bytes, const ASTContext& C,
unsigned alignment = 8);
void* operator new(size_t bytes, const ASTContext* C,
unsigned alignment = 8) {
return operator new(bytes, *C, alignment);
}
void *operator new(size_t bytes, void *mem) noexcept { return mem; }
// Matching placement deletes; ASTContext-allocated nodes are never
// individually freed, so these are all no-ops.
void operator delete(void *, const ASTContext &, unsigned) noexcept {}
void operator delete(void *, const ASTContext *, unsigned) noexcept {}
void operator delete(void *, size_t) noexcept {}
void operator delete(void *, void *) noexcept {}
public:
/// \brief A placeholder type used to construct an empty shell of a
/// type, that will be filled in later (e.g., by some
/// de-serialization).
struct EmptyShell {};
protected:
/// Iterator for iterating over Stmt * arrays that contain only Expr *
///
/// This is needed because AST nodes use Stmt* arrays to store
/// references to children (to be compatible with StmtIterator).
struct ExprIterator
: llvm::iterator_adaptor_base<ExprIterator, Stmt **,
std::random_access_iterator_tag, Expr *> {
ExprIterator() : iterator_adaptor_base(nullptr) {}
ExprIterator(Stmt **I) : iterator_adaptor_base(I) {}
reference operator*() const {
// Asserts the pointee really is an expression before the cast.
assert((*I)->getStmtClass() >= firstExprConstant &&
(*I)->getStmtClass() <= lastExprConstant);
return *reinterpret_cast<Expr **>(I);
}
};
/// Const iterator for iterating over Stmt * arrays that contain only Expr *
struct ConstExprIterator
: llvm::iterator_adaptor_base<ConstExprIterator, const Stmt *const *,
std::random_access_iterator_tag,
const Expr *const> {
ConstExprIterator() : iterator_adaptor_base(nullptr) {}
ConstExprIterator(const Stmt *const *I) : iterator_adaptor_base(I) {}
reference operator*() const {
assert((*I)->getStmtClass() >= firstExprConstant &&
(*I)->getStmtClass() <= lastExprConstant);
return *reinterpret_cast<const Expr *const *>(I);
}
};
private:
/// \brief Whether statistic collection is enabled.
static bool StatisticsEnabled;
protected:
/// \brief Construct an empty statement.
explicit Stmt(StmtClass SC, EmptyShell) : Stmt(SC) {}
public:
// Records the node's class in the shared bitfield word and (optionally)
// bumps the per-class statistics counter.
Stmt(StmtClass SC) {
static_assert(sizeof(*this) == sizeof(void *),
"changing bitfields changed sizeof(Stmt)");
static_assert(sizeof(*this) % alignof(void *) == 0,
"Insufficient alignment!");
StmtBits.sClass = SC;
if (StatisticsEnabled) Stmt::addStmtClass(SC);
}
StmtClass getStmtClass() const {
return static_cast<StmtClass>(StmtBits.sClass);
}
const char *getStmtClassName() const;
/// SourceLocation tokens are not useful in isolation - they are low level
/// value objects created/interpreted by SourceManager. We assume AST
/// clients will have a pointer to the respective SourceManager.
SourceRange getSourceRange() const LLVM_READONLY;
SourceLocation getLocStart() const LLVM_READONLY;
SourceLocation getLocEnd() const LLVM_READONLY;
// global temp stats (until we have a per-module visitor)
static void addStmtClass(const StmtClass s);
static void EnableStatistics();
static void PrintStats();
/// \brief Dumps the specified AST fragment and all subtrees to
/// \c llvm::errs().
void dump() const;
void dump(SourceManager &SM) const;
void dump(raw_ostream &OS, SourceManager &SM) const;
void dump(raw_ostream &OS) const;
/// dumpColor - same as dump(), but forces color highlighting.
void dumpColor() const;
/// dumpPretty/printPretty - These two methods do a "pretty print" of the AST
/// back to its original source language syntax.
void dumpPretty(const ASTContext &Context) const;
void printPretty(raw_ostream &OS, PrinterHelper *Helper,
const PrintingPolicy &Policy, unsigned Indentation = 0,
const ASTContext *Context = nullptr) const;
/// viewAST - Visualize an AST rooted at this Stmt* using GraphViz. Only
/// works on systems with GraphViz (Mac OS X) or dot+gv installed.
void viewAST() const;
/// Skip past any implicit AST nodes which might surround this
/// statement, such as ExprWithCleanups or ImplicitCastExpr nodes.
Stmt *IgnoreImplicit();
const Stmt *IgnoreImplicit() const {
return const_cast<Stmt *>(this)->IgnoreImplicit();
}
/// \brief Skip no-op (attributed, compound) container stmts and skip captured
/// stmt at the top, if \a IgnoreCaptured is true.
Stmt *IgnoreContainers(bool IgnoreCaptured = false);
const Stmt *IgnoreContainers(bool IgnoreCaptured = false) const {
return const_cast<Stmt *>(this)->IgnoreContainers(IgnoreCaptured);
}
const Stmt *stripLabelLikeStatements() const;
Stmt *stripLabelLikeStatements() {
// Delegates to the const overload, then removes constness again.
return const_cast<Stmt*>(
const_cast<const Stmt*>(this)->stripLabelLikeStatements());
}
/// Child Iterators: All subclasses must implement 'children'
/// to permit easy iteration over the substatements/subexpessions of an
/// AST node. This permits easy iteration over all nodes in the AST.
using child_iterator = StmtIterator;
using const_child_iterator = ConstStmtIterator;
using child_range = llvm::iterator_range<child_iterator>;
using const_child_range = llvm::iterator_range<const_child_iterator>;
child_range children();
const_child_range children() const {
auto Children = const_cast<Stmt *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
child_iterator child_begin() { return children().begin(); }
child_iterator child_end() { return children().end(); }
const_child_iterator child_begin() const { return children().begin(); }
const_child_iterator child_end() const { return children().end(); }
/// \brief Produce a unique representation of the given statement.
///
/// \param ID once the profiling operation is complete, will contain
/// the unique representation of the given statement.
///
/// \param Context the AST context in which the statement resides
///
/// \param Canonical whether the profile should be based on the canonical
/// representation of this statement (e.g., where non-type template
/// parameters are identified by index/level rather than their
/// declaration pointers) or the exact representation of the statement as
/// written in the source.
void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
bool Canonical) const;
/// \brief Calculate a unique representation for a statement that is
/// stable across compiler invocations.
///
/// \param ID profile information will be stored in ID.
///
/// \param Hash an ODRHash object which will be called where pointers would
/// have been used in the Profile function.
void ProcessODRHash(llvm::FoldingSetNodeID &ID, ODRHash& Hash) const;
};
/// DeclStmt - Adaptor class for mixing declarations with statements and
/// expressions. For example, CompoundStmt mixes statements, expressions
/// and declarations (variables, types). Another example is ForStmt, where
/// the first statement can be an expression or a declaration.
class DeclStmt : public Stmt {
// The group of declarations this statement introduces (one or many).
DeclGroupRef DG;
// Source extent of the declaration statement, including the semicolon.
SourceLocation StartLoc, EndLoc;
public:
DeclStmt(DeclGroupRef dg, SourceLocation startLoc, SourceLocation endLoc)
: Stmt(DeclStmtClass), DG(dg), StartLoc(startLoc), EndLoc(endLoc) {}
/// \brief Build an empty declaration statement.
explicit DeclStmt(EmptyShell Empty) : Stmt(DeclStmtClass, Empty) {}
/// isSingleDecl - This method returns true if this DeclStmt refers
/// to a single Decl.
bool isSingleDecl() const {
return DG.isSingleDecl();
}
const Decl *getSingleDecl() const { return DG.getSingleDecl(); }
Decl *getSingleDecl() { return DG.getSingleDecl(); }
const DeclGroupRef getDeclGroup() const { return DG; }
DeclGroupRef getDeclGroup() { return DG; }
void setDeclGroup(DeclGroupRef DGR) { DG = DGR; }
SourceLocation getStartLoc() const { return StartLoc; }
void setStartLoc(SourceLocation L) { StartLoc = L; }
SourceLocation getEndLoc() const { return EndLoc; }
void setEndLoc(SourceLocation L) { EndLoc = L; }
SourceLocation getLocStart() const LLVM_READONLY { return StartLoc; }
SourceLocation getLocEnd() const LLVM_READONLY { return EndLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == DeclStmtClass;
}
// Iterators over subexpressions.
// The children are the declarations themselves, wrapped as StmtIterators
// over the DeclGroupRef's range.
child_range children() {
return child_range(child_iterator(DG.begin(), DG.end()),
child_iterator(DG.end(), DG.end()));
}
using decl_iterator = DeclGroupRef::iterator;
using const_decl_iterator = DeclGroupRef::const_iterator;
using decl_range = llvm::iterator_range<decl_iterator>;
using decl_const_range = llvm::iterator_range<const_decl_iterator>;
decl_range decls() { return decl_range(decl_begin(), decl_end()); }
decl_const_range decls() const {
return decl_const_range(decl_begin(), decl_end());
}
decl_iterator decl_begin() { return DG.begin(); }
decl_iterator decl_end() { return DG.end(); }
const_decl_iterator decl_begin() const { return DG.begin(); }
const_decl_iterator decl_end() const { return DG.end(); }
using reverse_decl_iterator = std::reverse_iterator<decl_iterator>;
reverse_decl_iterator decl_rbegin() {
return reverse_decl_iterator(decl_end());
}
reverse_decl_iterator decl_rend() {
return reverse_decl_iterator(decl_begin());
}
};
/// NullStmt - This is the null statement ";": C99 6.8.3p3.
///
class NullStmt : public Stmt {
  // Location of the lone ";" token.
  SourceLocation SemiLoc;

  /// True when this null statement directly follows a macro that expanded
  /// to nothing, e.g:
  /// @code
  ///   #define CALL(x)
  ///   CALL(0);
  /// @endcode
  bool HasLeadingEmptyMacro = false;

public:
  friend class ASTStmtReader;
  friend class ASTStmtWriter;

  NullStmt(SourceLocation L, bool hasLeadingEmptyMacro = false)
      : Stmt(NullStmtClass), SemiLoc(L),
        HasLeadingEmptyMacro(hasLeadingEmptyMacro) {}

  /// \brief Build an empty null statement, to be filled in later.
  explicit NullStmt(EmptyShell Empty) : Stmt(NullStmtClass, Empty) {}

  bool hasLeadingEmptyMacro() const { return HasLeadingEmptyMacro; }

  SourceLocation getSemiLoc() const { return SemiLoc; }
  void setSemiLoc(SourceLocation L) { SemiLoc = L; }

  // The statement begins and ends at the semicolon.
  SourceLocation getLocStart() const LLVM_READONLY { return SemiLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return SemiLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == NullStmtClass;
  }

  // A null statement has no children: return an empty range.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
};
/// CompoundStmt - This represents a group of statements like { stmt stmt }.
class CompoundStmt final : public Stmt,
private llvm::TrailingObjects<CompoundStmt, Stmt *> {
friend class ASTStmtReader;
friend TrailingObjects;
// The child statements are stored as a trailing Stmt* array immediately
// after this object; their count lives in CompoundStmtBits.NumStmts.
SourceLocation LBraceLoc, RBraceLoc;
CompoundStmt(ArrayRef<Stmt *> Stmts, SourceLocation LB, SourceLocation RB);
explicit CompoundStmt(EmptyShell Empty) : Stmt(CompoundStmtClass, Empty) {}
void setStmts(ArrayRef<Stmt *> Stmts);
public:
// Allocates the node plus trailing storage for Stmts in the ASTContext.
static CompoundStmt *Create(const ASTContext &C, ArrayRef<Stmt *> Stmts,
SourceLocation LB, SourceLocation RB);
// \brief Build an empty compound statement with a location.
explicit CompoundStmt(SourceLocation Loc)
: Stmt(CompoundStmtClass), LBraceLoc(Loc), RBraceLoc(Loc) {
CompoundStmtBits.NumStmts = 0;
}
// \brief Build an empty compound statement.
static CompoundStmt *CreateEmpty(const ASTContext &C, unsigned NumStmts);
bool body_empty() const { return CompoundStmtBits.NumStmts == 0; }
unsigned size() const { return CompoundStmtBits.NumStmts; }
using body_iterator = Stmt **;
using body_range = llvm::iterator_range<body_iterator>;
body_range body() { return body_range(body_begin(), body_end()); }
body_iterator body_begin() { return getTrailingObjects<Stmt *>(); }
body_iterator body_end() { return body_begin() + size(); }
// Front/back accessors return nullptr for an empty body rather than
// reading past the trailing array.
Stmt *body_front() { return !body_empty() ? body_begin()[0] : nullptr; }
Stmt *body_back() {
return !body_empty() ? body_begin()[size() - 1] : nullptr;
}
void setLastStmt(Stmt *S) {
assert(!body_empty() && "setLastStmt");
body_begin()[size() - 1] = S;
}
using const_body_iterator = Stmt* const *;
using body_const_range = llvm::iterator_range<const_body_iterator>;
body_const_range body() const {
return body_const_range(body_begin(), body_end());
}
const_body_iterator body_begin() const {
return getTrailingObjects<Stmt *>();
}
const_body_iterator body_end() const { return body_begin() + size(); }
const Stmt *body_front() const {
return !body_empty() ? body_begin()[0] : nullptr;
}
const Stmt *body_back() const {
return !body_empty() ? body_begin()[size() - 1] : nullptr;
}
using reverse_body_iterator = std::reverse_iterator<body_iterator>;
reverse_body_iterator body_rbegin() {
return reverse_body_iterator(body_end());
}
reverse_body_iterator body_rend() {
return reverse_body_iterator(body_begin());
}
using const_reverse_body_iterator =
std::reverse_iterator<const_body_iterator>;
const_reverse_body_iterator body_rbegin() const {
return const_reverse_body_iterator(body_end());
}
const_reverse_body_iterator body_rend() const {
return const_reverse_body_iterator(body_begin());
}
SourceLocation getLocStart() const LLVM_READONLY { return LBraceLoc; }
SourceLocation getLocEnd() const LLVM_READONLY { return RBraceLoc; }
SourceLocation getLBracLoc() const { return LBraceLoc; }
SourceLocation getRBracLoc() const { return RBraceLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == CompoundStmtClass;
}
// Iterators
child_range children() { return child_range(body_begin(), body_end()); }
const_child_range children() const {
return const_child_range(body_begin(), body_end());
}
};
// SwitchCase is the base class for CaseStmt and DefaultStmt,
class SwitchCase : public Stmt {
protected:
// A pointer to the following CaseStmt or DefaultStmt class,
// used by SwitchStmt.
SwitchCase *NextSwitchCase = nullptr;
// Location of the 'case'/'default' keyword and of the trailing colon.
SourceLocation KeywordLoc;
SourceLocation ColonLoc;
SwitchCase(StmtClass SC, SourceLocation KWLoc, SourceLocation ColonLoc)
: Stmt(SC), KeywordLoc(KWLoc), ColonLoc(ColonLoc) {}
SwitchCase(StmtClass SC, EmptyShell) : Stmt(SC) {}
public:
const SwitchCase *getNextSwitchCase() const { return NextSwitchCase; }
SwitchCase *getNextSwitchCase() { return NextSwitchCase; }
void setNextSwitchCase(SwitchCase *SC) { NextSwitchCase = SC; }
SourceLocation getKeywordLoc() const { return KeywordLoc; }
void setKeywordLoc(SourceLocation L) { KeywordLoc = L; }
SourceLocation getColonLoc() const { return ColonLoc; }
void setColonLoc(SourceLocation L) { ColonLoc = L; }
// Defined out of line; dispatches to the CaseStmt/DefaultStmt subclass.
Stmt *getSubStmt();
const Stmt *getSubStmt() const {
return const_cast<SwitchCase*>(this)->getSubStmt();
}
SourceLocation getLocStart() const LLVM_READONLY { return KeywordLoc; }
SourceLocation getLocEnd() const LLVM_READONLY;
static bool classof(const Stmt *T) {
return T->getStmtClass() == CaseStmtClass ||
T->getStmtClass() == DefaultStmtClass;
}
};
class CaseStmt : public SwitchCase {
// Location of the "..." in a GNU case range; invalid otherwise.
SourceLocation EllipsisLoc;
// Fixed slots in SubExprs: case value(s) and the statement after ':'.
enum { LHS, RHS, SUBSTMT, END_EXPR };
Stmt* SubExprs[END_EXPR]; // The expression for the RHS is Non-null for
// GNU "case 1 ... 4" extension
public:
CaseStmt(Expr *lhs, Expr *rhs, SourceLocation caseLoc,
SourceLocation ellipsisLoc, SourceLocation colonLoc)
: SwitchCase(CaseStmtClass, caseLoc, colonLoc) {
SubExprs[SUBSTMT] = nullptr;
SubExprs[LHS] = reinterpret_cast<Stmt*>(lhs);
SubExprs[RHS] = reinterpret_cast<Stmt*>(rhs);
EllipsisLoc = ellipsisLoc;
}
/// \brief Build an empty switch case statement.
explicit CaseStmt(EmptyShell Empty) : SwitchCase(CaseStmtClass, Empty) {}
SourceLocation getCaseLoc() const { return KeywordLoc; }
void setCaseLoc(SourceLocation L) { KeywordLoc = L; }
SourceLocation getEllipsisLoc() const { return EllipsisLoc; }
void setEllipsisLoc(SourceLocation L) { EllipsisLoc = L; }
SourceLocation getColonLoc() const { return ColonLoc; }
void setColonLoc(SourceLocation L) { ColonLoc = L; }
// Slots are stored as Stmt* for child iteration; casts recover Expr*.
Expr *getLHS() { return reinterpret_cast<Expr*>(SubExprs[LHS]); }
Expr *getRHS() { return reinterpret_cast<Expr*>(SubExprs[RHS]); }
Stmt *getSubStmt() { return SubExprs[SUBSTMT]; }
const Expr *getLHS() const {
return reinterpret_cast<const Expr*>(SubExprs[LHS]);
}
const Expr *getRHS() const {
return reinterpret_cast<const Expr*>(SubExprs[RHS]);
}
const Stmt *getSubStmt() const { return SubExprs[SUBSTMT]; }
void setSubStmt(Stmt *S) { SubExprs[SUBSTMT] = S; }
void setLHS(Expr *Val) { SubExprs[LHS] = reinterpret_cast<Stmt*>(Val); }
void setRHS(Expr *Val) { SubExprs[RHS] = reinterpret_cast<Stmt*>(Val); }
SourceLocation getLocStart() const LLVM_READONLY { return KeywordLoc; }
SourceLocation getLocEnd() const LLVM_READONLY {
// Handle deeply nested case statements with iteration instead of recursion.
const CaseStmt *CS = this;
while (const CaseStmt *CS2 = dyn_cast<CaseStmt>(CS->getSubStmt()))
CS = CS2;
return CS->getSubStmt()->getLocEnd();
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == CaseStmtClass;
}
// Iterators
child_range children() {
return child_range(&SubExprs[0], &SubExprs[END_EXPR]);
}
};
class DefaultStmt : public SwitchCase {
  // The statement that follows "default:".
  Stmt *SubStmt;

public:
  DefaultStmt(SourceLocation DL, SourceLocation CL, Stmt *substmt)
      : SwitchCase(DefaultStmtClass, DL, CL), SubStmt(substmt) {}

  /// \brief Build an empty default statement, to be filled in later.
  explicit DefaultStmt(EmptyShell Empty)
      : SwitchCase(DefaultStmtClass, Empty) {}

  const Stmt *getSubStmt() const { return SubStmt; }
  Stmt *getSubStmt() { return SubStmt; }
  void setSubStmt(Stmt *S) { SubStmt = S; }

  // The 'default' keyword/colon locations live in the SwitchCase base.
  SourceLocation getDefaultLoc() const { return KeywordLoc; }
  void setDefaultLoc(SourceLocation L) { KeywordLoc = L; }
  SourceLocation getColonLoc() const { return ColonLoc; }
  void setColonLoc(SourceLocation L) { ColonLoc = L; }

  // The statement spans from the keyword to the end of its substatement.
  SourceLocation getLocStart() const LLVM_READONLY { return KeywordLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return SubStmt->getLocEnd();}

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == DefaultStmtClass;
  }

  // Iterators: the sole child is the substatement.
  child_range children() { return child_range(&SubStmt, &SubStmt + 1); }
};
// Dispatch to the concrete subclass; a SwitchCase is always either a
// CaseStmt or a DefaultStmt (see SwitchCase::classof).
inline SourceLocation SwitchCase::getLocEnd() const {
  return isa<CaseStmt>(this) ? cast<CaseStmt>(this)->getLocEnd()
                             : cast<DefaultStmt>(this)->getLocEnd();
}
/// LabelStmt - Represents a label, which has a substatement. For example:
/// foo: return;
class LabelStmt : public Stmt {
  SourceLocation IdentLoc;  // Location of the label identifier itself.
  LabelDecl *TheDecl;       // Declaration introduced by this label.
  Stmt *SubStmt;            // Statement the label is attached to.

public:
  LabelStmt(SourceLocation IL, LabelDecl *D, Stmt *substmt)
      : Stmt(LabelStmtClass), IdentLoc(IL), TheDecl(D), SubStmt(substmt) {
    // Guard against accidental growth of this node's footprint.
    static_assert(sizeof(LabelStmt) ==
                      2 * sizeof(SourceLocation) + 2 * sizeof(void *),
                  "LabelStmt too big");
  }

  // \brief Build an empty label statement, to be filled in later.
  explicit LabelStmt(EmptyShell Empty) : Stmt(LabelStmtClass, Empty) {}

  LabelDecl *getDecl() const { return TheDecl; }
  void setDecl(LabelDecl *D) { TheDecl = D; }

  // The label's name, as spelled in the source (defined out of line).
  const char *getName() const;

  Stmt *getSubStmt() { return SubStmt; }
  const Stmt *getSubStmt() const { return SubStmt; }
  void setSubStmt(Stmt *SS) { SubStmt = SS; }

  SourceLocation getIdentLoc() const { return IdentLoc; }
  void setIdentLoc(SourceLocation L) { IdentLoc = L; }

  // Spans from the identifier through the labeled statement.
  SourceLocation getLocStart() const LLVM_READONLY { return IdentLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return SubStmt->getLocEnd();}

  // The labeled statement is the only child.
  child_range children() { return child_range(&SubStmt, &SubStmt + 1); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == LabelStmtClass;
  }
};
/// \brief Represents an attribute applied to a statement.
///
/// Represents an attribute applied to a statement. For example:
/// [[omp::for(...)]] for (...) { ... }
class AttributedStmt final
: public Stmt,
private llvm::TrailingObjects<AttributedStmt, const Attr *> {
friend class ASTStmtReader;
friend TrailingObjects;
// The annotated statement; the attributes themselves live in a trailing
// const Attr* array of length NumAttrs.
Stmt *SubStmt;
SourceLocation AttrLoc;
unsigned NumAttrs;
AttributedStmt(SourceLocation Loc, ArrayRef<const Attr*> Attrs, Stmt *SubStmt)
: Stmt(AttributedStmtClass), SubStmt(SubStmt), AttrLoc(Loc),
NumAttrs(Attrs.size()) {
std::copy(Attrs.begin(), Attrs.end(), getAttrArrayPtr());
}
explicit AttributedStmt(EmptyShell Empty, unsigned NumAttrs)
: Stmt(AttributedStmtClass, Empty), NumAttrs(NumAttrs) {
std::fill_n(getAttrArrayPtr(), NumAttrs, nullptr);
}
const Attr *const *getAttrArrayPtr() const {
return getTrailingObjects<const Attr *>();
}
const Attr **getAttrArrayPtr() { return getTrailingObjects<const Attr *>(); }
public:
// Allocates the node plus trailing attribute storage in the ASTContext.
static AttributedStmt *Create(const ASTContext &C, SourceLocation Loc,
ArrayRef<const Attr*> Attrs, Stmt *SubStmt);
// \brief Build an empty attributed statement.
static AttributedStmt *CreateEmpty(const ASTContext &C, unsigned NumAttrs);
SourceLocation getAttrLoc() const { return AttrLoc; }
ArrayRef<const Attr*> getAttrs() const {
return llvm::makeArrayRef(getAttrArrayPtr(), NumAttrs);
}
Stmt *getSubStmt() { return SubStmt; }
const Stmt *getSubStmt() const { return SubStmt; }
SourceLocation getLocStart() const LLVM_READONLY { return AttrLoc; }
SourceLocation getLocEnd() const LLVM_READONLY { return SubStmt->getLocEnd();}
child_range children() { return child_range(&SubStmt, &SubStmt + 1); }
static bool classof(const Stmt *T) {
return T->getStmtClass() == AttributedStmtClass;
}
};
/// IfStmt - This represents an if/then/else.
class IfStmt : public Stmt {
// Fixed slots in SubExprs: C++17 init-statement, condition variable's
// faux DeclStmt, condition expression, then-branch, optional else-branch.
enum { INIT, VAR, COND, THEN, ELSE, END_EXPR };
Stmt* SubExprs[END_EXPR];
SourceLocation IfLoc;
SourceLocation ElseLoc;
public:
IfStmt(const ASTContext &C, SourceLocation IL,
bool IsConstexpr, Stmt *init, VarDecl *var, Expr *cond,
Stmt *then, SourceLocation EL = SourceLocation(),
Stmt *elsev = nullptr);
/// \brief Build an empty if/then/else statement
explicit IfStmt(EmptyShell Empty) : Stmt(IfStmtClass, Empty) {}
/// \brief Retrieve the variable declared in this "if" statement, if any.
///
/// In the following example, "x" is the condition variable.
/// \code
/// if (int x = foo()) {
/// printf("x is %d", x);
/// }
/// \endcode
VarDecl *getConditionVariable() const;
void setConditionVariable(const ASTContext &C, VarDecl *V);
/// If this IfStmt has a condition variable, return the faux DeclStmt
/// associated with the creation of that condition variable.
const DeclStmt *getConditionVariableDeclStmt() const {
return reinterpret_cast<DeclStmt*>(SubExprs[VAR]);
}
Stmt *getInit() { return SubExprs[INIT]; }
const Stmt *getInit() const { return SubExprs[INIT]; }
void setInit(Stmt *S) { SubExprs[INIT] = S; }
const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt *>(E); }
const Stmt *getThen() const { return SubExprs[THEN]; }
void setThen(Stmt *S) { SubExprs[THEN] = S; }
const Stmt *getElse() const { return SubExprs[ELSE]; }
void setElse(Stmt *S) { SubExprs[ELSE] = S; }
Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); }
Stmt *getThen() { return SubExprs[THEN]; }
Stmt *getElse() { return SubExprs[ELSE]; }
SourceLocation getIfLoc() const { return IfLoc; }
void setIfLoc(SourceLocation L) { IfLoc = L; }
SourceLocation getElseLoc() const { return ElseLoc; }
void setElseLoc(SourceLocation L) { ElseLoc = L; }
// 'if constexpr' flag is stored in the shared IfStmt bitfields.
bool isConstexpr() const { return IfStmtBits.IsConstexpr; }
void setConstexpr(bool C) { IfStmtBits.IsConstexpr = C; }
bool isObjCAvailabilityCheck() const;
SourceLocation getLocStart() const LLVM_READONLY { return IfLoc; }
SourceLocation getLocEnd() const LLVM_READONLY {
// Ends with the else branch when present, otherwise the then branch.
if (SubExprs[ELSE])
return SubExprs[ELSE]->getLocEnd();
else
return SubExprs[THEN]->getLocEnd();
}
// Iterators over subexpressions. The iterators will include iterating
// over the initialization expression referenced by the condition variable.
child_range children() {
return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == IfStmtClass;
}
};
/// SwitchStmt - This represents a 'switch' stmt.
class SwitchStmt : public Stmt {
SourceLocation SwitchLoc;
// Fixed slots in SubExprs: C++17 init-statement, condition variable's
// faux DeclStmt, condition expression, and the switch body.
enum { INIT, VAR, COND, BODY, END_EXPR };
Stmt* SubExprs[END_EXPR];
// This points to a linked list of case and default statements and, if the
// SwitchStmt is a switch on an enum value, records whether all the enum
// values were covered by CaseStmts. The coverage information value is meant
// to be a hint for possible clients.
llvm::PointerIntPair<SwitchCase *, 1, bool> FirstCase;
public:
SwitchStmt(const ASTContext &C, Stmt *Init, VarDecl *Var, Expr *cond);
/// \brief Build a empty switch statement.
explicit SwitchStmt(EmptyShell Empty) : Stmt(SwitchStmtClass, Empty) {}
/// \brief Retrieve the variable declared in this "switch" statement, if any.
///
/// In the following example, "x" is the condition variable.
/// \code
/// switch (int x = foo()) {
/// case 0: break;
/// // ...
/// }
/// \endcode
VarDecl *getConditionVariable() const;
void setConditionVariable(const ASTContext &C, VarDecl *V);
/// If this SwitchStmt has a condition variable, return the faux DeclStmt
/// associated with the creation of that condition variable.
const DeclStmt *getConditionVariableDeclStmt() const {
return reinterpret_cast<DeclStmt*>(SubExprs[VAR]);
}
Stmt *getInit() { return SubExprs[INIT]; }
const Stmt *getInit() const { return SubExprs[INIT]; }
void setInit(Stmt *S) { SubExprs[INIT] = S; }
const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
const Stmt *getBody() const { return SubExprs[BODY]; }
const SwitchCase *getSwitchCaseList() const { return FirstCase.getPointer(); }
Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]);}
void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt *>(E); }
Stmt *getBody() { return SubExprs[BODY]; }
void setBody(Stmt *S) { SubExprs[BODY] = S; }
SwitchCase *getSwitchCaseList() { return FirstCase.getPointer(); }
/// \brief Set the case list for this switch statement.
void setSwitchCaseList(SwitchCase *SC) { FirstCase.setPointer(SC); }
SourceLocation getSwitchLoc() const { return SwitchLoc; }
void setSwitchLoc(SourceLocation L) { SwitchLoc = L; }
void setBody(Stmt *S, SourceLocation SL) {
SubExprs[BODY] = S;
SwitchLoc = SL;
}
// Prepends SC to the intrusive singly-linked list of cases; a case may
// belong to at most one switch.
void addSwitchCase(SwitchCase *SC) {
assert(!SC->getNextSwitchCase()
&& "case/default already added to a switch");
SC->setNextSwitchCase(FirstCase.getPointer());
FirstCase.setPointer(SC);
}
/// Set a flag in the SwitchStmt indicating that if the 'switch (X)' is a
/// switch over an enum value then all cases have been explicitly covered.
void setAllEnumCasesCovered() { FirstCase.setInt(true); }
/// Returns true if the SwitchStmt is a switch of an enum value and all cases
/// have been explicitly covered.
bool isAllEnumCasesCovered() const { return FirstCase.getInt(); }
SourceLocation getLocStart() const LLVM_READONLY { return SwitchLoc; }
SourceLocation getLocEnd() const LLVM_READONLY {
// Fall back to the condition's end when the body is absent.
return SubExprs[BODY] ? SubExprs[BODY]->getLocEnd() : SubExprs[COND]->getLocEnd();
}
// Iterators
child_range children() {
return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == SwitchStmtClass;
}
};
/// WhileStmt - This represents a 'while' stmt.
class WhileStmt : public Stmt {
SourceLocation WhileLoc;
// Indices into SubExprs: optional condition variable DeclStmt, the
// controlling condition expression, and the loop body.
enum { VAR, COND, BODY, END_EXPR };
Stmt* SubExprs[END_EXPR];
public:
WhileStmt(const ASTContext &C, VarDecl *Var, Expr *cond, Stmt *body,
SourceLocation WL);
/// \brief Build an empty while statement.
explicit WhileStmt(EmptyShell Empty) : Stmt(WhileStmtClass, Empty) {}
/// \brief Retrieve the variable declared in this "while" statement, if any.
///
/// In the following example, "x" is the condition variable.
/// \code
/// while (int x = random()) {
/// // ...
/// }
/// \endcode
VarDecl *getConditionVariable() const;
void setConditionVariable(const ASTContext &C, VarDecl *V);
/// If this WhileStmt has a condition variable, return the faux DeclStmt
/// associated with the creation of that condition variable.
const DeclStmt *getConditionVariableDeclStmt() const {
// The VAR slot holds a DeclStmt stored as a generic Stmt*.
return reinterpret_cast<DeclStmt*>(SubExprs[VAR]);
}
// The condition is stored as a Stmt* in SubExprs and cast back to Expr*.
Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); }
const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); }
Stmt *getBody() { return SubExprs[BODY]; }
const Stmt *getBody() const { return SubExprs[BODY]; }
void setBody(Stmt *S) { SubExprs[BODY] = S; }
SourceLocation getWhileLoc() const { return WhileLoc; }
void setWhileLoc(SourceLocation L) { WhileLoc = L; }
SourceLocation getLocStart() const LLVM_READONLY { return WhileLoc; }
// End location is the end of the body; assumes BODY is non-null.
SourceLocation getLocEnd() const LLVM_READONLY {
return SubExprs[BODY]->getLocEnd();
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == WhileStmtClass;
}
// Iterators
child_range children() {
return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
}
};
/// DoStmt - This represents a 'do/while' stmt.
class DoStmt : public Stmt {
SourceLocation DoLoc;
// Indices into SubExprs: the loop body and the controlling condition.
enum { BODY, COND, END_EXPR };
Stmt* SubExprs[END_EXPR];
SourceLocation WhileLoc;
SourceLocation RParenLoc; // Location of final ')' in do stmt condition.
public:
DoStmt(Stmt *body, Expr *cond, SourceLocation DL, SourceLocation WL,
SourceLocation RP)
: Stmt(DoStmtClass), DoLoc(DL), WhileLoc(WL), RParenLoc(RP) {
// The condition is stored as a Stmt* alongside the body.
SubExprs[COND] = reinterpret_cast<Stmt*>(cond);
SubExprs[BODY] = body;
}
/// \brief Build an empty do-while statement.
explicit DoStmt(EmptyShell Empty) : Stmt(DoStmtClass, Empty) {}
// The condition is stored as a Stmt* in SubExprs and cast back to Expr*.
Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); }
const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); }
Stmt *getBody() { return SubExprs[BODY]; }
const Stmt *getBody() const { return SubExprs[BODY]; }
void setBody(Stmt *S) { SubExprs[BODY] = S; }
SourceLocation getDoLoc() const { return DoLoc; }
void setDoLoc(SourceLocation L) { DoLoc = L; }
SourceLocation getWhileLoc() const { return WhileLoc; }
void setWhileLoc(SourceLocation L) { WhileLoc = L; }
SourceLocation getRParenLoc() const { return RParenLoc; }
void setRParenLoc(SourceLocation L) { RParenLoc = L; }
SourceLocation getLocStart() const LLVM_READONLY { return DoLoc; }
// A do/while ends at the ')' that closes its condition.
SourceLocation getLocEnd() const LLVM_READONLY { return RParenLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == DoStmtClass;
}
// Iterators
child_range children() {
return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
}
};
/// ForStmt - This represents a 'for (init;cond;inc)' stmt. Note that any of
/// the init/cond/inc parts of the ForStmt will be null if they were not
/// specified in the source.
class ForStmt : public Stmt {
SourceLocation ForLoc;
// Indices into SubExprs: init statement, condition variable DeclStmt,
// condition expression, increment expression, and body.
enum { INIT, CONDVAR, COND, INC, BODY, END_EXPR };
Stmt* SubExprs[END_EXPR]; // SubExprs[INIT] is an expression or declstmt.
SourceLocation LParenLoc, RParenLoc;
public:
ForStmt(const ASTContext &C, Stmt *Init, Expr *Cond, VarDecl *condVar,
Expr *Inc, Stmt *Body, SourceLocation FL, SourceLocation LP,
SourceLocation RP);
/// \brief Build an empty for statement.
explicit ForStmt(EmptyShell Empty) : Stmt(ForStmtClass, Empty) {}
Stmt *getInit() { return SubExprs[INIT]; }
/// \brief Retrieve the variable declared in this "for" statement, if any.
///
/// In the following example, "y" is the condition variable.
/// \code
/// for (int x = random(); int y = mangle(x); ++x) {
/// // ...
/// }
/// \endcode
VarDecl *getConditionVariable() const;
void setConditionVariable(const ASTContext &C, VarDecl *V);
/// If this ForStmt has a condition variable, return the faux DeclStmt
/// associated with the creation of that condition variable.
const DeclStmt *getConditionVariableDeclStmt() const {
// The CONDVAR slot holds a DeclStmt stored as a generic Stmt*.
return reinterpret_cast<DeclStmt*>(SubExprs[CONDVAR]);
}
// Condition and increment are stored as Stmt* and cast back to Expr*.
Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); }
Expr *getInc() { return reinterpret_cast<Expr*>(SubExprs[INC]); }
Stmt *getBody() { return SubExprs[BODY]; }
const Stmt *getInit() const { return SubExprs[INIT]; }
const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
const Expr *getInc() const { return reinterpret_cast<Expr*>(SubExprs[INC]); }
const Stmt *getBody() const { return SubExprs[BODY]; }
void setInit(Stmt *S) { SubExprs[INIT] = S; }
void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); }
void setInc(Expr *E) { SubExprs[INC] = reinterpret_cast<Stmt*>(E); }
void setBody(Stmt *S) { SubExprs[BODY] = S; }
SourceLocation getForLoc() const { return ForLoc; }
void setForLoc(SourceLocation L) { ForLoc = L; }
SourceLocation getLParenLoc() const { return LParenLoc; }
void setLParenLoc(SourceLocation L) { LParenLoc = L; }
SourceLocation getRParenLoc() const { return RParenLoc; }
void setRParenLoc(SourceLocation L) { RParenLoc = L; }
SourceLocation getLocStart() const LLVM_READONLY { return ForLoc; }
// End location is the end of the body; assumes BODY is non-null.
SourceLocation getLocEnd() const LLVM_READONLY {
return SubExprs[BODY]->getLocEnd();
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == ForStmtClass;
}
// Iterators
child_range children() {
return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
}
};
/// GotoStmt - This represents a direct goto.
class GotoStmt : public Stmt {
  LabelDecl *Label;        // The label this goto jumps to.
  SourceLocation GotoLoc;  // Location of the 'goto' keyword.
  SourceLocation LabelLoc; // Location of the label name.

public:
  GotoStmt(LabelDecl *label, SourceLocation GL, SourceLocation LL)
      : Stmt(GotoStmtClass), Label(label), GotoLoc(GL), LabelLoc(LL) {}

  /// \brief Build an empty goto statement.
  explicit GotoStmt(EmptyShell Empty) : Stmt(GotoStmtClass, Empty) {}

  // Target label accessors.
  LabelDecl *getLabel() const { return Label; }
  void setLabel(LabelDecl *D) { Label = D; }

  // Source-location accessors.
  SourceLocation getGotoLoc() const { return GotoLoc; }
  void setGotoLoc(SourceLocation L) { GotoLoc = L; }
  SourceLocation getLabelLoc() const { return LabelLoc; }
  void setLabelLoc(SourceLocation L) { LabelLoc = L; }

  // The statement spans from the 'goto' keyword to the label name.
  SourceLocation getLocStart() const LLVM_READONLY { return GotoLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return LabelLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == GotoStmtClass;
  }

  // A direct goto has no child statements.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
};
/// IndirectGotoStmt - This represents an indirect goto.
class IndirectGotoStmt : public Stmt {
SourceLocation GotoLoc;
SourceLocation StarLoc;
// The target expression, stored as a generic Stmt*.
Stmt *Target;
public:
IndirectGotoStmt(SourceLocation gotoLoc, SourceLocation starLoc,
Expr *target)
: Stmt(IndirectGotoStmtClass), GotoLoc(gotoLoc), StarLoc(starLoc),
Target((Stmt*)target) {}
/// \brief Build an empty indirect goto statement.
explicit IndirectGotoStmt(EmptyShell Empty)
: Stmt(IndirectGotoStmtClass, Empty) {}
void setGotoLoc(SourceLocation L) { GotoLoc = L; }
SourceLocation getGotoLoc() const { return GotoLoc; }
void setStarLoc(SourceLocation L) { StarLoc = L; }
SourceLocation getStarLoc() const { return StarLoc; }
// Target is stored as a Stmt* and cast back to Expr* on access.
Expr *getTarget() { return reinterpret_cast<Expr*>(Target); }
const Expr *getTarget() const {return reinterpret_cast<const Expr*>(Target);}
void setTarget(Expr *E) { Target = reinterpret_cast<Stmt*>(E); }
/// getConstantTarget - Returns the fixed target of this indirect
/// goto, if one exists.
LabelDecl *getConstantTarget();
const LabelDecl *getConstantTarget() const {
// Delegates to the non-const overload.
return const_cast<IndirectGotoStmt*>(this)->getConstantTarget();
}
SourceLocation getLocStart() const LLVM_READONLY { return GotoLoc; }
SourceLocation getLocEnd() const LLVM_READONLY { return Target->getLocEnd(); }
static bool classof(const Stmt *T) {
return T->getStmtClass() == IndirectGotoStmtClass;
}
// Iterators
child_range children() { return child_range(&Target, &Target+1); }
};
/// ContinueStmt - This represents a continue.
class ContinueStmt : public Stmt {
  SourceLocation ContinueLoc; // Location of the 'continue' keyword.

public:
  ContinueStmt(SourceLocation CL) : Stmt(ContinueStmtClass), ContinueLoc(CL) {}

  /// \brief Build an empty continue statement.
  explicit ContinueStmt(EmptyShell Empty) : Stmt(ContinueStmtClass, Empty) {}

  SourceLocation getContinueLoc() const { return ContinueLoc; }
  void setContinueLoc(SourceLocation L) { ContinueLoc = L; }

  // The statement is exactly the 'continue' keyword.
  SourceLocation getLocStart() const LLVM_READONLY { return ContinueLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return ContinueLoc; }

  // A continue has no child statements.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == ContinueStmtClass;
  }
};
/// BreakStmt - This represents a break.
class BreakStmt : public Stmt {
  SourceLocation BreakLoc; // Location of the 'break' keyword.

public:
  BreakStmt(SourceLocation BL) : Stmt(BreakStmtClass), BreakLoc(BL) {
    // Guard against accidental growth of this very common node.
    static_assert(sizeof(BreakStmt) == 2 * sizeof(SourceLocation),
                  "BreakStmt too large");
  }

  /// \brief Build an empty break statement.
  explicit BreakStmt(EmptyShell Empty) : Stmt(BreakStmtClass, Empty) {}

  SourceLocation getBreakLoc() const { return BreakLoc; }
  void setBreakLoc(SourceLocation L) { BreakLoc = L; }

  // The statement is exactly the 'break' keyword.
  SourceLocation getLocStart() const LLVM_READONLY { return BreakLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return BreakLoc; }

  // A break has no child statements.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == BreakStmtClass;
  }
};
/// ReturnStmt - This represents a return, optionally of an expression:
/// return;
/// return 4;
///
/// Note that GCC allows return with no argument in a function declared to
/// return a value, and it allows returning a value in functions declared to
/// return void. We explicitly model this in the AST, which means you can't
/// depend on the return type of the function and the presence of an argument.
class ReturnStmt : public Stmt {
SourceLocation RetLoc;
// The returned expression, stored as a Stmt*; null for 'return;'.
Stmt *RetExpr;
// Candidate variable for the named return value optimization, if any.
const VarDecl *NRVOCandidate;
public:
explicit ReturnStmt(SourceLocation RL) : ReturnStmt(RL, nullptr, nullptr) {}
ReturnStmt(SourceLocation RL, Expr *E, const VarDecl *NRVOCandidate)
: Stmt(ReturnStmtClass), RetLoc(RL), RetExpr((Stmt *)E),
NRVOCandidate(NRVOCandidate) {}
/// \brief Build an empty return expression.
explicit ReturnStmt(EmptyShell Empty) : Stmt(ReturnStmtClass, Empty) {}
const Expr *getRetValue() const;
Expr *getRetValue();
void setRetValue(Expr *E) { RetExpr = reinterpret_cast<Stmt*>(E); }
SourceLocation getReturnLoc() const { return RetLoc; }
void setReturnLoc(SourceLocation L) { RetLoc = L; }
/// \brief Retrieve the variable that might be used for the named return
/// value optimization.
///
/// The optimization itself can only be performed if the variable is
/// also marked as an NRVO object.
const VarDecl *getNRVOCandidate() const { return NRVOCandidate; }
void setNRVOCandidate(const VarDecl *Var) { NRVOCandidate = Var; }
SourceLocation getLocStart() const LLVM_READONLY { return RetLoc; }
// Ends at the returned expression if present, otherwise at 'return'.
SourceLocation getLocEnd() const LLVM_READONLY {
return RetExpr ? RetExpr->getLocEnd() : RetLoc;
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == ReturnStmtClass;
}
// Iterators
child_range children() {
// A bare 'return;' has no children.
if (RetExpr) return child_range(&RetExpr, &RetExpr+1);
return child_range(child_iterator(), child_iterator());
}
};
/// AsmStmt is the base class for GCCAsmStmt and MSAsmStmt.
class AsmStmt : public Stmt {
protected:
friend class ASTStmtReader;
SourceLocation AsmLoc;
/// \brief True if the assembly statement does not have any input or output
/// operands.
bool IsSimple;
/// \brief If true, treat this inline assembly as having side effects.
/// This assembly statement should not be optimized, deleted or moved.
bool IsVolatile;
unsigned NumOutputs;
unsigned NumInputs;
unsigned NumClobbers;
// Operand expressions: outputs first, then inputs (see begin_inputs /
// begin_outputs below for the layout).
Stmt **Exprs = nullptr;
AsmStmt(StmtClass SC, SourceLocation asmloc, bool issimple, bool isvolatile,
unsigned numoutputs, unsigned numinputs, unsigned numclobbers)
: Stmt (SC), AsmLoc(asmloc), IsSimple(issimple), IsVolatile(isvolatile),
NumOutputs(numoutputs), NumInputs(numinputs),
NumClobbers(numclobbers) {}
public:
/// \brief Build an empty inline-assembly statement.
explicit AsmStmt(StmtClass SC, EmptyShell Empty) : Stmt(SC, Empty) {}
SourceLocation getAsmLoc() const { return AsmLoc; }
void setAsmLoc(SourceLocation L) { AsmLoc = L; }
bool isSimple() const { return IsSimple; }
void setSimple(bool V) { IsSimple = V; }
bool isVolatile() const { return IsVolatile; }
void setVolatile(bool V) { IsVolatile = V; }
// The base class returns invalid locations; the concrete subclasses
// (GCCAsmStmt, MSAsmStmt) provide the real ones.
SourceLocation getLocStart() const LLVM_READONLY { return SourceLocation(); }
SourceLocation getLocEnd() const LLVM_READONLY { return SourceLocation(); }
//===--- Asm String Analysis ---===//
/// Assemble final IR asm string.
std::string generateAsmString(const ASTContext &C) const;
//===--- Output operands ---===//
unsigned getNumOutputs() const { return NumOutputs; }
/// getOutputConstraint - Return the constraint string for the specified
/// output operand. All output constraints are known to be non-empty (either
/// '=' or '+').
StringRef getOutputConstraint(unsigned i) const;
/// isOutputPlusConstraint - Return true if the specified output constraint
/// is a "+" constraint (which is both an input and an output) or false if it
/// is an "=" constraint (just an output).
bool isOutputPlusConstraint(unsigned i) const {
return getOutputConstraint(i)[0] == '+';
}
const Expr *getOutputExpr(unsigned i) const;
/// getNumPlusOperands - Return the number of output operands that have a "+"
/// constraint.
unsigned getNumPlusOperands() const;
//===--- Input operands ---===//
unsigned getNumInputs() const { return NumInputs; }
/// getInputConstraint - Return the specified input constraint. Unlike output
/// constraints, these can be empty.
StringRef getInputConstraint(unsigned i) const;
const Expr *getInputExpr(unsigned i) const;
//===--- Other ---===//
unsigned getNumClobbers() const { return NumClobbers; }
StringRef getClobber(unsigned i) const;
// Matches both concrete asm statement kinds.
static bool classof(const Stmt *T) {
return T->getStmtClass() == GCCAsmStmtClass ||
T->getStmtClass() == MSAsmStmtClass;
}
// Input expr iterators.
using inputs_iterator = ExprIterator;
using const_inputs_iterator = ConstExprIterator;
using inputs_range = llvm::iterator_range<inputs_iterator>;
using inputs_const_range = llvm::iterator_range<const_inputs_iterator>;
// Inputs follow the outputs in the Exprs array.
inputs_iterator begin_inputs() {
return &Exprs[0] + NumOutputs;
}
inputs_iterator end_inputs() {
return &Exprs[0] + NumOutputs + NumInputs;
}
inputs_range inputs() { return inputs_range(begin_inputs(), end_inputs()); }
const_inputs_iterator begin_inputs() const {
return &Exprs[0] + NumOutputs;
}
const_inputs_iterator end_inputs() const {
return &Exprs[0] + NumOutputs + NumInputs;
}
inputs_const_range inputs() const {
return inputs_const_range(begin_inputs(), end_inputs());
}
// Output expr iterators.
using outputs_iterator = ExprIterator;
using const_outputs_iterator = ConstExprIterator;
using outputs_range = llvm::iterator_range<outputs_iterator>;
using outputs_const_range = llvm::iterator_range<const_outputs_iterator>;
// Outputs occupy the front of the Exprs array.
outputs_iterator begin_outputs() {
return &Exprs[0];
}
outputs_iterator end_outputs() {
return &Exprs[0] + NumOutputs;
}
outputs_range outputs() {
return outputs_range(begin_outputs(), end_outputs());
}
const_outputs_iterator begin_outputs() const {
return &Exprs[0];
}
const_outputs_iterator end_outputs() const {
return &Exprs[0] + NumOutputs;
}
outputs_const_range outputs() const {
return outputs_const_range(begin_outputs(), end_outputs());
}
// Children are all operand expressions (outputs then inputs).
child_range children() {
return child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs);
}
};
/// This represents a GCC inline-assembly statement extension.
class GCCAsmStmt : public AsmStmt {
friend class ASTStmtReader;
SourceLocation RParenLoc;
StringLiteral *AsmStr;
// FIXME: If we wanted to, we could allocate all of these in one big array.
// Constraints and Names are indexed with outputs first, then inputs
// (see getInputConstraintLiteral / getInputIdentifier below).
StringLiteral **Constraints = nullptr;
StringLiteral **Clobbers = nullptr;
IdentifierInfo **Names = nullptr;
public:
GCCAsmStmt(const ASTContext &C, SourceLocation asmloc, bool issimple,
bool isvolatile, unsigned numoutputs, unsigned numinputs,
IdentifierInfo **names, StringLiteral **constraints, Expr **exprs,
StringLiteral *asmstr, unsigned numclobbers,
StringLiteral **clobbers, SourceLocation rparenloc);
/// \brief Build an empty inline-assembly statement.
explicit GCCAsmStmt(EmptyShell Empty) : AsmStmt(GCCAsmStmtClass, Empty) {}
SourceLocation getRParenLoc() const { return RParenLoc; }
void setRParenLoc(SourceLocation L) { RParenLoc = L; }
//===--- Asm String Analysis ---===//
const StringLiteral *getAsmString() const { return AsmStr; }
StringLiteral *getAsmString() { return AsmStr; }
void setAsmString(StringLiteral *E) { AsmStr = E; }
/// AsmStringPiece - this is part of a decomposed asm string specification
/// (for use with the AnalyzeAsmString function below). An asm string is
/// considered to be a concatenation of these parts.
class AsmStringPiece {
public:
enum Kind {
String, // String in .ll asm string form, "$" -> "$$" and "%%" -> "%".
Operand // Operand reference, with optional modifier %c4.
};
private:
Kind MyKind;
std::string Str;
// Only meaningful when MyKind == Operand.
unsigned OperandNo;
// Source range for operand references.
CharSourceRange Range;
public:
AsmStringPiece(const std::string &S) : MyKind(String), Str(S) {}
AsmStringPiece(unsigned OpNo, const std::string &S, SourceLocation Begin,
SourceLocation End)
: MyKind(Operand), Str(S), OperandNo(OpNo),
Range(CharSourceRange::getCharRange(Begin, End)) {}
bool isString() const { return MyKind == String; }
bool isOperand() const { return MyKind == Operand; }
const std::string &getString() const {
return Str;
}
unsigned getOperandNo() const {
assert(isOperand());
return OperandNo;
}
CharSourceRange getRange() const {
assert(isOperand() && "Range is currently used only for Operands.");
return Range;
}
/// getModifier - Get the modifier for this operand, if present. This
/// returns '\0' if there was no modifier.
char getModifier() const;
};
/// AnalyzeAsmString - Analyze the asm string of the current asm, decomposing
/// it into pieces. If the asm string is erroneous, emit errors and return
/// true, otherwise return false. This handles canonicalization and
/// translation of strings from GCC syntax to LLVM IR syntax, and handles
/// flattening of named references like %[foo] to Operand AsmStringPiece's.
// NOTE(review): return type is unsigned, not bool, contrary to the comment
// above — confirm the intended return semantics against the definition.
unsigned AnalyzeAsmString(SmallVectorImpl<AsmStringPiece> &Pieces,
const ASTContext &C, unsigned &DiagOffs) const;
/// Assemble final IR asm string.
std::string generateAsmString(const ASTContext &C) const;
//===--- Output operands ---===//
IdentifierInfo *getOutputIdentifier(unsigned i) const {
return Names[i];
}
StringRef getOutputName(unsigned i) const {
if (IdentifierInfo *II = getOutputIdentifier(i))
return II->getName();
return StringRef();
}
StringRef getOutputConstraint(unsigned i) const;
const StringLiteral *getOutputConstraintLiteral(unsigned i) const {
return Constraints[i];
}
StringLiteral *getOutputConstraintLiteral(unsigned i) {
return Constraints[i];
}
Expr *getOutputExpr(unsigned i);
const Expr *getOutputExpr(unsigned i) const {
return const_cast<GCCAsmStmt*>(this)->getOutputExpr(i);
}
//===--- Input operands ---===//
// Input entries live after the NumOutputs output entries.
IdentifierInfo *getInputIdentifier(unsigned i) const {
return Names[i + NumOutputs];
}
StringRef getInputName(unsigned i) const {
if (IdentifierInfo *II = getInputIdentifier(i))
return II->getName();
return StringRef();
}
StringRef getInputConstraint(unsigned i) const;
const StringLiteral *getInputConstraintLiteral(unsigned i) const {
return Constraints[i + NumOutputs];
}
StringLiteral *getInputConstraintLiteral(unsigned i) {
return Constraints[i + NumOutputs];
}
Expr *getInputExpr(unsigned i);
void setInputExpr(unsigned i, Expr *E);
const Expr *getInputExpr(unsigned i) const {
return const_cast<GCCAsmStmt*>(this)->getInputExpr(i);
}
private:
void setOutputsAndInputsAndClobbers(const ASTContext &C,
IdentifierInfo **Names,
StringLiteral **Constraints,
Stmt **Exprs,
unsigned NumOutputs,
unsigned NumInputs,
StringLiteral **Clobbers,
unsigned NumClobbers);
public:
//===--- Other ---===//
/// getNamedOperand - Given a symbolic operand reference like %[foo],
/// translate this into a numeric value needed to reference the same operand.
/// This returns -1 if the operand name is invalid.
int getNamedOperand(StringRef SymbolicName) const;
StringRef getClobber(unsigned i) const;
StringLiteral *getClobberStringLiteral(unsigned i) { return Clobbers[i]; }
const StringLiteral *getClobberStringLiteral(unsigned i) const {
return Clobbers[i];
}
// GCC-style asm spans from the 'asm' keyword to the closing ')'.
SourceLocation getLocStart() const LLVM_READONLY { return AsmLoc; }
SourceLocation getLocEnd() const LLVM_READONLY { return RParenLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == GCCAsmStmtClass;
}
};
/// This represents a Microsoft inline-assembly statement extension.
class MSAsmStmt : public AsmStmt {
friend class ASTStmtReader;
SourceLocation LBraceLoc, EndLoc;
StringRef AsmStr;
unsigned NumAsmToks = 0;
Token *AsmToks = nullptr;
// Constraints are indexed with outputs first, then inputs.
StringRef *Constraints = nullptr;
StringRef *Clobbers = nullptr;
public:
MSAsmStmt(const ASTContext &C, SourceLocation asmloc,
SourceLocation lbraceloc, bool issimple, bool isvolatile,
ArrayRef<Token> asmtoks, unsigned numoutputs, unsigned numinputs,
ArrayRef<StringRef> constraints,
ArrayRef<Expr*> exprs, StringRef asmstr,
ArrayRef<StringRef> clobbers, SourceLocation endloc);
/// \brief Build an empty MS-style inline-assembly statement.
explicit MSAsmStmt(EmptyShell Empty) : AsmStmt(MSAsmStmtClass, Empty) {}
SourceLocation getLBraceLoc() const { return LBraceLoc; }
void setLBraceLoc(SourceLocation L) { LBraceLoc = L; }
SourceLocation getEndLoc() const { return EndLoc; }
void setEndLoc(SourceLocation L) { EndLoc = L; }
// Brace-delimited form is detected by a valid '{' location.
bool hasBraces() const { return LBraceLoc.isValid(); }
unsigned getNumAsmToks() { return NumAsmToks; }
Token *getAsmToks() { return AsmToks; }
//===--- Asm String Analysis ---===//
StringRef getAsmString() const { return AsmStr; }
/// Assemble final IR asm string.
std::string generateAsmString(const ASTContext &C) const;
//===--- Output operands ---===//
StringRef getOutputConstraint(unsigned i) const {
assert(i < NumOutputs);
return Constraints[i];
}
Expr *getOutputExpr(unsigned i);
const Expr *getOutputExpr(unsigned i) const {
return const_cast<MSAsmStmt*>(this)->getOutputExpr(i);
}
//===--- Input operands ---===//
// Input constraints follow the NumOutputs output constraints.
StringRef getInputConstraint(unsigned i) const {
assert(i < NumInputs);
return Constraints[i + NumOutputs];
}
Expr *getInputExpr(unsigned i);
void setInputExpr(unsigned i, Expr *E);
const Expr *getInputExpr(unsigned i) const {
return const_cast<MSAsmStmt*>(this)->getInputExpr(i);
}
//===--- Other ---===//
ArrayRef<StringRef> getAllConstraints() const {
return llvm::makeArrayRef(Constraints, NumInputs + NumOutputs);
}
ArrayRef<StringRef> getClobbers() const {
return llvm::makeArrayRef(Clobbers, NumClobbers);
}
ArrayRef<Expr*> getAllExprs() const {
return llvm::makeArrayRef(reinterpret_cast<Expr**>(Exprs),
NumInputs + NumOutputs);
}
StringRef getClobber(unsigned i) const { return getClobbers()[i]; }
private:
void initialize(const ASTContext &C, StringRef AsmString,
ArrayRef<Token> AsmToks, ArrayRef<StringRef> Constraints,
ArrayRef<Expr*> Exprs, ArrayRef<StringRef> Clobbers);
public:
SourceLocation getLocStart() const LLVM_READONLY { return AsmLoc; }
SourceLocation getLocEnd() const LLVM_READONLY { return EndLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == MSAsmStmtClass;
}
// Children are all operand expressions (outputs then inputs).
child_range children() {
return child_range(&Exprs[0], &Exprs[NumInputs + NumOutputs]);
}
};
// Represents a Windows SEH '__except' handler: its filter expression and
// the handler block.
class SEHExceptStmt : public Stmt {
friend class ASTReader;
friend class ASTStmtReader;
SourceLocation Loc;
// Children[FILTER_EXPR] is the filter expression; Children[BLOCK] is the
// handler compound statement.
Stmt *Children[2];
enum { FILTER_EXPR, BLOCK };
SEHExceptStmt(SourceLocation Loc, Expr *FilterExpr, Stmt *Block);
explicit SEHExceptStmt(EmptyShell E) : Stmt(SEHExceptStmtClass, E) {}
public:
static SEHExceptStmt* Create(const ASTContext &C,
SourceLocation ExceptLoc,
Expr *FilterExpr,
Stmt *Block);
SourceLocation getLocStart() const LLVM_READONLY { return getExceptLoc(); }
SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); }
SourceLocation getExceptLoc() const { return Loc; }
SourceLocation getEndLoc() const { return getBlock()->getLocEnd(); }
// Filter expression is stored as a Stmt* and cast back to Expr*.
Expr *getFilterExpr() const {
return reinterpret_cast<Expr*>(Children[FILTER_EXPR]);
}
CompoundStmt *getBlock() const {
return cast<CompoundStmt>(Children[BLOCK]);
}
child_range children() {
return child_range(Children,Children+2);
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == SEHExceptStmtClass;
}
};
// Represents a Windows SEH '__finally' block.
class SEHFinallyStmt : public Stmt {
friend class ASTReader;
friend class ASTStmtReader;
SourceLocation Loc;
// The compound statement making up the __finally block.
Stmt *Block;
SEHFinallyStmt(SourceLocation Loc, Stmt *Block);
explicit SEHFinallyStmt(EmptyShell E) : Stmt(SEHFinallyStmtClass, E) {}
public:
static SEHFinallyStmt* Create(const ASTContext &C,
SourceLocation FinallyLoc,
Stmt *Block);
SourceLocation getLocStart() const LLVM_READONLY { return getFinallyLoc(); }
SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); }
SourceLocation getFinallyLoc() const { return Loc; }
SourceLocation getEndLoc() const { return Block->getLocEnd(); }
CompoundStmt *getBlock() const { return cast<CompoundStmt>(Block); }
child_range children() {
return child_range(&Block,&Block+1);
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == SEHFinallyStmtClass;
}
};
// Represents a Windows SEH '__try' (or C++ 'try' in this modeling) with its
// handler, which is either a SEHExceptStmt or a SEHFinallyStmt.
class SEHTryStmt : public Stmt {
friend class ASTReader;
friend class ASTStmtReader;
bool IsCXXTry;
SourceLocation TryLoc;
// Children[TRY] is the try block; Children[HANDLER] is the handler.
Stmt *Children[2];
enum { TRY = 0, HANDLER = 1 };
SEHTryStmt(bool isCXXTry, // true if 'try' otherwise '__try'
SourceLocation TryLoc,
Stmt *TryBlock,
Stmt *Handler);
explicit SEHTryStmt(EmptyShell E) : Stmt(SEHTryStmtClass, E) {}
public:
static SEHTryStmt* Create(const ASTContext &C, bool isCXXTry,
SourceLocation TryLoc, Stmt *TryBlock,
Stmt *Handler);
SourceLocation getLocStart() const LLVM_READONLY { return getTryLoc(); }
SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); }
SourceLocation getTryLoc() const { return TryLoc; }
SourceLocation getEndLoc() const { return Children[HANDLER]->getLocEnd(); }
bool getIsCXXTry() const { return IsCXXTry; }
CompoundStmt* getTryBlock() const {
return cast<CompoundStmt>(Children[TRY]);
}
Stmt *getHandler() const { return Children[HANDLER]; }
/// Returns 0 if not defined
SEHExceptStmt *getExceptHandler() const;
SEHFinallyStmt *getFinallyHandler() const;
child_range children() {
return child_range(Children,Children+2);
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == SEHTryStmtClass;
}
};
/// Represents a __leave statement.
class SEHLeaveStmt : public Stmt {
  SourceLocation LeaveLoc; // Location of the '__leave' keyword.

public:
  explicit SEHLeaveStmt(SourceLocation LL)
      : Stmt(SEHLeaveStmtClass), LeaveLoc(LL) {}

  /// \brief Build an empty __leave statement.
  explicit SEHLeaveStmt(EmptyShell Empty) : Stmt(SEHLeaveStmtClass, Empty) {}

  SourceLocation getLeaveLoc() const { return LeaveLoc; }
  void setLeaveLoc(SourceLocation L) { LeaveLoc = L; }

  // The statement is exactly the '__leave' keyword.
  SourceLocation getLocStart() const LLVM_READONLY { return LeaveLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return LeaveLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHLeaveStmtClass;
  }

  // A __leave has no child statements.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
};
/// \brief This captures a statement into a function. For example, the following
/// pragma annotated compound statement can be represented as a CapturedStmt,
/// and this compound statement is the body of an anonymous outlined function.
/// @code
/// #pragma omp parallel
/// {
/// compute();
/// }
/// @endcode
class CapturedStmt : public Stmt {
public:
/// \brief The different capture forms: by 'this', by reference, capture for
/// variable-length array type etc.
enum VariableCaptureKind {
VCK_This,
VCK_ByRef,
VCK_ByCopy,
VCK_VLAType,
};
/// \brief Describes the capture of either a variable, or 'this', or
/// variable-length array type.
class Capture {
llvm::PointerIntPair<VarDecl *, 2, VariableCaptureKind> VarAndKind;
SourceLocation Loc;
public:
friend class ASTStmtReader;
/// \brief Create a new capture.
///
/// \param Loc The source location associated with this capture.
///
/// \param Kind The kind of capture (this, ByRef, ...).
///
/// \param Var The variable being captured, or null if capturing this.
Capture(SourceLocation Loc, VariableCaptureKind Kind,
VarDecl *Var = nullptr);
/// \brief Determine the kind of capture.
VariableCaptureKind getCaptureKind() const;
/// \brief Retrieve the source location at which the variable or 'this' was
/// first used.
SourceLocation getLocation() const { return Loc; }
/// \brief Determine whether this capture handles the C++ 'this' pointer.
bool capturesThis() const { return getCaptureKind() == VCK_This; }
/// \brief Determine whether this capture handles a variable (by reference).
bool capturesVariable() const { return getCaptureKind() == VCK_ByRef; }
/// \brief Determine whether this capture handles a variable by copy.
bool capturesVariableByCopy() const {
return getCaptureKind() == VCK_ByCopy;
}
/// \brief Determine whether this capture handles a variable-length array
/// type.
bool capturesVariableArrayType() const {
return getCaptureKind() == VCK_VLAType;
}
/// \brief Retrieve the declaration of the variable being captured.
///
/// This operation is only valid if this capture captures a variable.
VarDecl *getCapturedVar() const;
};
private:
/// \brief The number of variable captured, including 'this'.
unsigned NumCaptures;
/// \brief The pointer part is the implicit the outlined function and the
/// int part is the captured region kind, 'CR_Default' etc.
llvm::PointerIntPair<CapturedDecl *, 1, CapturedRegionKind> CapDeclAndKind;
/// \brief The record for captured variables, a RecordDecl or CXXRecordDecl.
RecordDecl *TheRecordDecl = nullptr;
/// \brief Construct a captured statement.
CapturedStmt(Stmt *S, CapturedRegionKind Kind, ArrayRef<Capture> Captures,
ArrayRef<Expr *> CaptureInits, CapturedDecl *CD, RecordDecl *RD);
/// \brief Construct an empty captured statement.
CapturedStmt(EmptyShell Empty, unsigned NumCaptures);
Stmt **getStoredStmts() { return reinterpret_cast<Stmt **>(this + 1); }
Stmt *const *getStoredStmts() const {
return reinterpret_cast<Stmt *const *>(this + 1);
}
Capture *getStoredCaptures() const;
void setCapturedStmt(Stmt *S) { getStoredStmts()[NumCaptures] = S; }
public:
friend class ASTStmtReader;
static CapturedStmt *Create(const ASTContext &Context, Stmt *S,
CapturedRegionKind Kind,
ArrayRef<Capture> Captures,
ArrayRef<Expr *> CaptureInits,
CapturedDecl *CD, RecordDecl *RD);
static CapturedStmt *CreateDeserialized(const ASTContext &Context,
unsigned NumCaptures);
/// \brief Retrieve the statement being captured.
Stmt *getCapturedStmt() { return getStoredStmts()[NumCaptures]; }
const Stmt *getCapturedStmt() const { return getStoredStmts()[NumCaptures]; }
/// \brief Retrieve the outlined function declaration.
CapturedDecl *getCapturedDecl();
const CapturedDecl *getCapturedDecl() const;
/// \brief Set the outlined function declaration.
void setCapturedDecl(CapturedDecl *D);
/// \brief Retrieve the captured region kind.
CapturedRegionKind getCapturedRegionKind() const;
/// \brief Set the captured region kind.
void setCapturedRegionKind(CapturedRegionKind Kind);
/// \brief Retrieve the record declaration for captured variables.
const RecordDecl *getCapturedRecordDecl() const { return TheRecordDecl; }
/// \brief Set the record declaration for captured variables.
void setCapturedRecordDecl(RecordDecl *D) {
assert(D && "null RecordDecl");
TheRecordDecl = D;
}
/// \brief True if this variable has been captured.
bool capturesVariable(const VarDecl *Var) const;
/// \brief An iterator that walks over the captures.
using capture_iterator = Capture *;
using const_capture_iterator = const Capture *;
using capture_range = llvm::iterator_range<capture_iterator>;
using capture_const_range = llvm::iterator_range<const_capture_iterator>;
capture_range captures() {
return capture_range(capture_begin(), capture_end());
}
capture_const_range captures() const {
return capture_const_range(capture_begin(), capture_end());
}
/// \brief Retrieve an iterator pointing to the first capture.
capture_iterator capture_begin() { return getStoredCaptures(); }
const_capture_iterator capture_begin() const { return getStoredCaptures(); }
/// \brief Retrieve an iterator pointing past the end of the sequence of
/// captures.
capture_iterator capture_end() const {
return getStoredCaptures() + NumCaptures;
}
/// \brief Retrieve the number of captures, including 'this'.
unsigned capture_size() const { return NumCaptures; }
/// \brief Iterator that walks over the capture initialization arguments.
using capture_init_iterator = Expr **;
using capture_init_range = llvm::iterator_range<capture_init_iterator>;
/// \brief Const iterator that walks over the capture initialization
/// arguments.
using const_capture_init_iterator = Expr *const *;
using const_capture_init_range =
llvm::iterator_range<const_capture_init_iterator>;
capture_init_range capture_inits() {
return capture_init_range(capture_init_begin(), capture_init_end());
}
const_capture_init_range capture_inits() const {
return const_capture_init_range(capture_init_begin(), capture_init_end());
}
/// \brief Retrieve the first initialization argument.
capture_init_iterator capture_init_begin() {
return reinterpret_cast<Expr **>(getStoredStmts());
}
const_capture_init_iterator capture_init_begin() const {
return reinterpret_cast<Expr *const *>(getStoredStmts());
}
/// \brief Retrieve the iterator pointing one past the last initialization
/// argument.
capture_init_iterator capture_init_end() {
return capture_init_begin() + NumCaptures;
}
const_capture_init_iterator capture_init_end() const {
return capture_init_begin() + NumCaptures;
}
SourceLocation getLocStart() const LLVM_READONLY {
return getCapturedStmt()->getLocStart();
}
SourceLocation getLocEnd() const LLVM_READONLY {
return getCapturedStmt()->getLocEnd();
}
SourceRange getSourceRange() const LLVM_READONLY {
return getCapturedStmt()->getSourceRange();
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == CapturedStmtClass;
}
child_range children();
};
} // namespace clang
#endif // LLVM_CLANG_AST_STMT_H
|
NGmerge.c | /*
John M. Gaspar (jsh58@wildcats.unh.edu)
April 2015 (updated 2016, 2017)
Analyzing paired-end reads for overlaps. Two modes:
- 'stitch': producing a single, merged read for reads
with sufficient overlaps
- 'adapter-removal': removing adapters (3' overhangs
of stitched alignment) from individual reads
Version 0.2_dev
*/
#include <errno.h>
#include <getopt.h>
#include <limits.h>
#include <omp.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <zlib.h>

#include "NGmerge.h"
/* void printVersion()
 * Write the program version and copyright notice to stderr,
 * then terminate the program.
 */
void printVersion(void) {
  fprintf(stderr,
    "NGmerge, version %s\nCopyright (C) 2017 John M. Gaspar (jsh58@wildcats.unh.edu)\n",
    VERSION);
  exit(-1);
}
/* void usage()
 * Prints usage information (required arguments, alignment
 * parameters, and I/O options) to stderr, then exits.
 * All option characters and defaults come from NGmerge.h macros.
 */
void usage(void) {
  fprintf(stderr, "Usage: ./NGmerge {-%c <file> -%c <file>", FIRST, SECOND);
  fprintf(stderr, " -%c <file>} [optional arguments]\n", OUTFILE);
  // required arguments
  fprintf(stderr, "Required arguments:\n");
  fprintf(stderr, "  -%c <file>       Input FASTQ file with reads from forward direction\n", FIRST);
  fprintf(stderr, "  -%c <file>       Input FASTQ file with reads from reverse direction\n", SECOND);
  fprintf(stderr, "  -%c <file>       Output FASTQ file(s):\n", OUTFILE);
  fprintf(stderr, "                  - in 'stitch' mode (def.), the file of merged reads\n");
  fprintf(stderr, "                  - in 'adapter-removal' mode (-%c), the output files\n", ADAPTOPT);
  fprintf(stderr, "                    will be <file>%s and <file>%s\n", ONEEXT, TWOEXT);
  // alignment parameters
  fprintf(stderr, "Alignment parameters:\n");
  fprintf(stderr, "  -%c <int>        Minimum overlap of the paired-end reads (def. %d)\n", OVERLAP, DEFOVER);
  fprintf(stderr, "  -%c <float>      Mismatches to allow in the overlapped region\n", MISMATCH);
  fprintf(stderr, "                    (a fraction of the overlap length; def. %.2f)\n", DEFMISM);
  fprintf(stderr, "  -%c              Use 'adapter-removal' mode (also sets -%c option)\n", ADAPTOPT, DOVEOPT);
  fprintf(stderr, "  -%c              Option to check for dovetailing (with 3' overhangs)\n", DOVEOPT);
  fprintf(stderr, "  -%c <int>        Minimum overlap of dovetailed alignments (def. %d)\n", DOVEOVER, DEFDOVE);
  fprintf(stderr, "  -%c              Option to produce shortest stitched read\n", MAXOPT);
  // I/O options
  fprintf(stderr, "I/O options:\n");
  fprintf(stderr, "  -%c <file>       Log file for stitching results of each read pair\n", LOGFILE);
  fprintf(stderr, "  -%c <file>       FASTQ files for reads that failed stitching\n", UNFILE);
  fprintf(stderr, "                    (output as <file>%s and <file>%s)\n", ONEEXT, TWOEXT);
  fprintf(stderr, "  -%c <file>       Log file for dovetailed reads (adapter sequences)\n", DOVEFILE);
  fprintf(stderr, "  -%c <file>       Log file for formatted alignments of merged reads\n", ALNFILE);
  fprintf(stderr, "  -%c/-%c          Option to gzip (-%c) or not (-%c) FASTQ output(s)\n", GZOPT, UNGZOPT, GZOPT, UNGZOPT);
  fprintf(stderr, "  -%c              Option to produce interleaved FASTQ output(s)\n", INTEROPT);
  fprintf(stderr, "  -%c <file>       Use given error profile for merged qual scores\n", QUALFILE);
  fprintf(stderr, "  -%c              Use 'fastq-join' method for merged qual scores\n", FJOINOPT);
  fprintf(stderr, "  -%c <int>        FASTQ quality offset (def. %d)\n", QUALITY, OFFSET);
  fprintf(stderr, "  -%c <int>        Maximum input quality score (0-based; def. %d)\n", SETQUAL, MAXQUAL);
  fprintf(stderr, "  -%c <int>        Number of threads to use (def. %d)\n", THREADS, DEFTHR);
  fprintf(stderr, "  -%c              Option to print status updates/counts to stderr\n", VERBOSE);
  exit(-1);
}
/* int error()
 * Write an error message to stderr: a caller-supplied prefix
 * followed by the canned text for the given error code.
 * Always returns -1, so callers can write exit(error(...)).
 */
int error(char* msg, enum errCode err) {
  const char* detail = errMsg[err];
  fprintf(stderr, "Error! %s%s\n", msg, detail);
  return -1;
}
/* void* memalloc()
 * Allocate a heap block of the given size.
 * On allocation failure, report ERRMEM and terminate;
 * never returns NULL.
 */
void* memalloc(size_t size) {
  void* block = malloc(size);
  if (block == NULL)
    exit(error("", ERRMEM));
  return block;
}
/* float getFloat(char*)
 * Converts the given char* to a float.
 * Exits with ERRFLOAT on invalid input.
 *
 * Fix: also reject inputs where no conversion was performed
 * (endptr == in). Previously an empty string passed the
 * '*endptr != \0' check and was silently accepted as 0.0.
 */
float getFloat(char* in) {
  char* endptr;
  float ans = strtof(in, &endptr);
  // endptr == in: no characters consumed (e.g. "" or "abc")
  // *endptr != '\0': trailing garbage after the number
  if (endptr == in || *endptr != '\0')
    exit(error(in, ERRFLOAT));
  return ans;
}
/* int getInt(char*)
 * Converts the given char* to an int.
 * Exits with ERRINT on invalid input.
 *
 * Fixes: reject inputs where no conversion was performed
 * (endptr == in; previously "" was accepted as 0), and reject
 * values that overflow a long (ERANGE) or do not fit in an int
 * (the old cast silently truncated them).
 */
int getInt(char* in) {
  char* endptr;
  errno = 0;
  long val = strtol(in, &endptr, 10);
  if (endptr == in || *endptr != '\0' || errno == ERANGE
      || val > INT_MAX || val < INT_MIN)
    exit(error(in, ERRINT));
  return (int) val;
}
/* char rc(char)
 * Returns the complement of the given base (A<->T, C<->G,
 * N->N). Exits with ERRUNK on any other character.
 */
char rc(char in) {
  switch (in) {
    case 'A': return 'T';
    case 'T': return 'A';
    case 'C': return 'G';
    case 'G': return 'C';
    case 'N': return 'N';
    default: {
      // report the offending character, quoted
      char msg[4] = "' '";
      msg[1] = in;
      exit(error(msg, ERRUNK));
    }
  }
  return '\0'; // unreachable
}
/* char* getLine()
 * Read the next line from a (possibly gzip-compressed) file.
 * Returns NULL at EOF / on error, like fgets.
 */
char* getLine(char* line, int size, File in, bool gz) {
  return gz ? gzgets(in.gzf, line, size)
            : fgets(line, size, in.f);
}
/* void checkHeaders()
 * Ensure headers match (up to first space character);
 * create consensus header.
 *
 * head1/head2: raw fastq header lines of the two reads.
 * header: output buffer receiving the consensus header.
 * Headers are compared char-by-char; a ' ' or '/' delimiter marks
 * the end of the read name, after which a mismatch is tolerated.
 * A mismatch before any delimiter trims head1 and exits (ERRHEAD).
 *
 * NOTE(review): assumes head1 is non-empty (begins '@'); if the
 * loop ran zero times, header[j - 1] below would read out of
 * bounds -- callers appear to guarantee this, TODO confirm.
 */
void checkHeaders(char* head1, char* head2, char* header) {
  bool ok = false; // true once a ' ' or '/' delimiter was seen
  int j;
  for (j = 0; head1[j] != '\n' && head1[j] != '\0'; j++) {
    if (head1[j] != head2[j]) {
      if (ok)
        break; // mismatch after delimiter: names already matched
      // mismatch within the read name: trim head1 at the first
      //   space/newline and report it
      for ( ; head1[j] != '\n' && head1[j] != '\0'
        && head1[j] != ' '; j++) ;
      head1[j] = '\0'; // trim head1 for err msg
      exit(error(head1, ERRHEAD));
    } else if (head1[j] == ' ')
      ok = true; // headers match
    else if (head1[j] == '/')
      ok = true; // headers match, this is added for HMP data
    header[j] = head1[j];
  }
  // terminate consensus header, dropping a trailing delimiter
  if (header[j - 1] == ' ')
    header[j - 1] = '\0'; // removing trailing space
  else if (header[j - 1] == '/')
    header[j - 1] = '\0'; // removing trailing forward slash, added for HMP data
  else
    header[j] = '\0';
}
/* void checkQual()
 * Check the given quality string for offset errors: every char
 * must lie in [offset, offset + maxQual]. On a violation, build
 * a diagnostic message and exit with ERROFFSET.
 */
void checkQual(char* qual, int len, int offset,
    int maxQual) {
  for (int i = 0; i < len; i++) {
    // in-range quality score: keep scanning
    if (qual[i] >= offset && qual[i] <= offset + maxQual)
      continue;
    // out of range: report the offending character
    char* msg = (char*) memalloc(MAX_SIZE);
    sprintf(msg, "(range [0, %d], offset %d) '%c'",
      maxQual, offset, qual[i]);
    exit(error(msg, ERROFFSET));
  }
}
/* void processSeq()
 * Process the given sequence; save length;
 * for 2nd read, save reversed seq/qual.
 *
 * read: the fastq lines of one read (read2 has EXTRA extra slots).
 * len: in/out read length -- set from the SEQ line, checked
 *   against the QUAL line.
 * i: true for the 2nd read (revComp(seq)/rev(qual) copies made).
 * j: which line to process (SEQ or QUAL).
 */
void processSeq(char** read, int* len, bool i,
    int j, int offset, int maxQual) {
  // remove new-line character and save length
  int k;
  for (k = 0; read[j][k] != '\n' && read[j][k] != '\0'; k++) ;
  read[j][k] = '\0';
  if (j == SEQ)
    *len = k; // save read length
  else if (k != *len)
    exit(error("", ERRQUAL)); // seq/qual length mismatch
  // for 2nd read (i == true), save revComp(seq) or rev(qual)
  if (i) {
    int dest = j + EXTRA; // save to 'extra' field of read2
    int m = 0;
    if (j == SEQ) {
      dest++; // increment b/c of fastq 'plus' line
      // reverse-complement; rc() also validates each character
      for (k--; k > -1; k--)
        read[dest][m++] = rc(read[j][k]);
    } else
      // quality scores are just reversed, not complemented
      for (k--; k > -1; k--)
        read[dest][m++] = read[j][k];
    read[dest][m] = '\0';
  } else if (j == SEQ)
    // check 1st read's sequence for non-ACGTN chars
    //   (rc() exits on an unknown char; its result is discarded)
    for (int m = 0; m < k; m++)
      rc(read[j][m]);
  // check quality scores
  if (j == QUAL)
    checkQual(read[j], k, offset, maxQual);
}
/* bool loadReads()
 * Load a pair of reads. Check formatting, determine
 * consensus header. Return false on EOF.
 * The two reads are loaded inside an OpenMP critical section so
 * each thread receives a matched pair from the shared inputs.
 */
bool loadReads(File in1, File in2, char** read1, char** read2,
    char* header, int* len1, int* len2, int offset,
    int maxQual, bool gz1, bool gz2) {
  // load both reads from input files (LOCK)
  bool flag = false; // boolean for EOF
  #pragma omp critical
  for (int i = 0; i < 2; i++) {
    // select input file and buffers: i == 0 -> read1, i == 1 -> read2
    File in = in1;
    char** read = read1;
    bool gz = gz1;
    if (i) {
      in = in2;
      read = read2;
      gz = gz2;
    }
    // load read (4 lines)
    for (int j = 0; j < FASTQ; j++)
      if (getLine(read[j], MAX_SIZE, in, gz) == NULL) {
        if (j == 0) {
          if (i == 0) {
            flag = true; // EOF
            break;
          } else {
            // EOF on 2nd input only: read1 is unpaired -> fatal
            int k = 0;
            for ( ; read1[HEAD][k] != '\n' && read1[HEAD][k] != '\0'
              && read1[HEAD][k] != ' '; k++) ;
            read1[HEAD][k] = '\0'; // trim header for err msg
            exit(error(read1[HEAD], ERRHEAD));
          }
        } else
          exit(error("", ERRSEQ)); // EOF mid-record: truncated fastq
      }
    if (flag)
      break;
  } // (UNLOCK)
  if (flag)
    return false; // EOF
  // check fastq formatting ('@' header line, '+' third line)
  if (read1[HEAD][0] != BEGIN || read1[PLUS][0] != PLUSCHAR
      || read2[HEAD][0] != BEGIN || read2[PLUS][0] != PLUSCHAR)
    exit(error("", ERRFASTQ));
  // process sequence/quality lines
  //   (read2 also gets revComp(seq)/rev(qual) copies)
  processSeq(read1, len1, false, SEQ, offset, maxQual);
  processSeq(read1, len1, false, QUAL, offset, maxQual);
  processSeq(read2, len2, true, SEQ, offset, maxQual);
  processSeq(read2, len2, true, QUAL, offset, maxQual);
  // check headers and build consensus header
  checkHeaders(read1[HEAD], read2[HEAD], header);
  return true;
}
/* float compare()
 * Compare two sequences over 'length' positions and return the
 * fraction mismatch, or NOTMATCH when the mismatch count exceeds
 * the allowance or the N-free overlap drops below 'overlap'.
 * 'N' positions are excluded from the effective overlap length.
 */
float compare(char* seq1, char* seq2, int length,
    float mismatch, int overlap) {
  int mis = 0;      // number of mismatches
  int len = length; // length of overlap, not counting Ns
  float allow = len * mismatch;
  for (int i = 0; i < length; i++) {
    char a = seq1[i];
    char b = seq2[i];
    if (a == 'N' || b == 'N') {
      // shrink the effective overlap; recheck both limits
      len--;
      if (len < overlap || mis > len * mismatch)
        return NOTMATCH;
      allow = len * mismatch;
    } else if (a != b) {
      mis++;
      if (mis > allow)
        return NOTMATCH;
    }
  }
  return (float) mis / len;
}
/* int findPos()
 * Find optimal overlapping position.
 * Currently, quality scores are not considered
 * (e.g. decreased penalty for a low-quality mismatch).
 *
 * Returns the offset of seq2 relative to seq1 (negative when seq2
 * overhangs seq1's 5' end); the sentinel len1 - overlap + 1 means
 * "no acceptable alignment". *best is in/out: the incoming value
 * acts as the threshold, and it receives the best fraction
 * mismatch found. qual1/qual2 are currently unused (see above).
 */
int findPos (char* seq1, char* seq2, char* qual1,
    char* qual2, int len1, int len2, int overlap,
    bool dovetail, int doveOverlap, float mismatch,
    bool maxLen, float* best) {
  // check for regular (non-dovetailed) alignments
  int pos = len1 - overlap + 1; // position of match (sentinel = no match)
  int i = len1 - overlap;       // largest offset giving >= overlap bases
  for ( ; i > -1 && len1 - i <= len2; i--) {
    // align sequences: seq2 against seq1 shifted by i
    float res = compare(seq1 + i, seq2, len1 - i,
      mismatch, overlap);
    // compare result; ties go to the later (smaller-overlap) offset
    //   only when the longest stitched read is preferred (!maxLen)
    if (res < *best || (res == *best && !maxLen)) {
      *best = res;
      pos = i;
    }
    if (res == 0.0f && maxLen)
      return pos; // shortcut for exact match
  }
  // check for dovetailing
  if (dovetail) {
    // if no regular alignment was attempted, reset i so the loop
    //   below starts at the first strictly-dovetailed offset
    if (i == len1 - overlap)
      i = (len1 > len2 ? len1 - len2 - 1 : -1);
    // continue decrementing i (negative i: seq2 overhangs the 5' end)
    for ( ; ; i--) {
      float res = NOTMATCH;
      if (i >= 0) {
        // read1 is longer, with 3' overhang; overlap spans all of read2
        if (len2 < doveOverlap)
          break;
        res = compare(seq1 + i, seq2, len2,
          mismatch, doveOverlap);
      } else if (len1 < len2 + i) {
        // read2 has 3' overhang, read1 determines overlap
        if (len1 < doveOverlap)
          break;
        res = compare(seq1, seq2 - i, len1,
          mismatch, doveOverlap);
      } else {
        // read2 has 3' overhang and determines overlap
        if (len2 + i < doveOverlap)
          break;
        res = compare(seq1, seq2 - i, len2 + i,
          mismatch, doveOverlap);
      }
      // compare result
      if (res < *best || (res == *best && !maxLen)) {
        *best = res;
        pos = i;
      }
      if (res == 0.0f && maxLen)
        return pos; // shortcut for exact match
    }
  }
  return pos;
}
/* void printDove()
 * Log 3' overhangs of dovetailed reads ('-' when a read has
 * none). Writing is guarded by the dove-file lock.
 */
void printDove(File dove, char* header, char** read1,
    char** read2, int len1, int len2, int pos,
    omp_lock_t* lock) {
  // read1 overhangs when it extends past read2's 3' end;
  // read2 overhangs when pos is negative
  bool over1 = (len1 > len2 + pos);
  bool over2 = (pos < 0);
  if (! over1 && ! over2)
    return;
  omp_set_lock(lock);
  fprintf(dove.f, "%s\t%s\t%s\n", header + 1,
    over1 ? read1[SEQ] + len2 + pos : "-",
    over2 ? read2[SEQ] + len2 + pos : "-");
  omp_unset_lock(lock);
}
/* Write the first n characters of s to a gzip stream. */
static void gzPutN(gzFile out, const char* s, int n) {
  for (int i = 0; i < n; i++)
    gzputc(out, s[i]);
}

/* void printGZNoAdapt()
 * Print the reads minus adapters (gzip output): each read's
 * seq/qual lines truncated to end1/end2 characters.
 */
void printGZNoAdapt(gzFile out1, gzFile out2,
    char** read1, char** read2, int end1, int end2) {
  // print fwd read, seq and qual trimmed to end1
  gzprintf(out1, "%s", read1[HEAD]);
  gzPutN(out1, read1[SEQ], end1);
  gzprintf(out1, "\n%s", read1[PLUS]);
  gzPutN(out1, read1[QUAL], end1);
  gzputc(out1, '\n');
  // print rev read, trimmed to end2
  gzprintf(out2, "%s", read2[HEAD]);
  gzPutN(out2, read2[SEQ], end2);
  gzprintf(out2, "\n%s", read2[PLUS]);
  gzPutN(out2, read2[QUAL], end2);
  gzputc(out2, '\n');
}
/* Write the first n characters of s to a regular stream. */
static void filePutN(FILE* out, const char* s, int n) {
  for (int i = 0; i < n; i++)
    fputc(s[i], out);
}

/* void printNoAdapt()
 * Print the reads minus adapters: each read's seq/qual lines
 * truncated to end1/end2 characters.
 */
void printNoAdapt(FILE* out1, FILE* out2, char** read1,
    char** read2, int end1, int end2) {
  // print fwd read, seq and qual trimmed to end1
  fprintf(out1, "%s", read1[HEAD]);
  filePutN(out1, read1[SEQ], end1);
  fprintf(out1, "\n%s", read1[PLUS]);
  filePutN(out1, read1[QUAL], end1);
  fputc('\n', out1);
  // print rev read, trimmed to end2
  fprintf(out2, "%s", read2[HEAD]);
  filePutN(out2, read2[SEQ], end2);
  fprintf(out2, "\n%s", read2[PLUS]);
  filePutN(out2, read2[QUAL], end2);
  fputc('\n', out2);
}
/* bool printResAdapt()
 * Control printing of reads minus adapters.
 * Return 1 if adapter found, else 0.
 */
bool printResAdapt(File out1, File out2, File dove,
    bool doveOpt, char* header, char** read1, char** read2,
    int len1, int len2, int pos, float best, bool gz,
    omp_lock_t* lock) {
  // default: print the reads full-length
  int end1 = len1, end2 = len2;
  // an adapter is present when either read has a 3' overhang
  bool adapter = (len1 > len2 + pos || pos < 0);
  if (adapter) {
    // identify locations of adapters and trim to them
    if (len1 > len2 + pos)
      end1 = len2 + pos;
    if (pos < 0)
      end2 += pos;
    if (doveOpt)
      printDove(dove, header, read1, read2,
        len1, len2, pos, lock + DOVE);
  }
  // print output (gzip or plain), under the output lock
  omp_set_lock(lock + OUT);
  if (gz)
    printGZNoAdapt(out1.gzf, out2.gzf, read1, read2,
      end1, end2);
  else
    printNoAdapt(out1.f, out2.f, read1, read2,
      end1, end2);
  omp_unset_lock(lock + OUT);
  return adapter;
}
/* void printAln2()
 * Print details of stitch mismatches: one line per position in
 * the overlap where the bases differ or either base is 'N'.
 * i indexes read1; j indexes read2's revComp copy.
 */
void printAln2(File aln, char* header, char** read1,
    char** read2, int len1, int len2, int pos) {
  // starting indices depend on which read begins first
  int i = (pos < 0 ? 0 : pos);
  int j = (pos < 0 ? -pos : 0);
  for ( ; i < len1 && j < len2; i++, j++) {
    char c1 = read1[SEQ][i];
    char c2 = read2[SEQ + EXTRA + 1][j];
    if (c1 == 'N' || c2 == 'N' || c1 != c2)
      fprintf(aln.f, "%s\t%d\t%c\t%c\t%c\t%c\n",
        header + 1, i, c1, read1[QUAL][i],
        c2, read2[QUAL + EXTRA][j]);
  }
}
/* void printAln()
 * Print nicely formatted alignment of stitched reads: the two
 * sequences padded so overlapping positions line up, a marker
 * line ('|' match, ':' involving 'N', ' ' mismatch), and the
 * two quality strings with the same padding.
 */
void printAln(File aln, char* header, char** read1,
    char** read2, int len1, int len2, int pos) {
  fprintf(aln.f, "%s\n", header + 1);
  // print sequence alignment; pad read1 left when pos < 0
  fprintf(aln.f, "seq_R1: ");
  for (int i = 0; i > pos; i--)
    fputc(' ', aln.f);
  fprintf(aln.f, "%s\n", read1[SEQ]);
  // print '|' for matches, ':' for Ns
  fprintf(aln.f, " ");
  int i;
  for (i = 0; i < abs(pos); i++)
    fputc(' ', aln.f);
  int j = 0;
  if (pos < 0) {
    j = -pos; // read2 starts first; skip its overhang
    i = 0;
  }
  while (i < len1 && j < len2) {
    fputc((read1[SEQ][i] == 'N' || read2[SEQ + EXTRA + 1][j] == 'N') ?
      ':' : (read1[SEQ][i] == read2[SEQ + EXTRA + 1][j] ?
      '|' : ' '), aln.f);
    i++;
    j++;
  }
  fputc('\n', aln.f);
  // print read2 (its revComp copy), padded right when pos > 0
  fprintf(aln.f, "seq_R2: ");
  for (int i = 0; i < pos; i++)
    fputc(' ', aln.f);
  fprintf(aln.f, "%s\n\n", read2[SEQ + EXTRA + 1]);
  // print quality scores with matching padding
  fprintf(aln.f, "qual_R1: ");
  for (int i = 0; i > pos; i--)
    fputc(' ', aln.f);
  fprintf(aln.f, "%s\n", read1[QUAL]);
  fprintf(aln.f, "qual_R2: ");
  for (int i = 0; i < pos; i++)
    fputc(' ', aln.f);
  fprintf(aln.f, "%s\n\n", read2[QUAL + EXTRA]);
}
/* void createSeq()
 * Create stitched sequence (into seq1, qual1).
 * Use empirical error profiles for quality scores,
 * or 'fastq-join' method.
 *
 * seq2/qual2 are read2's revComp(seq)/rev(qual); pos is read2's
 * offset relative to read1 (may be negative). seq1/qual1 must
 * have room for len2 + pos + 1 chars.
 */
void createSeq(char* seq1, char* seq2, char* qual1,
    char* qual2, int len1, int len2, int pos,
    int offset, char** match, char** mism, bool fjoin) {
  int len = len2 + pos; // length of stitched sequence
  for (int i = 0; i < len; i++) {
    if (i - pos < 0) {
      // 1st read only: continue (seq1/qual1 already correct)
      continue;
    } else if (i >= len1) {
      // 2nd read only: copy seq and qual
      seq1[i] = seq2[i-pos];
      qual1[i] = qual2[i-pos];
    } else if (seq2[i-pos] == 'N') {
      // 2nd read 'N': continue (keep 1st read's base and qual)
      continue;
    } else if (seq1[i] == 'N') {
      // 1st read 'N': copy seq and qual
      seq1[i] = seq2[i-pos];
      qual1[i] = qual2[i-pos];
    } else if (seq1[i] != seq2[i-pos]) {
      // mismatch:
      //   - base matches higher quality score or equal
      //     quality score that is closer to 5' end
      //   - quality score calculated as diff (fastq-join
      //     method) or copied from mism array
      if (qual1[i] < qual2[i-pos] ||
          (qual1[i] == qual2[i-pos] && i >= len / 2.0) )
        seq1[i] = seq2[i-pos];
      if (fjoin)
        qual1[i] = abs(qual2[i-pos] - qual1[i]) + offset;
      else
        qual1[i] = mism[ (int) qual1[i] - offset ]
          [ (int) qual2[i-pos] - offset ] + offset;
    } else {
      // match:
      //   - quality score calculated as max (fastq-join
      //     method) or copied from match array
      if (fjoin) {
        if (qual1[i] < qual2[i-pos])
          qual1[i] = qual2[i-pos];
      } else
        qual1[i] = match[ (int) qual1[i] - offset ]
          [ (int) qual2[i-pos] - offset ] + offset;
    }
  }
  // terminate the stitched strings
  seq1[len] = '\0';
  qual1[len] = '\0';
}
/* void printRes()
 * Print stitched read: log the result, optionally print
 * dovetail/alignment info, build the stitched sequence
 * (in place, into read1's seq/qual buffers), and write the
 * merged read to the output. Each shared file is guarded by
 * its own lock from the lock array.
 */
void printRes(File out, File log, bool logOpt, File dove,
    bool doveOpt, File aln, int alnOpt, char* header,
    char** read1, char** read2, int len1, int len2,
    int pos, float best, int offset, bool gz, bool fjoin,
    char** match, char** mism, omp_lock_t* lock) {
  // log result: overlap length, stitched length, fraction mismatch
  if (logOpt) {
    omp_set_lock(lock + LOG);
    fprintf(log.f, "%s\t%d\t%d\t", header + 1,
      pos < 0 ? (len2+pos < len1 ? len2+pos : len1) :
      (len1-pos < len2 ? len1-pos : len2), len2 + pos);
    best ? fprintf(log.f, "%.3f", best) : fprintf(log.f, "0");
    fprintf(log.f, "\n");
    omp_unset_lock(lock + LOG);
  }
  if (doveOpt)
    printDove(dove, header, read1, read2, len1, len2,
      pos, lock + DOVE);
  // print formatted alignments
  if (alnOpt == 1) {
    omp_set_lock(lock + ALN);
    printAln(aln, header, read1, read2, len1, len2, pos);
    // create stitched sequence -- done while still holding the
    //   ALN lock so the merged output below matches the alignment
    //   just printed (createSeq overwrites read1's seq/qual)
    createSeq(read1[SEQ], read2[SEQ + EXTRA + 1],
      read1[QUAL], read2[QUAL + EXTRA], len1, len2,
      pos, offset, match, mism, fjoin);
    // print merged seq to alignment output
    fprintf(aln.f, "merged\nseq: ");
    for (int i = 0; i > pos; i--)
      fputc(' ', aln.f);
    fprintf(aln.f, "%s\n", read1[SEQ]);
    fprintf(aln.f, "qual: ");
    for (int i = 0; i > pos; i--)
      fputc(' ', aln.f);
    fprintf(aln.f, "%s\n\n\n", read1[QUAL]);
    omp_unset_lock(lock + ALN);
  } else {
    // print stitch differences
    if (alnOpt == 2) {
      omp_set_lock(lock + ALN);
      printAln2(aln, header, read1, read2, len1, len2, pos);
      omp_unset_lock(lock + ALN);
    }
    // create stitched sequence (no alignment output pending)
    createSeq(read1[SEQ], read2[SEQ + EXTRA + 1],
      read1[QUAL], read2[QUAL + EXTRA], len1, len2,
      pos, offset, match, mism, fjoin);
  }
  // print stitched sequence
  omp_set_lock(lock + OUT);
  if (gz)
    gzprintf(out.gzf, "%s\n%s\n+\n%s\n", header,
      read1[SEQ], read1[QUAL]);
  else
    fprintf(out.f, "%s\n%s\n+\n%s\n", header,
      read1[SEQ], read1[QUAL]);
  omp_unset_lock(lock + OUT);
}
/* void printFail()
 * Print stitch-failure reads: record 'NA' in the log (if
 * logging) and write both original reads to the 'unstitched'
 * outputs (if requested), under the appropriate locks.
 */
void printFail(File un1, File un2, bool unOpt,
    File log, bool logOpt, char* header, char** read1,
    char** read2, bool gz, omp_lock_t* outLock,
    omp_lock_t* logLock) {
  // record the failure in the log file
  if (logOpt) {
    omp_set_lock(logLock);
    fprintf(log.f, "%s\t%s\n", header + 1, NA);
    omp_unset_lock(logLock);
  }
  if (! unOpt)
    return;
  // write both reads, verbatim, to the failure outputs
  omp_set_lock(outLock);
  char** reads[2] = { read1, read2 };
  File outs[2] = { un1, un2 };
  for (int r = 0; r < 2; r++) {
    if (gz)
      gzprintf(outs[r].gzf, "%s%s\n%s%s\n", reads[r][HEAD],
        reads[r][SEQ], reads[r][PLUS], reads[r][QUAL]);
    else
      fprintf(outs[r].f, "%s%s\n%s%s\n", reads[r][HEAD],
        reads[r][SEQ], reads[r][PLUS], reads[r][QUAL]);
  }
  omp_unset_lock(outLock);
}
/* int readFile()
 * Analyzes the reads in a set of input files.
 * Controls writing to the output file(s).
 * Multithreaded.
 *
 * Returns the number of fragments (read pairs) analyzed;
 * *stitch receives the count successfully stitched (or, in
 * adapter-removal mode, the count with adapters removed).
 *
 * Fix: the lock-destroy loop previously used a hard-coded 5
 * instead of OMP_LOCKS; the two happened to agree, but the
 * code broke silently if OMP_LOCKS ever changed.
 */
int readFile(File in1, File in2, File out, File out2,
    File un1, File un2, bool unOpt, File log,
    bool logOpt, int overlap, bool dovetail, int doveOverlap,
    File dove, bool doveOpt, File aln, int alnOpt,
    bool adaptOpt, float mismatch, bool maxLen,
    int* stitch, int offset, int maxQual,
    bool gz1, bool gz2, bool gzOut, bool fjoin,
    char** match, char** mism, int threads) {
  // initialize omp locks -- out, un, log, dove, aln
  omp_lock_t lock[OMP_LOCKS];
  for (int i = 0; i < OMP_LOCKS; i++)
    omp_init_lock(&lock[i]);
  // process files in parallel
  int count = 0, stitchRed = 0;
  #pragma omp parallel num_threads(threads) reduction(+: count, stitchRed)
  {
    // allocate memory for both reads
    char** read1 = (char**) memalloc(FASTQ * sizeof(char*));
    char** read2 = (char**) memalloc((FASTQ + EXTRA) * sizeof(char*));
    for (int i = 0; i < FASTQ + EXTRA; i++) {
      if (i < FASTQ)
        read1[i] = (char*) memalloc(MAX_SIZE);
      // for 2nd read, save extra fields for revComp(seq) and rev(qual)
      read2[i] = (char*) memalloc(MAX_SIZE);
    }
    char* header = (char*) memalloc(MAX_SIZE); // consensus header
    // process reads
    int len1 = 0, len2 = 0; // lengths of reads
    while (loadReads(in1, in2, read1, read2, header,
        &len1, &len2, offset, maxQual, gz1, gz2)) {
      // find optimal overlap
      float best = 1.0f;
      int pos = findPos(read1[SEQ], read2[SEQ + EXTRA + 1],
        read1[QUAL], read2[QUAL + EXTRA], len1, len2, overlap,
        dovetail, doveOverlap, mismatch, maxLen, &best);
      // print result
      if (pos == len1 - overlap + 1) {
        // stitch failure: in adapter mode, reads pass through unchanged
        if (adaptOpt)
          printFail(out, out2, 1, log, 0, header, read1,
            read2, gzOut, lock + OUT, lock + LOG);
        else
          printFail(un1, un2, unOpt, log, logOpt, header,
            read1, read2, gzOut, lock + UN, lock + LOG);
      } else {
        // stitch success
        if (adaptOpt) {
          stitchRed += printResAdapt(out, out2, dove, doveOpt,
            header, read1, read2, len1, len2, pos, best,
            gzOut, lock);
        } else {
          printRes(out, log, logOpt, dove, doveOpt, aln, alnOpt,
            header, read1, read2, len1, len2, pos, best, offset,
            gzOut, fjoin, match, mism, lock);
          stitchRed++;
        }
      }
      count++;
    }
    // free memory
    free(header);
    for (int i = 0; i < FASTQ + EXTRA; i++) {
      if (i < FASTQ)
        free(read1[i]);
      free(read2[i]);
    }
    free(read1);
    free(read2);
  }  // END parallel
  // destroy omp locks (all OMP_LOCKS, not a hard-coded count)
  for (int i = 0; i < OMP_LOCKS; i++)
    omp_destroy_lock(&lock[i]);
  *stitch = stitchRed;
  return count;
}
/* void openWrite()
 * Open a file for writing (stdout if file is '-').
 * For gzip output, the GZEXT extension is appended unless the
 * name already ends in GZEXT or is /dev/null.
 *
 * Fix: guard the GZEXT suffix comparison against names shorter
 * than the extension -- 'outFile + strlen(outFile) - strlen(GZEXT)'
 * would otherwise point before the start of the string
 * (undefined behavior).
 */
void openWrite(char* outFile, File* out, bool gz) {
  if (outFile[0] == '-' && strlen(outFile) > 1)
    exit(error(outFile, ERRNAME));
  if (gz) {
    size_t len = strlen(outFile);
    // only test the suffix when the name is long enough to have one
    if ( (len >= strlen(GZEXT)
        && !strcmp(outFile + len - strlen(GZEXT), GZEXT))
        || !strcmp(outFile, "/dev/null"))
      out->gzf = gzopen(outFile, "w");
    else if (!strcmp(outFile, "-"))
      out->gzf = gzdopen(fileno(stdout), "wb");
    else {
      // add ".gz" to outFile
      char* outFile2 = memalloc(len + strlen(GZEXT) + 1);
      strcpy(outFile2, outFile);
      strcat(outFile2, GZEXT);
      out->gzf = gzopen(outFile2, "w");
      free(outFile2);
    }
    if (out->gzf == NULL)
      exit(error(outFile, ERROPENW));
  } else {
    out->f = (strcmp(outFile, "-") ?
      fopen(outFile, "w") : stdout);
    if (out->f == NULL)
      exit(error(outFile, ERROPENW));
  }
}
/* void openFiles()
 * Opens output files for the program,
 * adjusting file names/extensions as needed.
 * In adapter-removal mode the main output becomes a pair of
 * files (<name>_1.fastq / _2.fastq) unless interleaved output
 * was requested; the same applies to the 'unstitched' outputs
 * in stitch mode.
 */
void openFiles(char* outFile, File* out, File* out2,
    char* unFile, File* un1, File* un2,
    char* logFile, File* log,
    char* doveFile, File* dove, bool dovetail,
    char* alnFile, File* aln,
    bool adaptOpt, bool gz, bool interOpt) {
  if (adaptOpt) {
    if (interOpt)
      openWrite(outFile, out, gz); // single interleaved output
    else if (! strcmp(outFile, "-"))
      exit(error("stdout + \"_1.fastq\"", ERROPENW)); // can't split stdout
    else if (! strcmp(outFile, "/dev/null")) {
      // discard both outputs
      openWrite(outFile, out, gz);
      openWrite(outFile, out2, gz);
    } else {
      // add "_1.fastq" and "_2.fastq" extensions
      int add = strlen(ONEEXT) > strlen(TWOEXT) ?
        strlen(ONEEXT) + 1 : strlen(TWOEXT) + 1;
      char* outFile2 = memalloc(strlen(outFile) + add);
      strcpy(outFile2, outFile);
      strcat(outFile2, ONEEXT);
      openWrite(outFile2, out, gz);
      strcpy(outFile2, outFile);
      strcat(outFile2, TWOEXT);
      openWrite(outFile2, out2, gz);
      free(outFile2);
    }
  } else {
    openWrite(outFile, out, gz);
    // open optional files
    if (unFile != NULL) {
      if (interOpt)
        openWrite(unFile, un1, gz); // single interleaved output
      else if (! strcmp(unFile, "-"))
        exit(error("stdout + \"_1.fastq\"", ERROPENW));
      else {
        // add "_1.fastq" and "_2.fastq" extensions
        int add = strlen(ONEEXT) > strlen(TWOEXT) ?
          strlen(ONEEXT) + 1 : strlen(TWOEXT) + 1;
        char* unFile2 = memalloc(strlen(unFile) + add);
        strcpy(unFile2, unFile);
        strcat(unFile2, ONEEXT);
        openWrite(unFile2, un1, gz);
        strcpy(unFile2, unFile);
        strcat(unFile2, TWOEXT);
        openWrite(unFile2, un2, gz);
        free(unFile2);
      }
    }
    if (logFile != NULL) {
      // stitch log is always plain text, with a header row
      openWrite(logFile, log, false);
      fprintf(log->f, "Read\tOverlapLen\tStitchedLen\tMismatch\n");
    }
    if (alnFile != NULL)
      openWrite(alnFile, aln, false);
  }
  if (dovetail && doveFile != NULL) {
    // dovetail log (both modes), plain text with a header row
    openWrite(doveFile, dove, false);
    fprintf(dove->f, "Read\tAdapter_R1\tAdapter_R2\n");
  }
}
/* bool openRead()
 * Open a file for reading (stdin if file is '-').
 * Return true if gzip compressed.
 * The stream is probed for the gzip magic number; regular files
 * are then rewound (or reopened via zlib), while stdin has the
 * probed bytes pushed back.
 */
bool openRead(char* inFile, File* in) {
  // open file or stdin
  bool stdinBool = (strcmp(inFile, "-") ? false : true);
  FILE* dummy = (stdinBool ? stdin : fopen(inFile, "r"));
  if (dummy == NULL)
    exit(error(inFile, ERROPEN));
  // check for gzip compression: magic number 0x1F, 0x8B
  bool gzip = true;
  int save = 0; // first char to pushback (for stdin)
  int i, j;
  for (i = 0; i < 2; i++) {
    j = fgetc(dummy);
    if (j == EOF)
      exit(error(inFile, ERROPEN));
    if ( (i && (unsigned char) j != 0x8B)
        || (! i && (unsigned char) j != 0x1F) ) {
      gzip = false;
      break;
    }
    if (! i)
      save = j; // remember the 1st byte for pushback
  }
  // for stdin, push back chars (LIFO order: j first, then save,
  //   so they are re-read in the original order)
  // NOTE(review): a 2nd ungetc is not guaranteed by the C
  //   standard (only one pushback char is) -- works on common
  //   libcs; TODO confirm on target platforms
  if (stdinBool) {
    if (gzip)
      exit(error("", ERRGZIP)); // gzip input via stdin unsupported
    if (ungetc(j, dummy) == EOF)
      exit(error("", ERRUNGET));
    if (i && ungetc(save, dummy) == EOF)
      exit(error("", ERRUNGET));
  }
  // open file: gzip input is reopened via zlib; plain files are
  //   rewound so the magic-number probe is not lost
  if (gzip) {
    if (fclose(dummy))
      exit(error("<dummy>", ERRCLOSE));
    in->gzf = gzopen(inFile, "r");
    if (in->gzf == NULL)
      exit(error(inFile, ERROPEN));
  } else {
    if (! stdinBool)
      rewind(dummy);
    in->f = dummy;
  }
  return gzip;
}
/* void loadQual()
 * Load quality score profiles from file.
 * The file contains two sections introduced by '#match' /
 * '#mismatch' header lines; each section holds maxQual+1 rows of
 * maxQual+1 comma-separated integers, stored into *match/*mism.
 * Exits with ERRRANGE if either table is incomplete.
 */
void loadQual(char* qualFile, int maxQual,
    char*** match, char*** mism) {
  File qual;
  bool gz = openRead(qualFile, &qual);
  char* line = memalloc(MAX_SIZE);
  char** arr = NULL; // array to save to (NULL until a section header)
  int i = 0, matIdx = 0, misIdx = 0; // array indices
  while (getLine(line, MAX_SIZE, qual, gz) != NULL) {
    if (line[0] == '#' || line[0] == '\n') {
      // determine target array from the section header
      i = 0;
      if (! strcmp(line + 1, "match\n"))
        arr = *match;
      else if (! strcmp(line + 1, "mismatch\n"))
        arr = *mism;
    } else if (arr == NULL) {
      continue; // data outside a recognized section: skip
    } else {
      // remove trailing '\n'
      int j;
      for (j = 0; line[j] != '\n' && line[j] != '\0'; j++) ;
      line[j] = '\0';
      // save values to array, one row of maxQual+1 CSV fields
      char* tok = strtok(line, CSV);
      for (j = 0; j < maxQual + 1; j++) {
        if (tok == NULL) {
          // too few columns for the declared maxQual
          char* msg = (char*) memalloc(MAX_SIZE);
          sprintf(msg, "(range [0, %d]) %s",
            maxQual, qualFile);
          exit(error(msg, ERRRANGE));
        }
        // NOTE(review): values are stored into char cells;
        //   assumes profile entries fit in a char -- TODO confirm
        arr[i][j] = getInt(tok);
        tok = strtok(NULL, CSV);
      }
      i++;
      // once a table is full, stop writing until the next header
      if ( (arr == *match && ++matIdx > maxQual)
          || (arr == *mism && ++misIdx > maxQual) )
        arr = NULL;
    }
  }
  // make sure all values were loaded
  if (matIdx < maxQual + 1 || misIdx < maxQual + 1) {
    char* msg = (char*) memalloc(MAX_SIZE);
    sprintf(msg, "(range [0, %d]) %s", maxQual, qualFile);
    exit(error(msg, ERRRANGE));
  }
  if ( (gz && gzclose(qual.gzf) != Z_OK) ||
      (! gz && fclose(qual.f) ) )
    exit(error(qualFile, ERRCLOSE));
  free(line);
}
/* void saveQual()
 * Allocate the (maxQual+1) x (maxQual+1) quality-score profile
 * tables and fill them, either from the built-in defaults or
 * from a user-supplied profile file.
 */
void saveQual(char* qualFile, int maxQual,
    char*** match, char*** mism) {
  int dim = maxQual + 1;
  // allocate memory for both tables
  *match = (char**) memalloc(dim * sizeof(char*));
  *mism = (char**) memalloc(dim * sizeof(char*));
  for (int i = 0; i < dim; i++) {
    (*match)[i] = (char*) memalloc(dim);
    (*mism)[i] = (char*) memalloc(dim);
  }
  // user-supplied profile: load from file and return
  if (qualFile != NULL) {
    loadQual(qualFile, maxQual, match, mism);
    return;
  }
  // copy quality profile from the built-in const arrays,
  //   which only cover scores up to MAXQUAL
  if (maxQual > MAXQUAL)
    exit(error("", ERRDEFQ));
  for (int i = 0; i < dim; i++)
    for (int j = 0; j < dim; j++) {
      (*match)[i][j] = match_profile[i][j];
      (*mism)[i][j] = mismatch_profile[i][j];
    }
}
/* void runProgram()
 * Controls the opening/closing of files,
 * and analysis by readFile().
 *
 * inFile1/inFile2 may be comma-separated lists of names (split on COM
 * with strtok_r); each loop iteration processes one pair of files, or
 * one interleaved file when 'inter' is set. Output files are opened on
 * the first iteration only and closed after all inputs are processed.
 */
void runProgram(char* outFile, char* inFile1,
    char* inFile2, bool inter, char* unFile,
    char* logFile, int overlap, bool dovetail,
    char* doveFile, int doveOverlap, char* alnFile,
    int alnOpt, bool adaptOpt, int gzOut, bool fjoin,
    bool interOpt, float mismatch, bool maxLen,
    int offset, int maxQual, char* qualFile,
    bool verbose, int threads) {
  // get first set of input file names
  char* end1, *end2;
  char* file1 = strtok_r(inFile1, COM, &end1);
  char* file2 = file1;
  if (! inter)
    file2 = strtok_r(inFile2, COM, &end2);

  // loop through input files
  File out, out2, un1, un2, log, dove, aln; // output files
  char** match = NULL, **mism = NULL; // quality score profiles
  int i = 0; // count of files processed
  int tCount = 0, tStitch = 0; // counting variables
  while (file1 && file2) {
    // open input files
    File in1, in2;
    bool gz1 = openRead(file1, &in1);
    bool gz2 = gz1; // with interleaved input, in2/gz2 mirror in1/gz1
    if (! inter)
      gz2 = openRead(file2, &in2);

    // on first iteration, load quals and open outputs
    if (! i) {
      // load quality score profile (not needed with fastq-join scoring
      // or in adapter-removal mode)
      if (! fjoin && ! adaptOpt)
        saveQual(qualFile, maxQual, &match, &mism);
      // open output files; gzOut == -1 forces uncompressed output,
      // otherwise compress if either input was gzip-compressed
      if (gzOut == -1)
        gzOut = 0;
      else if (gz1 || gz2)
        gzOut = 1;
      openFiles(outFile, &out, &out2,
        unFile, &un1, &un2, logFile, &log,
        doveFile, &dove, dovetail, alnFile, &aln,
        adaptOpt, gzOut, interOpt);
    }

    // process files
    if (verbose)
      fprintf(stderr, "Processing files: %s,%s\n", file1,
        inter ? "(interleaved)" : file2);
    int stitch = 0; // counting variable
    int count = readFile(in1, inter ? in1 : in2,
      out, interOpt ? out : out2,
      un1, interOpt ? un1 : un2, unFile != NULL,
      log, logFile != NULL,
      overlap, dovetail, doveOverlap, dove,
      dovetail && doveFile != NULL, aln, alnOpt,
      adaptOpt, mismatch, maxLen, &stitch,
      offset, maxQual, gz1, gz2, gzOut, fjoin,
      match, mism, threads);
    tCount += count;
    tStitch += stitch;

    // log counts
    if (verbose) {
      fprintf(stderr, " Fragments (pairs of reads) analyzed: %d\n", count);
      if (adaptOpt)
        fprintf(stderr, " Adapters removed: %d\n", stitch);
      else
        fprintf(stderr, " Successfully stitched: %d\n", stitch);
    }

    // close input files
    if ( (gz1 && gzclose(in1.gzf) != Z_OK)
        || (! gz1 && fclose(in1.f)) )
      exit(error(file1, ERRCLOSE));
    if ( ! inter && ( (gz2 && gzclose(in2.gzf) != Z_OK)
        || (! gz2 && fclose(in2.f)) ) )
      exit(error(file2, ERRCLOSE));

    // advance to the next pair of input file names
    file1 = strtok_r(NULL, COM, &end1);
    file2 = file1;
    if (! inter)
      file2 = strtok_r(NULL, COM, &end2);
    i++;
  }

  // print totals only when more than one pair was processed
  if (verbose && i > 1) {
    fprintf(stderr, "Total counts\n");
    fprintf(stderr, " Fragments (pairs of reads) analyzed: %d\n", tCount);
    if (adaptOpt)
      fprintf(stderr, " Adapters removed: %d\n", tStitch);
    else
      fprintf(stderr, " Successfully stitched: %d\n", tStitch);
  }

  // free memory for qual score profiles
  if (! fjoin && ! adaptOpt) {
    for (int i = 0; i < maxQual + 1; i++) {
      free(match[i]);
      free(mism[i]);
    }
    free(match);
    free(mism);
  }

  // close files
  // NOTE(review): out2 is closed only in adapter mode -- presumably
  // openFiles() opens it only then; verify against openFiles().
  if (gzOut) {
    if ( gzclose(out.gzf) != Z_OK ||
        (adaptOpt && ! interOpt && gzclose(out2.gzf) != Z_OK) )
      exit(error(outFile, ERRCLOSE));
    if ( unFile != NULL && (gzclose(un1.gzf) != Z_OK ||
        (! interOpt && gzclose(un2.gzf) != Z_OK)) )
      exit(error(unFile, ERRCLOSE));
  } else {
    if ( fclose(out.f) ||
        (adaptOpt && ! interOpt && fclose(out2.f)) )
      exit(error(outFile, ERRCLOSE));
    if ( unFile != NULL && (fclose(un1.f) ||
        (! interOpt && fclose(un2.f)) ) )
      exit(error(unFile, ERRCLOSE));
  }
  if (logFile != NULL && fclose(log.f))
    exit(error(logFile, ERRCLOSE));
  if (dovetail && doveFile != NULL && fclose(dove.f))
    exit(error(doveFile, ERRCLOSE));
  if (alnFile != NULL && fclose(aln.f))
    exit(error(alnFile, ERRCLOSE));
}
/* void getArgs()
 * Parse the command-line. Check for errors.
 * On success, hands all validated parameters to runProgram().
 */
void getArgs(int argc, char** argv) {
  // default parameters/filenames
  char* outFile = NULL, *inFile1 = NULL, *inFile2 = NULL,
    *unFile = NULL, *logFile = NULL, *doveFile = NULL,
    *alnFile = NULL, *qualFile = NULL;
  int overlap = DEFOVER, doveOverlap = DEFDOVE, gzOut = 0,
    offset = OFFSET, maxQual = MAXQUAL, threads = DEFTHR;
  float mismatch = DEFMISM;
  bool dovetail = false, adaptOpt = false, maxLen = true,
    diffOpt = false, interOpt = false, fjoin = false,
    verbose = false;

  // parse argv
  int c;
  while ( (c = getopt_long(argc, argv, OPTIONS, long_options, NULL)) != -1 )
    switch (c) {
      // flag options
      case HELP: usage(); break;
      case VERSOPT: printVersion(); break;
      case MAXOPT: maxLen = false; break;
      case DOVEOPT: dovetail = true; break;
      case ADAPTOPT: adaptOpt = true; break;
      case GZOPT: gzOut = 1; break;
      case UNGZOPT: gzOut = -1; break;
      case DIFFOPT: diffOpt = true; break;
      case INTEROPT: interOpt = true; break;
      case FJOINOPT: fjoin = true; break;
      case VERBOSE: verbose = true; break;
      // filename options (optarg pointers are kept, not copied)
      case OUTFILE: outFile = optarg; break;
      case FIRST: inFile1 = optarg; break;
      case SECOND: inFile2 = optarg; break;
      case UNFILE: unFile = optarg; break;
      case LOGFILE: logFile = optarg; break;
      case DOVEFILE: doveFile = optarg; break;
      case ALNFILE: alnFile = optarg; break;
      // numeric options
      case OVERLAP: overlap = getInt(optarg); break;
      case DOVEOVER: doveOverlap = getInt(optarg); break;
      case MISMATCH: mismatch = getFloat(optarg); break;
      case QUALITY: offset = getInt(optarg); break;
      case SETQUAL: maxQual = getInt(optarg); break;
      case QUALFILE: qualFile = optarg; break;
      case THREADS: threads = getInt(optarg); break;
      default: exit(-1);
    }
  if (optind < argc)
    exit(error(argv[optind], ERRPARAM));

  // check for argument errors
  if (outFile == NULL || inFile1 == NULL) {
    error("", ERRFILE);
    usage();
  }
  bool inter = false; // interleaved input
  if (inFile2 == NULL) {
    if (verbose)
      fprintf(stderr, "Warning: only one input file specified -- assuming interleaved\n");
    inter = true;
  }
  if (qualFile != NULL)
    fjoin = false; // given qualFile takes precedence over fastq-join method
  if (overlap <= 0 || doveOverlap <= 0)
    exit(error("", ERROVER));
  if (mismatch < 0.0f || mismatch >= 1.0f)
    exit(error("", ERRMISM));
  if (threads < 1)
    exit(error("", ERRTHREAD));

  // adjust parameters for adapter-removal mode
  if (adaptOpt) {
    dovetail = true;
    unFile = logFile = alnFile = qualFile = NULL;
  }
  // alnOpt: 0 = no alignment output, 1 = standard, 2 = diff-only
  int alnOpt = (alnFile != NULL ? (diffOpt ? 2 : 1) : 0);

  // send arguments to runProgram()
  runProgram(outFile, inFile1, inFile2, inter, unFile,
    logFile, overlap, dovetail, doveFile, doveOverlap,
    alnFile, alnOpt, adaptOpt, gzOut, fjoin, interOpt,
    mismatch, maxLen, offset, maxQual, qualFile, verbose,
    threads);
}
/* int main()
 * Entry point: all argument parsing and work is delegated to getArgs().
 */
int main(int argc, char** argv) {
  getArgs(argc, argv);
  return 0;
}
|
Compute.h | #ifndef COMPUTE_H_INCLUDED
#define COMPUTE_H_INCLUDED
#include <stdio.h>
#include <stdlib.h>
#include <SDL2/SDL.h>
#include <math.h>
#include <omp.h>
#include <time.h>
#include <immintrin.h>
#include "Grad.h"
/* Fast approximate square root of both double lanes.
 * Halves the IEEE-754 exponent via a bit shift and re-centers the bias
 * with a magic constant; accurate to a few percent for positive normal
 * inputs. Not meaningful for negative, zero, or NaN inputs.
 *
 * Fix: use portable SSE2 intrinsics (_mm_srli_epi64/_mm_add_epi64)
 * instead of GNU vector-extension arithmetic (vector >> scalar,
 * scalar + vector), matching the style of normalize(). For the
 * non-negative inputs this receives, a logical shift is identical to
 * the original arithmetic shift (sign bit is 0).
 */
inline static __m128d fsqrt(__m128d val)
{
    return _mm_castsi128_pd(
        _mm_add_epi64(_mm_set1_epi64x(0x1ff7770000000000LL),
                      _mm_srli_epi64(_mm_castpd_si128(val), 1))); //2nd root
}
/* Fast approximate fourth root of both double lanes.
 * Quarters the IEEE-754 exponent via a bit shift and re-centers the
 * bias with a magic constant; rough (within ~15%) for positive normal
 * inputs. Not meaningful for negative, zero, or NaN inputs.
 *
 * Fix: replace the GNU vector-extension add (scalar + __m128i) with
 * the portable _mm_add_epi64/_mm_set1_epi64x intrinsics.
 */
inline static __m128d normalize(__m128d val)
{
    return _mm_castsi128_pd(
        _mm_add_epi64(_mm_set1_epi64x(0x2ff2ff0000000000LL),
                      _mm_srli_epi64(_mm_castpd_si128(val), 2))); //4th root
}
/* Render the current fractal view at res-times supersampling and save
 * it as a BMP under images/.
 *
 * m        zoom factor; ph, pv pan offsets (horizontal/vertical)
 * iter     maximum iteration count per pixel
 * res      resolution multiplier applied to WIDTH/HEIGHT
 * mode     nonzero: plain quadratic iteration; zero: variant that takes
 *          |y| each step (mask3 = -0.0 clears the sign bit via andnot)
 * mop      nonzero: fixed constant (mcx,mcy) for every pixel
 *          (Julia-style); zero: constant is the pixel coordinate
 * mcx,mcy  the fixed constant used when mop is nonzero
 *
 * Pixels are computed in 3x3 tiles: four corner samples first; if all
 * four agree the tile is flood-filled, otherwise the remaining edge
 * samples are computed and the center pixel is their average.
 *
 * Fixes applied:
 *  - OpenMP data race: off1, off2, l (and in two of the four pragmas
 *    also rac, bac, gac) were missing from the private() lists and
 *    were shared across threads while written per-tile.
 *  - UB: smooth/smooth1 were read by _mm_blendv_pd before their first
 *    assignment; they are now zero-initialized per tile.
 *  - sprintf_s (MSVC-only) replaced by snprintf; time_t is cast to
 *    long to match the format specifier.
 *  - malloc size computed in size_t (int overflow at high res) and the
 *    result is checked.
 */
inline static void Screenshot(double m,double ph, double pv,int iter, int res,char mode,char mop,double mcx,double mcy)
{
    char file[30];
    int height=HEIGHT*res, width=WIDTH*res;
    short int rac,bac,gac;
    int off,i,j,off1,l,iters=iter*0.85,off2;
    unsigned char tcb,tcg,tcr;
    unsigned char *pixels = malloc((size_t)4*height*width);  // BGRA buffer
    if (pixels == NULL)
        return;  // allocation failed: skip the screenshot
    double prex=(width*(-0.5)+1.0*m*ph*res),prey=(height*(-0.5)-1.0*m*pv*res);
    __m128d zx,zy,cx,cy,x,y,four,mask,inv= _mm_set1_pd(1.0/(360.0*m*res)),sum;
    __m128d k,iterace,one,avg,avg1,smooth,smooth1,xy,xy1;
    __m128d zx1,zy1,cx1,cy1,x1,y1,mask1,sum1,k1;
    __m128d mask3=_mm_set1_pd(-0.);
    __m128d iter20=_mm_set1_pd(iter/100.0);
    __m128d zero=_mm_set1_pd(0.0);
    iterace=_mm_set1_pd(iter);
    one=_mm_set1_pd(1.0);
    four= _mm_set1_pd(100.0);  // escape radius^2 (named 'four' historically)
    if(mode)
    {
        if(mop)
        {
            // Julia-style: the constant c is fixed for every pixel
            cx1=cx=_mm_set1_pd(mcx);
            cy1=cy=_mm_set1_pd(mcy);
            #pragma omp parallel for simd collapse (2) schedule(dynamic,100) shared(pixels) private(off,off1,off2,i,j,l,k,zx,zy,x,y,sum,zx1,zy1,x1,y1,sum1,k1,mask,mask1,avg,avg1,smooth,smooth1,xy,xy1,tcb,tcg,tcr,rac,bac,gac)
            for(i=3; i<height-3; i+=3)
            {
                for(j=3; j<width-3; j+=3)
                {
                    if(j<3)continue;
                    off = 4*width*i+(j<<2);
                    off1 = 4*width*(i+1)+(j<<2);
                    off2 = 4*width*(i+2)+(j<<2);
                    x=_mm_setr_pd(j+prex,j+2+prex);
                    x1=x=_mm_mul_pd(x,inv);
                    y=_mm_setr_pd(i+prey,i+prey);
                    y1=_mm_setr_pd(i+prey+2,i+prey+2);
                    y=_mm_mul_pd(y,inv);
                    y1=_mm_mul_pd(y1,inv);
                    avg=avg1=k=k1=smooth=smooth1=_mm_setzero_pd();
                    l=0;
                    rac=bac=gac=0;
                    do
                    {
                        zx=_mm_mul_pd(x,x);
                        zx1=_mm_mul_pd(x1,x1);
                        zy=_mm_mul_pd(y,y);
                        zy1=_mm_mul_pd(y1,y1);
                        sum=_mm_add_pd(zy,zx);
                        sum1=_mm_add_pd(zy1,zx1);
                        xy=_mm_mul_pd(y,x);
                        xy1=_mm_mul_pd(y1,x1);
                        y=_mm_add_pd(xy,xy);
                        y1=_mm_add_pd(xy1,xy1);
                        y=_mm_add_pd(y,cy);
                        y1=_mm_add_pd(y1,cy1);
                        x=_mm_sub_pd(zx,zy);
                        x1=_mm_sub_pd(zx1,zy1);
                        x=_mm_add_pd(x,cx);
                        x1=_mm_add_pd(x1,cx1);
                        mask= _mm_cmplt_pd(sum,four);
                        mask1= _mm_cmplt_pd(sum1,four);
                        k=_mm_add_pd(k,_mm_and_pd(one,mask));
                        k1=_mm_add_pd(k1,_mm_and_pd(one,mask1));
                        if(l>iters)
                        {
                            avg=_mm_max_pd(sum,avg),avg1=_mm_max_pd(sum1,avg1);
                        }
                        if(l<500)
                        {
                            smooth=_mm_blendv_pd(smooth,sum,mask);
                            smooth1=_mm_blendv_pd(smooth1,sum1,mask1);
                        }
                    }
                    while(++l<iter&&(_mm_movemask_pd(mask)||_mm_movemask_pd(mask1)));
                    mask=_mm_cmpeq_pd(k,iterace);
                    mask1=_mm_cmpeq_pd(k1,iterace);
                    k-=0.69*normalize(smooth);
                    k1-=0.69*normalize(smooth1);
                    k=_mm_div_pd(k,iterace);
                    k1=_mm_div_pd(k1,iterace);
                    k=_mm_blendv_pd(k,0.25*avg,mask);
                    k1=_mm_blendv_pd(k1,0.25*avg1,mask1);
                    k=_mm_min_pd(k,one);
                    k1=_mm_min_pd(k1,one);
                    k=_mm_max_pd(k,zero);k*=8000.0;
                    k1=_mm_max_pd(k1,zero);k1*=8000.0;
                    bac+=tcb=pixels[off] = colb(k[0]);
                    gac+=tcg=pixels[off+1] = colg(k[0]);
                    rac+=tcr=pixels[off+2] = colr(k[0]);
                    bac+=pixels[off+8] = colb(k[1]);
                    gac+=pixels[off+9] = colg(k[1]);
                    rac+=pixels[off+10] = colr(k[1]);
                    bac+=pixels[off2] = colb(k1[0]);
                    gac+=pixels[off2+1] = colg(k1[0]);
                    rac+=pixels[off2+2] = colr(k1[0]);
                    bac+=pixels[off2+8] = colb(k1[1]);
                    gac+=pixels[off2+9] = colg(k1[1]);
                    rac+=pixels[off2+10] = colr(k1[1]);
                    if(tcb==pixels[off+8]&&tcb==pixels[off2]&&tcb==pixels[off2+8]&&
                            tcg==pixels[off+9]&&tcg==pixels[off2+1]&&tcg==pixels[off2+9]&&
                            tcr==pixels[off+10]&&tcr==pixels[off2+2]&&tcr==pixels[off2+10])
                    {
                        // all four corner samples agree: flood-fill the tile
                        pixels[off+4]=pixels[off1]=pixels[off1+4]=pixels[off1+8]=pixels[off2+4]=tcb;
                        pixels[off+5]=pixels[off1+1]=pixels[off1+5]=pixels[off1+9]=pixels[off2+5]=tcg;
                        pixels[off+6]=pixels[off1+2]=pixels[off1+6]=pixels[off1+10]=pixels[off2+6]=tcr;
                    }
                    else
                    {
                        // corners differ: compute the remaining edge samples
                        x=_mm_setr_pd(j+1+prex,j+prex);
                        x=_mm_mul_pd(x,inv);
                        x1=_mm_setr_pd(j+2+prex,j+prex+1);
                        x1=_mm_mul_pd(x1,inv);
                        y=_mm_setr_pd(i+prey,i+prey+1);
                        y1=_mm_setr_pd(i+prey+1,i+prey+2);
                        y=_mm_mul_pd(y,inv);
                        y1=_mm_mul_pd(y1,inv);
                        avg=avg1=k=k1=_mm_setzero_pd();
                        l=0;
                        do
                        {
                            zx=_mm_mul_pd(x,x);
                            zx1=_mm_mul_pd(x1,x1);
                            zy=_mm_mul_pd(y,y);
                            zy1=_mm_mul_pd(y1,y1);
                            sum=_mm_add_pd(zy,zx);
                            sum1=_mm_add_pd(zy1,zx1);
                            xy=_mm_mul_pd(y,x);
                            xy1=_mm_mul_pd(y1,x1);
                            y=_mm_add_pd(xy,xy);
                            y1=_mm_add_pd(xy1,xy1);
                            y=_mm_add_pd(y,cy);
                            y1=_mm_add_pd(y1,cy1);
                            x=_mm_sub_pd(zx,zy);
                            x1=_mm_sub_pd(zx1,zy1);
                            x=_mm_add_pd(x,cx);
                            x1=_mm_add_pd(x1,cx1);
                            mask= _mm_cmplt_pd(sum,four);
                            mask1= _mm_cmplt_pd(sum1,four);
                            k=_mm_add_pd(k,_mm_and_pd(one,mask));
                            k1=_mm_add_pd(k1,_mm_and_pd(one,mask1));
                            if(l>iters)
                            {
                                avg=_mm_max_pd(sum,avg),avg1=_mm_max_pd(sum1,avg1);
                            }
                            if(l<500)
                            {
                                smooth=_mm_blendv_pd(smooth,sum,mask);
                                smooth1=_mm_blendv_pd(smooth1,sum1,mask1);
                            }
                        }
                        while(++l<iter&&(_mm_movemask_pd(mask)||_mm_movemask_pd(mask1)));
                        mask=_mm_cmpeq_pd(k,iterace);
                        mask1=_mm_cmpeq_pd(k1,iterace);
                        k-=0.69*normalize(smooth);
                        k1-=0.69*normalize(smooth1);
                        k=_mm_div_pd(k,iterace);
                        k1=_mm_div_pd(k1,iterace);
                        k=_mm_blendv_pd(k,0.25*avg,mask);
                        k1=_mm_blendv_pd(k1,0.25*avg1,mask1);
                        k=_mm_min_pd(k,one);
                        k1=_mm_min_pd(k1,one);
                        k=_mm_max_pd(k,zero);k*=8000.0;
                        k1=_mm_max_pd(k1,zero);k1*=8000.0;
                        bac+=pixels[off+4] = colb(k[0]);
                        gac+=pixels[off+5] = colg(k[0]);
                        rac+=pixels[off+6] = colr(k[0]);
                        bac+=pixels[off1] = colb(k[1]);
                        gac+=pixels[off1+1] = colg(k[1]);
                        rac+=pixels[off1+2] = colr(k[1]);
                        bac+=pixels[off1+8] = colb(k1[0]);
                        gac+=pixels[off1+9] = colg(k1[0]);
                        rac+=pixels[off1+10] = colr(k1[0]);
                        bac+=pixels[off2+4] = colb(k1[1]);
                        gac+=pixels[off2+5] = colg(k1[1]);
                        rac+=pixels[off2+6] = colr(k1[1]);
                        // center pixel = average of the 8 surrounding samples
                        pixels[off1+4] = bac>>3;
                        pixels[off1+5] = gac>>3;
                        pixels[off1+6] = rac>>3;
                    }
                }
            }
        }
        if(!mop)
        {
            // Mandelbrot-style: the constant c is the pixel coordinate
            #pragma omp parallel for simd collapse (2) schedule(dynamic,100) shared(pixels) private(off,off1,off2,i,j,l,k,zx,zy,x,y,cy,cx,sum,zx1,zy1,x1,y1,cy1,cx1,sum1,k1,mask,mask1,avg,avg1,smooth,smooth1,xy,xy1,tcb,tcg,tcr,rac,bac,gac)
            for(i=3; i<height-3; i+=3)
            {
                for(j=3; j<width-3; j+=3)
                {
                    if(j<3)continue;
                    off = 4*width*i+(j<<2);
                    off1 = 4*width*(i+1)+(j<<2);
                    off2 = 4*width*(i+2)+(j<<2);
                    cx=_mm_setr_pd(j+prex,j+2+prex);
                    x1=cx1=x=cx=_mm_mul_pd(cx,inv);
                    cy=_mm_setr_pd(i+prey,i+prey);
                    cy1=_mm_setr_pd(i+prey+2,i+prey+2);
                    y=cy=_mm_mul_pd(cy,inv);
                    y1=cy1=_mm_mul_pd(cy1,inv);
                    avg=avg1=k=k1=smooth=smooth1=_mm_setzero_pd();
                    l=0;
                    rac=bac=gac=0;
                    do
                    {
                        zx=_mm_mul_pd(x,x);
                        zx1=_mm_mul_pd(x1,x1);
                        zy=_mm_mul_pd(y,y);
                        zy1=_mm_mul_pd(y1,y1);
                        sum=_mm_add_pd(zy,zx);
                        sum1=_mm_add_pd(zy1,zx1);
                        xy=_mm_mul_pd(y,x);
                        xy1=_mm_mul_pd(y1,x1);
                        y=_mm_add_pd(xy,xy);
                        y1=_mm_add_pd(xy1,xy1);
                        y=_mm_add_pd(y,cy);
                        y1=_mm_add_pd(y1,cy1);
                        x=_mm_sub_pd(zx,zy);
                        x1=_mm_sub_pd(zx1,zy1);
                        x=_mm_add_pd(x,cx);
                        x1=_mm_add_pd(x1,cx1);
                        mask= _mm_cmplt_pd(sum,four);
                        mask1= _mm_cmplt_pd(sum1,four);
                        k=_mm_add_pd(k,_mm_and_pd(one,mask));
                        k1=_mm_add_pd(k1,_mm_and_pd(one,mask1));
                        if(l>iters)
                        {
                            avg=_mm_max_pd(sum,avg),avg1=_mm_max_pd(sum1,avg1);
                        }
                        if(l<500)
                        {
                            smooth=_mm_blendv_pd(smooth,sum,mask);
                            smooth1=_mm_blendv_pd(smooth1,sum1,mask1);
                        }
                    }
                    while(++l<iter&&(_mm_movemask_pd(mask)||_mm_movemask_pd(mask1)));
                    mask=_mm_cmpeq_pd(k,iterace);
                    mask1=_mm_cmpeq_pd(k1,iterace);
                    k-=0.69*normalize(smooth);
                    k1-=0.69*normalize(smooth1);
                    k=_mm_div_pd(k,iterace);
                    k1=_mm_div_pd(k1,iterace);
                    k=_mm_blendv_pd(k,0.25*avg,mask);
                    k1=_mm_blendv_pd(k1,0.25*avg1,mask1);
                    k=_mm_min_pd(k,one);
                    k1=_mm_min_pd(k1,one);
                    k=_mm_max_pd(k,zero);k*=8000.0;
                    k1=_mm_max_pd(k1,zero);k1*=8000.0;
                    bac+=tcb=pixels[off] = colb(k[0]);
                    gac+=tcg=pixels[off+1] = colg(k[0]);
                    rac+=tcr=pixels[off+2] = colr(k[0]);
                    bac+=pixels[off+8] = colb(k[1]);
                    gac+=pixels[off+9] = colg(k[1]);
                    rac+=pixels[off+10] = colr(k[1]);
                    bac+=pixels[off2] = colb(k1[0]);
                    gac+=pixels[off2+1] = colg(k1[0]);
                    rac+=pixels[off2+2] = colr(k1[0]);
                    bac+=pixels[off2+8] = colb(k1[1]);
                    gac+=pixels[off2+9] = colg(k1[1]);
                    rac+=pixels[off2+10] = colr(k1[1]);
                    if(tcb==pixels[off+8]&&tcb==pixels[off2]&&tcb==pixels[off2+8]&&
                            tcg==pixels[off+9]&&tcg==pixels[off2+1]&&tcg==pixels[off2+9]&&
                            tcr==pixels[off+10]&&tcr==pixels[off2+2]&&tcr==pixels[off2+10])
                    {
                        pixels[off+4]=pixels[off1]=pixels[off1+4]=pixels[off1+8]=pixels[off2+4]=tcb;
                        pixels[off+5]=pixels[off1+1]=pixels[off1+5]=pixels[off1+9]=pixels[off2+5]=tcg;
                        pixels[off+6]=pixels[off1+2]=pixels[off1+6]=pixels[off1+10]=pixels[off2+6]=tcr;
                    }
                    else
                    {
                        cx=_mm_setr_pd(j+1+prex,j+prex);
                        x=cx=_mm_mul_pd(cx,inv);
                        cx1=_mm_setr_pd(j+2+prex,j+prex+1);
                        x1=cx1=_mm_mul_pd(cx1,inv);
                        cy=_mm_setr_pd(i+prey,i+prey+1);
                        cy1=_mm_setr_pd(i+prey+1,i+prey+2);
                        y=cy=_mm_mul_pd(cy,inv);
                        y1=cy1=_mm_mul_pd(cy1,inv);
                        avg=avg1=k=k1=_mm_setzero_pd();
                        l=0;
                        do
                        {
                            zx=_mm_mul_pd(x,x);
                            zx1=_mm_mul_pd(x1,x1);
                            zy=_mm_mul_pd(y,y);
                            zy1=_mm_mul_pd(y1,y1);
                            sum=_mm_add_pd(zy,zx);
                            sum1=_mm_add_pd(zy1,zx1);
                            xy=_mm_mul_pd(y,x);
                            xy1=_mm_mul_pd(y1,x1);
                            y=_mm_add_pd(xy,xy);
                            y1=_mm_add_pd(xy1,xy1);
                            y=_mm_add_pd(y,cy);
                            y1=_mm_add_pd(y1,cy1);
                            x=_mm_sub_pd(zx,zy);
                            x1=_mm_sub_pd(zx1,zy1);
                            x=_mm_add_pd(x,cx);
                            x1=_mm_add_pd(x1,cx1);
                            mask= _mm_cmplt_pd(sum,four);
                            mask1= _mm_cmplt_pd(sum1,four);
                            k=_mm_add_pd(k,_mm_and_pd(one,mask));
                            k1=_mm_add_pd(k1,_mm_and_pd(one,mask1));
                            if(l>iters)
                            {
                                avg=_mm_max_pd(sum,avg),avg1=_mm_max_pd(sum1,avg1);
                            }
                            if(l<500)
                            {
                                smooth=_mm_blendv_pd(smooth,sum,mask);
                                smooth1=_mm_blendv_pd(smooth1,sum1,mask1);
                            }
                        }
                        while(++l<iter&&(_mm_movemask_pd(mask)||_mm_movemask_pd(mask1)));
                        mask=_mm_cmpeq_pd(k,iterace);
                        mask1=_mm_cmpeq_pd(k1,iterace);
                        k-=0.69*normalize(smooth);
                        k1-=0.69*normalize(smooth1);
                        k=_mm_div_pd(k,iterace);
                        k1=_mm_div_pd(k1,iterace);
                        k=_mm_blendv_pd(k,0.25*avg,mask);
                        k1=_mm_blendv_pd(k1,0.25*avg1,mask1);
                        k=_mm_min_pd(k,one);
                        k1=_mm_min_pd(k1,one);
                        k=_mm_max_pd(k,zero);k*=8000.0;
                        k1=_mm_max_pd(k1,zero);k1*=8000.0;
                        bac+=pixels[off+4] = colb(k[0]);
                        gac+=pixels[off+5] = colg(k[0]);
                        rac+=pixels[off+6] = colr(k[0]);
                        bac+=pixels[off1] = colb(k[1]);
                        gac+=pixels[off1+1] = colg(k[1]);
                        rac+=pixels[off1+2] = colr(k[1]);
                        bac+=pixels[off1+8] = colb(k1[0]);
                        gac+=pixels[off1+9] = colg(k1[0]);
                        rac+=pixels[off1+10] = colr(k1[0]);
                        bac+=pixels[off2+4] = colb(k1[1]);
                        gac+=pixels[off2+5] = colg(k1[1]);
                        rac+=pixels[off2+6] = colr(k1[1]);
                        pixels[off1+4] = bac>>3;
                        pixels[off1+5] = gac>>3;
                        pixels[off1+6] = rac>>3;
                    }
                }
            }
        }
    }
    ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
    if(!mode)
    {
        if(mop)
        {
            cx1=cx=_mm_set1_pd(mcx);
            cy1=cy=_mm_set1_pd(mcy);
            #pragma omp parallel for simd collapse (2) schedule(dynamic,100) shared(pixels) private(off,off1,off2,i,j,l,k,zx,zy,x,y,sum,zx1,zy1,x1,y1,sum1,k1,mask,mask1,avg,avg1,smooth,smooth1,xy,xy1,tcb,tcg,tcr,rac,bac,gac)
            for(i=3; i<height-3; i+=3)
            {
                for(j=3; j<width-3; j+=3)
                {
                    if(j<3)continue;
                    off = 4*width*i+(j<<2);
                    off1 = 4*width*(i+1)+(j<<2);
                    off2 = 4*width*(i+2)+(j<<2);
                    x=_mm_setr_pd(j+prex,j+2+prex);
                    x1=x=_mm_mul_pd(x,inv);
                    y=_mm_setr_pd(i+prey,i+prey);
                    y1=_mm_setr_pd(i+prey+2,i+prey+2);
                    y=_mm_mul_pd(y,inv);
                    y1=_mm_mul_pd(y1,inv);
                    avg=avg1=k=k1=smooth=smooth1=_mm_setzero_pd();
                    l=0;
                    rac=bac=gac=0;
                    do
                    {
                        zx=_mm_mul_pd(x,x);
                        zx1=_mm_mul_pd(x1,x1);
                        zy=_mm_mul_pd(y,y);
                        zy1=_mm_mul_pd(y1,y1);
                        sum=_mm_add_pd(zy,zx);
                        sum1=_mm_add_pd(zy1,zx1);
                        xy=_mm_mul_pd(y,x);
                        xy1=_mm_mul_pd(y1,x1);
                        y=_mm_add_pd(xy,xy);
                        y1=_mm_add_pd(xy1,xy1);
                        // take |y| each step (burning-ship-like variant)
                        y=_mm_andnot_pd(mask3,y);
                        y1=_mm_andnot_pd(mask3,y1);
                        y=_mm_add_pd(y,cy);
                        y1=_mm_add_pd(y1,cy1);
                        x=_mm_sub_pd(zx,zy);
                        x1=_mm_sub_pd(zx1,zy1);
                        x=_mm_add_pd(x,cx);
                        x1=_mm_add_pd(x1,cx1);
                        mask= _mm_cmplt_pd(sum,four);
                        mask1= _mm_cmplt_pd(sum1,four);
                        k=_mm_add_pd(k,_mm_and_pd(one,mask));
                        k1=_mm_add_pd(k1,_mm_and_pd(one,mask1));
                        if(l<50)
                        {
                            avg=_mm_add_pd(avg,sum),avg1=_mm_add_pd(avg1,sum1);
                        }
                        if(l<500)
                        {
                            smooth=_mm_blendv_pd(smooth,sum,mask);
                            smooth1=_mm_blendv_pd(smooth1,sum1,mask1);
                        }
                    }
                    while(++l<iter&&(_mm_movemask_pd(mask)||_mm_movemask_pd(mask1)));
                    mask=_mm_cmpeq_pd(k,iterace);
                    mask1=_mm_cmpeq_pd(k1,iterace);
                    k-=0.69*normalize(smooth);
                    k1-=0.69*normalize(smooth1);
                    k=_mm_blendv_pd(k,avg*iter20,mask);
                    k1=_mm_blendv_pd(k1,avg1*iter20,mask1);
                    k=_mm_div_pd(k,iterace);
                    k1=_mm_div_pd(k1,iterace);
                    k=_mm_min_pd(k,one);
                    k1=_mm_min_pd(k1,one);
                    k=_mm_max_pd(k,zero);k*=8000.0;
                    k1=_mm_max_pd(k1,zero);k1*=8000.0;
                    bac+=tcb=pixels[off] = colb(k[0]);
                    gac+=tcg=pixels[off+1] = colg(k[0]);
                    rac+=tcr=pixels[off+2] = colr(k[0]);
                    bac+=pixels[off+8] = colb(k[1]);
                    gac+=pixels[off+9] = colg(k[1]);
                    rac+=pixels[off+10] = colr(k[1]);
                    bac+=pixels[off2] = colb(k1[0]);
                    gac+=pixels[off2+1] = colg(k1[0]);
                    rac+=pixels[off2+2] = colr(k1[0]);
                    bac+=pixels[off2+8] = colb(k1[1]);
                    gac+=pixels[off2+9] = colg(k1[1]);
                    rac+=pixels[off2+10] = colr(k1[1]);
                    if(tcb==pixels[off+8]&&tcb==pixels[off2]&&tcb==pixels[off2+8]&&
                            tcg==pixels[off+9]&&tcg==pixels[off2+1]&&tcg==pixels[off2+9]&&
                            tcr==pixels[off+10]&&tcr==pixels[off2+2]&&tcr==pixels[off2+10])
                    {
                        pixels[off+4]=pixels[off1]=pixels[off1+4]=pixels[off1+8]=pixels[off2+4]=tcb;
                        pixels[off+5]=pixels[off1+1]=pixels[off1+5]=pixels[off1+9]=pixels[off2+5]=tcg;
                        pixels[off+6]=pixels[off1+2]=pixels[off1+6]=pixels[off1+10]=pixels[off2+6]=tcr;
                    }
                    else
                    {
                        x=_mm_setr_pd(j+1+prex,j+prex);
                        x=_mm_mul_pd(x,inv);
                        x1=_mm_setr_pd(j+2+prex,j+prex+1);
                        x1=_mm_mul_pd(x1,inv);
                        y=_mm_setr_pd(i+prey,i+prey+1);
                        y1=_mm_setr_pd(i+prey+1,i+prey+2);
                        y=_mm_mul_pd(y,inv);
                        y1=_mm_mul_pd(y1,inv);
                        avg=avg1=k=k1=_mm_setzero_pd();
                        l=0;
                        do
                        {
                            zx=_mm_mul_pd(x,x);
                            zx1=_mm_mul_pd(x1,x1);
                            zy=_mm_mul_pd(y,y);
                            zy1=_mm_mul_pd(y1,y1);
                            sum=_mm_add_pd(zy,zx);
                            sum1=_mm_add_pd(zy1,zx1);
                            xy=_mm_mul_pd(y,x);
                            xy1=_mm_mul_pd(y1,x1);
                            y=_mm_add_pd(xy,xy);
                            y1=_mm_add_pd(xy1,xy1);
                            y=_mm_andnot_pd(mask3,y);
                            y1=_mm_andnot_pd(mask3,y1);
                            y=_mm_add_pd(y,cy);
                            y1=_mm_add_pd(y1,cy1);
                            x=_mm_sub_pd(zx,zy);
                            x1=_mm_sub_pd(zx1,zy1);
                            x=_mm_add_pd(x,cx);
                            x1=_mm_add_pd(x1,cx1);
                            mask= _mm_cmplt_pd(sum,four);
                            mask1= _mm_cmplt_pd(sum1,four);
                            k=_mm_add_pd(k,_mm_and_pd(one,mask));
                            k1=_mm_add_pd(k1,_mm_and_pd(one,mask1));
                            if(l<50)
                            {
                                avg=_mm_add_pd(avg,sum),avg1=_mm_add_pd(avg1,sum1);
                            }
                            if(l<500)
                            {
                                smooth=_mm_blendv_pd(smooth,sum,mask);
                                smooth1=_mm_blendv_pd(smooth1,sum1,mask1);
                            }
                        }
                        while(++l<iter&&(_mm_movemask_pd(mask)||_mm_movemask_pd(mask1)));
                        mask=_mm_cmpeq_pd(k,iterace);
                        mask1=_mm_cmpeq_pd(k1,iterace);
                        k-=0.69*normalize(smooth);
                        k1-=0.69*normalize(smooth1);
                        k=_mm_blendv_pd(k,avg*iter20,mask);
                        k1=_mm_blendv_pd(k1,avg1*iter20,mask1);
                        k=_mm_div_pd(k,iterace);
                        k1=_mm_div_pd(k1,iterace);
                        k=_mm_min_pd(k,one);
                        k1=_mm_min_pd(k1,one);
                        k=_mm_max_pd(k,zero);k*=8000.0;
                        k1=_mm_max_pd(k1,zero);k1*=8000.0;
                        bac+=pixels[off+4] = colb(k[0]);
                        gac+=pixels[off+5] = colg(k[0]);
                        rac+=pixels[off+6] = colr(k[0]);
                        bac+=pixels[off1] = colb(k[1]);
                        gac+=pixels[off1+1] = colg(k[1]);
                        rac+=pixels[off1+2] = colr(k[1]);
                        bac+=pixels[off1+8] = colb(k1[0]);
                        gac+=pixels[off1+9] = colg(k1[0]);
                        rac+=pixels[off1+10] = colr(k1[0]);
                        bac+=pixels[off2+4] = colb(k1[1]);
                        gac+=pixels[off2+5] = colg(k1[1]);
                        rac+=pixels[off2+6] = colr(k1[1]);
                        pixels[off1+4] = bac>>3;
                        pixels[off1+5] = gac>>3;
                        pixels[off1+6] = rac>>3;
                    }
                }
            }
        }
        if(!mop)
        {
            #pragma omp parallel for simd collapse (2) schedule(dynamic,100) shared(pixels) private(off,off1,off2,i,j,l,k,zx,zy,x,y,cy,cx,sum,zx1,zy1,x1,y1,cy1,cx1,sum1,k1,mask,mask1,avg,avg1,smooth,smooth1,xy,xy1,tcb,tcg,tcr,rac,bac,gac)
            for(i=3; i<height-3; i+=3)
            {
                for(j=3; j<width-3; j+=3)
                {
                    if(j<3)continue;
                    off = 4*width*i+(j<<2);
                    off1 = 4*width*(i+1)+(j<<2);
                    off2 = 4*width*(i+2)+(j<<2);
                    cx=_mm_setr_pd(j+prex,j+2+prex);
                    x1=cx1=x=cx=_mm_mul_pd(cx,inv);
                    cy=_mm_setr_pd(i+prey,i+prey);
                    cy1=_mm_setr_pd(i+prey+2,i+prey+2);
                    y=cy=_mm_mul_pd(cy,inv);
                    y1=cy1=_mm_mul_pd(cy1,inv);
                    avg=avg1=k=k1=smooth=smooth1=_mm_setzero_pd();
                    l=0;
                    rac=bac=gac=0;
                    do
                    {
                        zx=_mm_mul_pd(x,x);
                        zx1=_mm_mul_pd(x1,x1);
                        zy=_mm_mul_pd(y,y);
                        zy1=_mm_mul_pd(y1,y1);
                        sum=_mm_add_pd(zy,zx);
                        sum1=_mm_add_pd(zy1,zx1);
                        xy=_mm_mul_pd(y,x);
                        xy1=_mm_mul_pd(y1,x1);
                        y=_mm_add_pd(xy,xy);
                        y1=_mm_add_pd(xy1,xy1);
                        y=_mm_andnot_pd(mask3,y);
                        y1=_mm_andnot_pd(mask3,y1);
                        y=_mm_add_pd(y,cy);
                        y1=_mm_add_pd(y1,cy1);
                        x=_mm_sub_pd(zx,zy);
                        x1=_mm_sub_pd(zx1,zy1);
                        x=_mm_add_pd(x,cx);
                        x1=_mm_add_pd(x1,cx1);
                        mask= _mm_cmplt_pd(sum,four);
                        mask1= _mm_cmplt_pd(sum1,four);
                        k=_mm_add_pd(k,_mm_and_pd(one,mask));
                        k1=_mm_add_pd(k1,_mm_and_pd(one,mask1));
                        if(l<50)
                        {
                            avg=_mm_add_pd(avg,sum),avg1=_mm_add_pd(avg1,sum1);
                        }
                        if(l<500)
                        {
                            smooth=_mm_blendv_pd(smooth,sum,mask);
                            smooth1=_mm_blendv_pd(smooth1,sum1,mask1);
                        }
                    }
                    while(++l<iter&&(_mm_movemask_pd(mask)||_mm_movemask_pd(mask1)));
                    mask=_mm_cmpeq_pd(k,iterace);
                    mask1=_mm_cmpeq_pd(k1,iterace);
                    k-=0.69*normalize(smooth);
                    k1-=0.69*normalize(smooth1);
                    k=_mm_blendv_pd(k,avg*iter20,mask);
                    k1=_mm_blendv_pd(k1,avg1*iter20,mask1);
                    k=_mm_div_pd(k,iterace);
                    k1=_mm_div_pd(k1,iterace);
                    k=_mm_min_pd(k,one);
                    k1=_mm_min_pd(k1,one);
                    k=_mm_max_pd(k,zero);k*=8000.0;
                    k1=_mm_max_pd(k1,zero);k1*=8000.0;
                    bac+=tcb=pixels[off] = colb(k[0]);
                    gac+=tcg=pixels[off+1] = colg(k[0]);
                    rac+=tcr=pixels[off+2] = colr(k[0]);
                    bac+=pixels[off+8] = colb(k[1]);
                    gac+=pixels[off+9] = colg(k[1]);
                    rac+=pixels[off+10] = colr(k[1]);
                    bac+=pixels[off2] = colb(k1[0]);
                    gac+=pixels[off2+1] = colg(k1[0]);
                    rac+=pixels[off2+2] = colr(k1[0]);
                    bac+=pixels[off2+8] = colb(k1[1]);
                    gac+=pixels[off2+9] = colg(k1[1]);
                    rac+=pixels[off2+10] = colr(k1[1]);
                    if(tcb==pixels[off+8]&&tcb==pixels[off2]&&tcb==pixels[off2+8]&&
                            tcg==pixels[off+9]&&tcg==pixels[off2+1]&&tcg==pixels[off2+9]&&
                            tcr==pixels[off+10]&&tcr==pixels[off2+2]&&tcr==pixels[off2+10])
                    {
                        pixels[off+4]=pixels[off1]=pixels[off1+4]=pixels[off1+8]=pixels[off2+4]=tcb;
                        pixels[off+5]=pixels[off1+1]=pixels[off1+5]=pixels[off1+9]=pixels[off2+5]=tcg;
                        pixels[off+6]=pixels[off1+2]=pixels[off1+6]=pixels[off1+10]=pixels[off2+6]=tcr;
                    }
                    else
                    {
                        cx=_mm_setr_pd(j+1+prex,j+prex);
                        x=cx=_mm_mul_pd(cx,inv);
                        cx1=_mm_setr_pd(j+2+prex,j+prex+1);
                        x1=cx1=_mm_mul_pd(cx1,inv);
                        cy=_mm_setr_pd(i+prey,i+prey+1);
                        cy1=_mm_setr_pd(i+prey+1,i+prey+2);
                        y=cy=_mm_mul_pd(cy,inv);
                        y1=cy1=_mm_mul_pd(cy1,inv);
                        avg=avg1=k=k1=_mm_setzero_pd();
                        l=0;
                        do
                        {
                            zx=_mm_mul_pd(x,x);
                            zx1=_mm_mul_pd(x1,x1);
                            zy=_mm_mul_pd(y,y);
                            zy1=_mm_mul_pd(y1,y1);
                            sum=_mm_add_pd(zy,zx);
                            sum1=_mm_add_pd(zy1,zx1);
                            xy=_mm_mul_pd(y,x);
                            xy1=_mm_mul_pd(y1,x1);
                            y=_mm_add_pd(xy,xy);
                            y1=_mm_add_pd(xy1,xy1);
                            y=_mm_andnot_pd(mask3,y);
                            y1=_mm_andnot_pd(mask3,y1);
                            y=_mm_add_pd(y,cy);
                            y1=_mm_add_pd(y1,cy1);
                            x=_mm_sub_pd(zx,zy);
                            x1=_mm_sub_pd(zx1,zy1);
                            x=_mm_add_pd(x,cx);
                            x1=_mm_add_pd(x1,cx1);
                            mask= _mm_cmplt_pd(sum,four);
                            mask1= _mm_cmplt_pd(sum1,four);
                            k=_mm_add_pd(k,_mm_and_pd(one,mask));
                            k1=_mm_add_pd(k1,_mm_and_pd(one,mask1));
                            if(l<50)
                            {
                                avg=_mm_add_pd(avg,sum),avg1=_mm_add_pd(avg1,sum1);
                            }
                            if(l<500)
                            {
                                smooth=_mm_blendv_pd(smooth,sum,mask);
                                smooth1=_mm_blendv_pd(smooth1,sum1,mask1);
                            }
                        }
                        while(++l<iter&&(_mm_movemask_pd(mask)||_mm_movemask_pd(mask1)));
                        mask=_mm_cmpeq_pd(k,iterace);
                        mask1=_mm_cmpeq_pd(k1,iterace);
                        k-=0.69*normalize(smooth);
                        k1-=0.69*normalize(smooth1);
                        k=_mm_blendv_pd(k,avg*iter20,mask);
                        k1=_mm_blendv_pd(k1,avg1*iter20,mask1);
                        k=_mm_div_pd(k,iterace);
                        k1=_mm_div_pd(k1,iterace);
                        k=_mm_min_pd(k,one);
                        k1=_mm_min_pd(k1,one);
                        k=_mm_max_pd(k,zero);k*=8000.0;
                        k1=_mm_max_pd(k1,zero);k1*=8000.0;
                        bac+=pixels[off+4] = colb(k[0]);
                        gac+=pixels[off+5] = colg(k[0]);
                        rac+=pixels[off+6] = colr(k[0]);
                        bac+=pixels[off1] = colb(k[1]);
                        gac+=pixels[off1+1] = colg(k[1]);
                        rac+=pixels[off1+2] = colr(k[1]);
                        bac+=pixels[off1+8] = colb(k1[0]);
                        gac+=pixels[off1+9] = colg(k1[0]);
                        rac+=pixels[off1+10] = colr(k1[0]);
                        bac+=pixels[off2+4] = colb(k1[1]);
                        gac+=pixels[off2+5] = colg(k1[1]);
                        rac+=pixels[off2+6] = colr(k1[1]);
                        pixels[off1+4] = bac>>3;
                        pixels[off1+5] = gac>>3;
                        pixels[off1+6] = rac>>3;
                    }
                }
            }
        }
    }
    SDL_Surface *surf = SDL_CreateRGBSurfaceFrom(pixels, width, height, 8*4, width*4, 0, 0, 0, 0);
    // snprintf is portable (sprintf_s is MSVC-only); cast time_t for %ld
    snprintf(file,sizeof file,"images/%ld.bmp",(long)time(NULL));
    SDL_SaveBMP(surf,file);
    SDL_FreeSurface(surf);
    free(pixels);
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
inline static void Render(unsigned char *pixels,double m,double ph, double pv,int iter, char mode,char mop,double mcx,
double mcy,char index,char index2,char index3
)
{
double prex=(m*ph-HWIDTH),prey=(-HHEIGHT-m*pv);
__m128d zx,zy,cx,cy,x,y,four,mask,inv,sum,avg,avg1,smooth,smooth1;
__m128d zx1,zy1,cx1,cy1,x1,y1,mask1,sum1,k1,xy,xy1;
__m128d k,iterace,one;
__m128d zero=_mm_set1_pd(0.0);
__m128d mask3=_mm_set1_pd(-0.);
__m128d iter20=_mm_set1_pd(iter/100.0);
iterace=_mm_set1_pd(iter);
one=_mm_set1_pd(1.0);
four= _mm_set1_pd(100.0);
inv= _mm_set1_pd(1.0/(360.0*m));
int off,i,j,off1,l,iters=iter*0.85,off2;
unsigned char tcb,tcg,tcr;
short int rac,bac,gac;
if(mode)
{
if(mop)
{
cx1=cx=_mm_set1_pd(mcx);
cy1=cy=_mm_set1_pd(mcy);
#pragma omp parallel for simd collapse (2) schedule(dynamic,100) shared(pixels) private(off,i,j,k,zx,zy,x,y,sum,zx1,zy1,x1,y1,sum1,k1,mask,mask1,avg,avg1,smooth,smooth1,xy,xy1,tcb,tcg,tcr)
for(i=3*index*index2+3; i<HEIGHT-3; i+=3*(1+index2))
{
for(j=(index*index2+index3)*3; j<WIDTH-3; j+=3+index2*3)
{
if(j<3)continue;
off = 4*WIDTH*i+(j<<2);
off1 = 4*WIDTH*(i+1)+(j<<2);
off2 = 4*WIDTH*(i+2)+(j<<2);
x=_mm_setr_pd(j+prex,j+2+prex);
x1=x=_mm_mul_pd(x,inv);
y=_mm_setr_pd(i+prey,i+prey);
y1=_mm_setr_pd(i+prey+2,i+prey+2);
y=_mm_mul_pd(y,inv);
y1=_mm_mul_pd(y1,inv);
avg=avg1=k=k1=_mm_setzero_pd();
l=0;
rac=bac=gac=0;
do
{
zx=_mm_mul_pd(x,x);
zx1=_mm_mul_pd(x1,x1);
zy=_mm_mul_pd(y,y);
zy1=_mm_mul_pd(y1,y1);
sum=_mm_add_pd(zy,zx);
sum1=_mm_add_pd(zy1,zx1);
xy=_mm_mul_pd(y,x);
xy1=_mm_mul_pd(y1,x1);
y=_mm_add_pd(xy,xy);
y1=_mm_add_pd(xy1,xy1);
y=_mm_add_pd(y,cy);
y1=_mm_add_pd(y1,cy1);
x=_mm_sub_pd(zx,zy);
x1=_mm_sub_pd(zx1,zy1);
x=_mm_add_pd(x,cx);
x1=_mm_add_pd(x1,cx1);
mask= _mm_cmplt_pd(sum,four);
mask1= _mm_cmplt_pd(sum1,four);
k=_mm_add_pd(k,_mm_and_pd(one,mask));
k1=_mm_add_pd(k1,_mm_and_pd(one,mask1));
if(l>iters)
{
avg=_mm_max_pd(sum,avg),avg1=_mm_max_pd(sum1,avg1);
}
if(l<500)
{
smooth=_mm_blendv_pd(smooth,sum,mask);
smooth1=_mm_blendv_pd(smooth1,sum1,mask1);
}
}
while(++l<iter&&(_mm_movemask_pd(mask)||_mm_movemask_pd(mask1)));
mask=_mm_cmpeq_pd(k,iterace);
mask1=_mm_cmpeq_pd(k1,iterace);
k-=0.69*normalize(smooth);
k1-=0.69*normalize(smooth1);
k=_mm_div_pd(k,iterace);
k1=_mm_div_pd(k1,iterace);
k=_mm_blendv_pd(k,0.25*avg,mask);
k1=_mm_blendv_pd(k1,0.25*avg1,mask1);
k=_mm_min_pd(k,one);
k1=_mm_min_pd(k1,one);
k=_mm_max_pd(k,zero);k*=8000.0;
k1=_mm_max_pd(k1,zero);k1*=8000.0;
bac+=tcb=pixels[off] = colb(k[0]);
gac+=tcg=pixels[off+1] = colg(k[0]);
rac+=tcr=pixels[off+2] = colr(k[0]);
bac+=pixels[off+8] = colb(k[1]);
gac+=pixels[off+9] = colg(k[1]);
rac+=pixels[off+10] = colr(k[1]);
bac+=pixels[off2] = colb(k1[0]);
gac+=pixels[off2+1] = colg(k1[0]);
rac+=pixels[off2+2] = colr(k1[0]);
bac+=pixels[off2+8] = colb(k1[1]);
gac+=pixels[off2+9] = colg(k1[1]);
rac+=pixels[off2+10] = colr(k1[1]);
if(tcb==pixels[off+8]&&tcb==pixels[off2]&&tcb==pixels[off2+8]&&
tcg==pixels[off+9]&&tcg==pixels[off2+1]&&tcg==pixels[off2+9]&&
tcr==pixels[off+10]&&tcr==pixels[off2+2]&&tcr==pixels[off2+10])
{
pixels[off+4]=pixels[off1]=pixels[off1+4]=pixels[off1+8]=pixels[off2+4]=tcb;
pixels[off+5]=pixels[off1+1]=pixels[off1+5]=pixels[off1+9]=pixels[off2+5]=tcg;
pixels[off+6]=pixels[off1+2]=pixels[off1+6]=pixels[off1+10]=pixels[off2+6]=tcr;
}
else
{
x=_mm_setr_pd(j+1+prex,j+prex);
x=_mm_mul_pd(x,inv);
x1=_mm_setr_pd(j+2+prex,j+prex+1);
x1=_mm_mul_pd(x1,inv);
y=_mm_setr_pd(i+prey,i+prey+1);
y1=_mm_setr_pd(i+prey+1,i+prey+2);
y=_mm_mul_pd(y,inv);
y1=_mm_mul_pd(y1,inv);
avg=avg1=k=k1=_mm_setzero_pd();
l=0;
do
{
zx=_mm_mul_pd(x,x);
zx1=_mm_mul_pd(x1,x1);
zy=_mm_mul_pd(y,y);
zy1=_mm_mul_pd(y1,y1);
sum=_mm_add_pd(zy,zx);
sum1=_mm_add_pd(zy1,zx1);
xy=_mm_mul_pd(y,x);
xy1=_mm_mul_pd(y1,x1);
y=_mm_add_pd(xy,xy);
y1=_mm_add_pd(xy1,xy1);
y=_mm_add_pd(y,cy);
y1=_mm_add_pd(y1,cy1);
x=_mm_sub_pd(zx,zy);
x1=_mm_sub_pd(zx1,zy1);
x=_mm_add_pd(x,cx);
x1=_mm_add_pd(x1,cx1);
mask= _mm_cmplt_pd(sum,four);
mask1= _mm_cmplt_pd(sum1,four);
k=_mm_add_pd(k,_mm_and_pd(one,mask));
k1=_mm_add_pd(k1,_mm_and_pd(one,mask1));
if(l>iters)
{
avg=_mm_max_pd(sum,avg),avg1=_mm_max_pd(sum1,avg1);
}
if(l<500)
{
smooth=_mm_blendv_pd(smooth,sum,mask);
smooth1=_mm_blendv_pd(smooth1,sum1,mask1);
}
}
while(++l<iter&&(_mm_movemask_pd(mask)||_mm_movemask_pd(mask1)));
mask=_mm_cmpeq_pd(k,iterace);
mask1=_mm_cmpeq_pd(k1,iterace);
k-=0.69*normalize(smooth);
k1-=0.69*normalize(smooth1);
k=_mm_div_pd(k,iterace);
k1=_mm_div_pd(k1,iterace);
k=_mm_blendv_pd(k,0.25*avg,mask);
k1=_mm_blendv_pd(k1,0.25*avg1,mask1);
k=_mm_min_pd(k,one);
k1=_mm_min_pd(k1,one);
k=_mm_max_pd(k,zero);k*=8000.0;
k1=_mm_max_pd(k1,zero);k1*=8000.0;
bac+=pixels[off+4] = colb(k[0]);
gac+=pixels[off+5] = colg(k[0]);
rac+=pixels[off+6] = colr(k[0]);
bac+=pixels[off1] = colb(k[1]);
gac+=pixels[off1+1] = colg(k[1]);
rac+=pixels[off1+2] = colr(k[1]);
bac+=pixels[off1+8] = colb(k1[0]);
gac+=pixels[off1+9] = colg(k1[0]);
rac+=pixels[off1+10] = colr(k1[0]);
bac+=pixels[off2+4] = colb(k1[1]);
gac+=pixels[off2+5] = colg(k1[1]);
rac+=pixels[off2+6] = colr(k1[1]);
pixels[off1+4] = bac>>3;
pixels[off1+5] = gac>>3;
pixels[off1+6] = rac>>3;
}
}
}
}
if(!mop)
{
#pragma omp parallel for simd collapse (2) schedule(dynamic,100) shared(pixels) private(off,i,j,k,zx,zy,x,y,cy,cx,sum,zx1,zy1,x1,y1,cy1,cx1,sum1,k1,mask,mask1,avg,avg1,smooth,smooth1,xy,xy1,tcb,tcg,tcr,rac,bac,gac)
for(i=3*index*index2+3; i<HEIGHT-3; i+=3*(1+index2))
{
for(j=(index*index2+index3)*3; j<WIDTH-3; j+=3+index2*3)
{
if(j<3)continue;
off = 4*WIDTH*i+(j<<2);
off1 = 4*WIDTH*(i+1)+(j<<2);
off2 = 4*WIDTH*(i+2)+(j<<2);
cx=_mm_setr_pd(j+prex,j+2+prex);
x1=cx1=x=cx=_mm_mul_pd(cx,inv);
cy=_mm_setr_pd(i+prey,i+prey);
cy1=_mm_setr_pd(i+prey+2,i+prey+2);
y=cy=_mm_mul_pd(cy,inv);
y1=cy1=_mm_mul_pd(cy1,inv);
avg=avg1=k=k1=_mm_setzero_pd();
l=0;
rac=bac=gac=0;
do
{
zx=_mm_mul_pd(x,x);
zx1=_mm_mul_pd(x1,x1);
zy=_mm_mul_pd(y,y);
zy1=_mm_mul_pd(y1,y1);
sum=_mm_add_pd(zy,zx);
sum1=_mm_add_pd(zy1,zx1);
xy=_mm_mul_pd(y,x);
xy1=_mm_mul_pd(y1,x1);
y=_mm_add_pd(xy,xy);
y1=_mm_add_pd(xy1,xy1);
y=_mm_add_pd(y,cy);
y1=_mm_add_pd(y1,cy1);
x=_mm_sub_pd(zx,zy);
x1=_mm_sub_pd(zx1,zy1);
x=_mm_add_pd(x,cx);
x1=_mm_add_pd(x1,cx1);
mask= _mm_cmplt_pd(sum,four);
mask1= _mm_cmplt_pd(sum1,four);
k=_mm_add_pd(k,_mm_and_pd(one,mask));
k1=_mm_add_pd(k1,_mm_and_pd(one,mask1));
if(l>iters)
{
avg=_mm_max_pd(sum,avg),avg1=_mm_max_pd(sum1,avg1);
}
if(l<500)
{
smooth=_mm_blendv_pd(smooth,sum,mask);
smooth1=_mm_blendv_pd(smooth1,sum1,mask1);
}
}
while(++l<iter&&(_mm_movemask_pd(mask)||_mm_movemask_pd(mask1)));
mask=_mm_cmpeq_pd(k,iterace);
mask1=_mm_cmpeq_pd(k1,iterace);
k-=0.69*normalize(smooth);
k1-=0.69*normalize(smooth1);
k=_mm_div_pd(k,iterace);
k1=_mm_div_pd(k1,iterace);
k=_mm_blendv_pd(k,0.25*avg,mask);
k1=_mm_blendv_pd(k1,0.25*avg1,mask1);
k=_mm_min_pd(k,one);
k1=_mm_min_pd(k1,one);
k=_mm_max_pd(k,zero);k*=8000.0;
k1=_mm_max_pd(k1,zero);k1*=8000.0;
bac+=tcb=pixels[off] = colb(k[0]);
gac+=tcg=pixels[off+1] = colg(k[0]);
rac+=tcr=pixels[off+2] = colr(k[0]);
bac+=pixels[off+8] = colb(k[1]);
gac+=pixels[off+9] = colg(k[1]);
rac+=pixels[off+10] = colr(k[1]);
bac+=pixels[off2] = colb(k1[0]);
gac+=pixels[off2+1] = colg(k1[0]);
rac+=pixels[off2+2] = colr(k1[0]);
bac+=pixels[off2+8] = colb(k1[1]);
gac+=pixels[off2+9] = colg(k1[1]);
rac+=pixels[off2+10] = colr(k1[1]);
if(tcb==pixels[off+8]&&tcb==pixels[off2]&&tcb==pixels[off2+8]&&
tcg==pixels[off+9]&&tcg==pixels[off2+1]&&tcg==pixels[off2+9]&&
tcr==pixels[off+10]&&tcr==pixels[off2+2]&&tcr==pixels[off2+10])
{
pixels[off+4]=pixels[off1]=pixels[off1+4]=pixels[off1+8]=pixels[off2+4]=tcb;
pixels[off+5]=pixels[off1+1]=pixels[off1+5]=pixels[off1+9]=pixels[off2+5]=tcg;
pixels[off+6]=pixels[off1+2]=pixels[off1+6]=pixels[off1+10]=pixels[off2+6]=tcr;
}
else
{
cx=_mm_setr_pd(j+1+prex,j+prex);
x=cx=_mm_mul_pd(cx,inv);
cx1=_mm_setr_pd(j+2+prex,j+prex+1);
x1=cx1=_mm_mul_pd(cx1,inv);
cy=_mm_setr_pd(i+prey,i+prey+1);
cy1=_mm_setr_pd(i+prey+1,i+prey+2);
y=cy=_mm_mul_pd(cy,inv);
y1=cy1=_mm_mul_pd(cy1,inv);
avg=avg1=k=k1=_mm_setzero_pd();
l=0;
do
{
zx=_mm_mul_pd(x,x);
zx1=_mm_mul_pd(x1,x1);
zy=_mm_mul_pd(y,y);
zy1=_mm_mul_pd(y1,y1);
sum=_mm_add_pd(zy,zx);
sum1=_mm_add_pd(zy1,zx1);
xy=_mm_mul_pd(y,x);
xy1=_mm_mul_pd(y1,x1);
y=_mm_add_pd(xy,xy);
y1=_mm_add_pd(xy1,xy1);
y=_mm_add_pd(y,cy);
y1=_mm_add_pd(y1,cy1);
x=_mm_sub_pd(zx,zy);
x1=_mm_sub_pd(zx1,zy1);
x=_mm_add_pd(x,cx);
x1=_mm_add_pd(x1,cx1);
mask= _mm_cmplt_pd(sum,four);
mask1= _mm_cmplt_pd(sum1,four);
k=_mm_add_pd(k,_mm_and_pd(one,mask));
k1=_mm_add_pd(k1,_mm_and_pd(one,mask1));
if(l>iters)
{
avg=_mm_max_pd(sum,avg),avg1=_mm_max_pd(sum1,avg1);
}
if(l<500)
{
smooth=_mm_blendv_pd(smooth,sum,mask);
smooth1=_mm_blendv_pd(smooth1,sum1,mask1);
}
}
while(++l<iter&&(_mm_movemask_pd(mask)||_mm_movemask_pd(mask1)));
mask=_mm_cmpeq_pd(k,iterace);
mask1=_mm_cmpeq_pd(k1,iterace);
k-=0.69*normalize(smooth);
k1-=0.69*normalize(smooth1);
k=_mm_div_pd(k,iterace);
k1=_mm_div_pd(k1,iterace);
k=_mm_blendv_pd(k,0.25*avg,mask);
k1=_mm_blendv_pd(k1,0.25*avg1,mask1);
k=_mm_min_pd(k,one);
k1=_mm_min_pd(k1,one);
k=_mm_max_pd(k,zero);k*=8000.0;
k1=_mm_max_pd(k1,zero);k1*=8000.0;
bac+=pixels[off+4] = colb(k[0]);
gac+=pixels[off+5] = colg(k[0]);
rac+=pixels[off+6] = colr(k[0]);
bac+=pixels[off1] = colb(k[1]);
gac+=pixels[off1+1] = colg(k[1]);
rac+=pixels[off1+2] = colr(k[1]);
bac+=pixels[off1+8] = colb(k1[0]);
gac+=pixels[off1+9] = colg(k1[0]);
rac+=pixels[off1+10] = colr(k1[0]);
bac+=pixels[off2+4] = colb(k1[1]);
gac+=pixels[off2+5] = colg(k1[1]);
rac+=pixels[off2+6] = colr(k1[1]);
pixels[off1+4] = bac>>3;
pixels[off1+5] = gac>>3;
pixels[off1+6] = rac>>3;
}
}
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
if(!mode)
{
if(mop)
{
cx1=cx=_mm_set1_pd(mcx);
cy1=cy=_mm_set1_pd(mcy);
#pragma omp parallel for simd collapse (2) schedule(dynamic,100) shared(pixels) private(off,i,j,k,zx,zy,x,y,sum,zx1,zy1,x1,y1,sum1,k1,mask,mask1,avg,avg1,smooth,smooth1,xy,xy1,tcb,tcg,tcr)
for(i=3*index*index2+3; i<HEIGHT-3; i+=3*(1+index2))
{
for(j=(index*index2+index3)*3; j<WIDTH-3; j+=3+index2*3)
{
if(j<3)continue;
off = 4*WIDTH*i+(j<<2);
off1 = 4*WIDTH*(i+1)+(j<<2);
off2 = 4*WIDTH*(i+2)+(j<<2);
x=_mm_setr_pd(j+prex,j+2+prex);
x1=x=_mm_mul_pd(x,inv);
y=_mm_setr_pd(i+prey,i+prey);
y1=_mm_setr_pd(i+prey+2,i+prey+2);
y=_mm_mul_pd(y,inv);
y1=_mm_mul_pd(y1,inv);
avg=avg1=k=k1=_mm_setzero_pd();
l=0;
rac=bac=gac=0;
do
{
zx=_mm_mul_pd(x,x);
zx1=_mm_mul_pd(x1,x1);
zy=_mm_mul_pd(y,y);
zy1=_mm_mul_pd(y1,y1);
sum=_mm_add_pd(zy,zx);
sum1=_mm_add_pd(zy1,zx1);
xy=_mm_mul_pd(y,x);
xy1=_mm_mul_pd(y1,x1);
y=_mm_add_pd(xy,xy);
y1=_mm_add_pd(xy1,xy1);
y=_mm_andnot_pd(mask3,y);
y1=_mm_andnot_pd(mask3,y1);
y=_mm_add_pd(y,cy);
y1=_mm_add_pd(y1,cy1);
x=_mm_sub_pd(zx,zy);
x1=_mm_sub_pd(zx1,zy1);
x=_mm_add_pd(x,cx);
x1=_mm_add_pd(x1,cx1);
mask= _mm_cmplt_pd(sum,four);
mask1= _mm_cmplt_pd(sum1,four);
k=_mm_add_pd(k,_mm_and_pd(one,mask));
k1=_mm_add_pd(k1,_mm_and_pd(one,mask1));
if(l<50)
{
avg=_mm_add_pd(avg,sum),avg1=_mm_add_pd(avg1,sum1);
}
if(l<500)
{
smooth=_mm_blendv_pd(smooth,sum,mask);
smooth1=_mm_blendv_pd(smooth1,sum1,mask1);
}
}
while(++l<iter&&(_mm_movemask_pd(mask)||_mm_movemask_pd(mask1)));
mask=_mm_cmpeq_pd(k,iterace);
mask1=_mm_cmpeq_pd(k1,iterace);
k-=0.69*normalize(smooth);
k1-=0.69*normalize(smooth1);
k=_mm_blendv_pd(k,avg*iter20,mask);
k1=_mm_blendv_pd(k1,avg1*iter20,mask1);
k=_mm_div_pd(k,iterace);
k1=_mm_div_pd(k1,iterace);
k=_mm_min_pd(k,one);
k1=_mm_min_pd(k1,one);
k=_mm_max_pd(k,zero);k*=8000.0;
k1=_mm_max_pd(k1,zero);k1*=8000.0;
bac+=tcb=pixels[off] = colb(k[0]);
gac+=tcg=pixels[off+1] = colg(k[0]);
rac+=tcr=pixels[off+2] = colr(k[0]);
bac+=pixels[off+8] = colb(k[1]);
gac+=pixels[off+9] = colg(k[1]);
rac+=pixels[off+10] = colr(k[1]);
bac+=pixels[off2] = colb(k1[0]);
gac+=pixels[off2+1] = colg(k1[0]);
rac+=pixels[off2+2] = colr(k1[0]);
bac+=pixels[off2+8] = colb(k1[1]);
gac+=pixels[off2+9] = colg(k1[1]);
rac+=pixels[off2+10] = colr(k1[1]);
if(tcb==pixels[off+8]&&tcb==pixels[off2]&&tcb==pixels[off2+8]&&
tcg==pixels[off+9]&&tcg==pixels[off2+1]&&tcg==pixels[off2+9]&&
tcr==pixels[off+10]&&tcr==pixels[off2+2]&&tcr==pixels[off2+10])
{
pixels[off+4]=pixels[off1]=pixels[off1+4]=pixels[off1+8]=pixels[off2+4]=tcb;
pixels[off+5]=pixels[off1+1]=pixels[off1+5]=pixels[off1+9]=pixels[off2+5]=tcg;
pixels[off+6]=pixels[off1+2]=pixels[off1+6]=pixels[off1+10]=pixels[off2+6]=tcr;
}
else
{
x=_mm_setr_pd(j+1+prex,j+prex);
x=_mm_mul_pd(x,inv);
x1=_mm_setr_pd(j+2+prex,j+prex+1);
x1=_mm_mul_pd(x1,inv);
y=_mm_setr_pd(i+prey,i+prey+1);
y1=_mm_setr_pd(i+prey+1,i+prey+2);
y=_mm_mul_pd(y,inv);
y1=_mm_mul_pd(y1,inv);
avg=avg1=k=k1=_mm_setzero_pd();
l=0;
do
{
zx=_mm_mul_pd(x,x);
zx1=_mm_mul_pd(x1,x1);
zy=_mm_mul_pd(y,y);
zy1=_mm_mul_pd(y1,y1);
sum=_mm_add_pd(zy,zx);
sum1=_mm_add_pd(zy1,zx1);
xy=_mm_mul_pd(y,x);
xy1=_mm_mul_pd(y1,x1);
y=_mm_add_pd(xy,xy);
y1=_mm_add_pd(xy1,xy1);
y=_mm_andnot_pd(mask3,y);
y1=_mm_andnot_pd(mask3,y1);
y=_mm_add_pd(y,cy);
y1=_mm_add_pd(y1,cy1);
x=_mm_sub_pd(zx,zy);
x1=_mm_sub_pd(zx1,zy1);
x=_mm_add_pd(x,cx);
x1=_mm_add_pd(x1,cx1);
mask= _mm_cmplt_pd(sum,four);
mask1= _mm_cmplt_pd(sum1,four);
k=_mm_add_pd(k,_mm_and_pd(one,mask));
k1=_mm_add_pd(k1,_mm_and_pd(one,mask1));
if(l<50)
{
avg=_mm_add_pd(avg,sum),avg1=_mm_add_pd(avg1,sum1);
}
if(l<500)
{
smooth=_mm_blendv_pd(smooth,sum,mask);
smooth1=_mm_blendv_pd(smooth1,sum1,mask1);
}
}
while(++l<iter&&(_mm_movemask_pd(mask)||_mm_movemask_pd(mask1)));
mask=_mm_cmpeq_pd(k,iterace);
mask1=_mm_cmpeq_pd(k1,iterace);
k-=0.69*normalize(smooth);
k1-=0.69*normalize(smooth1);
k=_mm_blendv_pd(k,avg*iter20,mask);
k1=_mm_blendv_pd(k1,avg1*iter20,mask1);
k=_mm_div_pd(k,iterace);
k1=_mm_div_pd(k1,iterace);
k=_mm_min_pd(k,one);
k1=_mm_min_pd(k1,one);
k=_mm_max_pd(k,zero);k*=8000.0;
k1=_mm_max_pd(k1,zero);k1*=8000.0;
bac+=pixels[off+4] = colb(k[0]);
gac+=pixels[off+5] = colg(k[0]);
rac+=pixels[off+6] = colr(k[0]);
bac+=pixels[off1] = colb(k[1]);
gac+=pixels[off1+1] = colg(k[1]);
rac+=pixels[off1+2] = colr(k[1]);
bac+=pixels[off1+8] = colb(k1[0]);
gac+=pixels[off1+9] = colg(k1[0]);
rac+=pixels[off1+10] = colr(k1[0]);
bac+=pixels[off2+4] = colb(k1[1]);
gac+=pixels[off2+5] = colg(k1[1]);
rac+=pixels[off2+6] = colr(k1[1]);
pixels[off1+4] = bac>>3;
pixels[off1+5] = gac>>3;
pixels[off1+6] = rac>>3;
}
}
}
}
if(!mop)
{
#pragma omp parallel for simd collapse (2) schedule(dynamic,100) shared(pixels) private(off,i,j,k,zx,zy,x,y,cy,cx,sum,zx1,zy1,x1,y1,cy1,cx1,sum1,k1,mask,mask1,avg,avg1,smooth,smooth1,xy,xy1,tcb,tcg,tcr,rac,bac,gac)
for(i=3*index*index2+3; i<HEIGHT-3; i+=3*(1+index2))
{
for(j=(index*index2+index3)*3; j<WIDTH-3; j+=3+index2*3)
{
if(j<3)continue;
off = 4*WIDTH*i+(j<<2);
off1 = 4*WIDTH*(i+1)+(j<<2);
off2 = 4*WIDTH*(i+2)+(j<<2);
cx=_mm_setr_pd(j+prex,j+2+prex);
x1=cx1=x=cx=_mm_mul_pd(cx,inv);
cy=_mm_setr_pd(i+prey,i+prey);
cy1=_mm_setr_pd(i+prey+2,i+prey+2);
y=cy=_mm_mul_pd(cy,inv);
y1=cy1=_mm_mul_pd(cy1,inv);
avg=avg1=k=k1=_mm_setzero_pd();
l=0;
rac=bac=gac=0;
do
{
zx=_mm_mul_pd(x,x);
zx1=_mm_mul_pd(x1,x1);
zy=_mm_mul_pd(y,y);
zy1=_mm_mul_pd(y1,y1);
sum=_mm_add_pd(zy,zx);
sum1=_mm_add_pd(zy1,zx1);
xy=_mm_mul_pd(y,x);
xy1=_mm_mul_pd(y1,x1);
y=_mm_add_pd(xy,xy);
y1=_mm_add_pd(xy1,xy1);
y=_mm_andnot_pd(mask3,y);
y1=_mm_andnot_pd(mask3,y1);
y=_mm_add_pd(y,cy);
y1=_mm_add_pd(y1,cy1);
x=_mm_sub_pd(zx,zy);
x1=_mm_sub_pd(zx1,zy1);
x=_mm_add_pd(x,cx);
x1=_mm_add_pd(x1,cx1);
mask= _mm_cmplt_pd(sum,four);
mask1= _mm_cmplt_pd(sum1,four);
k=_mm_add_pd(k,_mm_and_pd(one,mask));
k1=_mm_add_pd(k1,_mm_and_pd(one,mask1));
if(l<50)
{
avg=_mm_add_pd(avg,sum),avg1=_mm_add_pd(avg1,sum1);
}
if(l<500)
{
smooth=_mm_blendv_pd(smooth,sum,mask);
smooth1=_mm_blendv_pd(smooth1,sum1,mask1);
}
}
while(++l<iter&&(_mm_movemask_pd(mask)||_mm_movemask_pd(mask1)));
mask=_mm_cmpeq_pd(k,iterace);
mask1=_mm_cmpeq_pd(k1,iterace);
k-=0.69*normalize(smooth);
k1-=0.69*normalize(smooth1);
k=_mm_blendv_pd(k,avg*iter20,mask);
k1=_mm_blendv_pd(k1,avg1*iter20,mask1);
k=_mm_div_pd(k,iterace);
k1=_mm_div_pd(k1,iterace);
k=_mm_min_pd(k,one);
k1=_mm_min_pd(k1,one);
k=_mm_max_pd(k,zero);k*=8000.0;
k1=_mm_max_pd(k1,zero);k1*=8000.0;
bac+=tcb=pixels[off] = colb(k[0]);
gac+=tcg=pixels[off+1] = colg(k[0]);
rac+=tcr=pixels[off+2] = colr(k[0]);
bac+=pixels[off+8] = colb(k[1]);
gac+=pixels[off+9] = colg(k[1]);
rac+=pixels[off+10] = colr(k[1]);
bac+=pixels[off2] = colb(k1[0]);
gac+=pixels[off2+1] = colg(k1[0]);
rac+=pixels[off2+2] = colr(k1[0]);
bac+=pixels[off2+8] = colb(k1[1]);
gac+=pixels[off2+9] = colg(k1[1]);
rac+=pixels[off2+10] = colr(k1[1]);
if(tcb==pixels[off+8]&&tcb==pixels[off2]&&tcb==pixels[off2+8]&&
tcg==pixels[off+9]&&tcg==pixels[off2+1]&&tcg==pixels[off2+9]&&
tcr==pixels[off+10]&&tcr==pixels[off2+2]&&tcr==pixels[off2+10])
{
pixels[off+4]=pixels[off1]=pixels[off1+4]=pixels[off1+8]=pixels[off2+4]=tcb;
pixels[off+5]=pixels[off1+1]=pixels[off1+5]=pixels[off1+9]=pixels[off2+5]=tcg;
pixels[off+6]=pixels[off1+2]=pixels[off1+6]=pixels[off1+10]=pixels[off2+6]=tcr;
}
else
{
cx=_mm_setr_pd(j+1+prex,j+prex);
x=cx=_mm_mul_pd(cx,inv);
cx1=_mm_setr_pd(j+2+prex,j+prex+1);
x1=cx1=_mm_mul_pd(cx1,inv);
cy=_mm_setr_pd(i+prey,i+prey+1);
cy1=_mm_setr_pd(i+prey+1,i+prey+2);
y=cy=_mm_mul_pd(cy,inv);
y1=cy1=_mm_mul_pd(cy1,inv);
avg=avg1=k=k1=_mm_setzero_pd();
l=0;
do
{
zx=_mm_mul_pd(x,x);
zx1=_mm_mul_pd(x1,x1);
zy=_mm_mul_pd(y,y);
zy1=_mm_mul_pd(y1,y1);
sum=_mm_add_pd(zy,zx);
sum1=_mm_add_pd(zy1,zx1);
xy=_mm_mul_pd(y,x);
xy1=_mm_mul_pd(y1,x1);
y=_mm_add_pd(xy,xy);
y1=_mm_add_pd(xy1,xy1);
y=_mm_andnot_pd(mask3,y);
y1=_mm_andnot_pd(mask3,y1);
y=_mm_add_pd(y,cy);
y1=_mm_add_pd(y1,cy1);
x=_mm_sub_pd(zx,zy);
x1=_mm_sub_pd(zx1,zy1);
x=_mm_add_pd(x,cx);
x1=_mm_add_pd(x1,cx1);
mask= _mm_cmplt_pd(sum,four);
mask1= _mm_cmplt_pd(sum1,four);
k=_mm_add_pd(k,_mm_and_pd(one,mask));
k1=_mm_add_pd(k1,_mm_and_pd(one,mask1));
if(l<50)
{
avg=_mm_add_pd(avg,sum),avg1=_mm_add_pd(avg1,sum1);
}
if(l<500)
{
smooth=_mm_blendv_pd(smooth,sum,mask);
smooth1=_mm_blendv_pd(smooth1,sum1,mask1);
}
}
while(++l<iter&&(_mm_movemask_pd(mask)||_mm_movemask_pd(mask1)));
mask=_mm_cmpeq_pd(k,iterace);
mask1=_mm_cmpeq_pd(k1,iterace);
k-=0.69*normalize(smooth);
k1-=0.69*normalize(smooth1);
k=_mm_blendv_pd(k,avg*iter20,mask);
k1=_mm_blendv_pd(k1,avg1*iter20,mask1);
k=_mm_div_pd(k,iterace);
k1=_mm_div_pd(k1,iterace);
k=_mm_min_pd(k,one);
k1=_mm_min_pd(k1,one);
k=_mm_max_pd(k,zero);k*=8000.0;
k1=_mm_max_pd(k1,zero);k1*=8000.0;
bac+=pixels[off+4] = colb(k[0]);
gac+=pixels[off+5] = colg(k[0]);
rac+=pixels[off+6] = colr(k[0]);
bac+=pixels[off1] = colb(k[1]);
gac+=pixels[off1+1] = colg(k[1]);
rac+=pixels[off1+2] = colr(k[1]);
bac+=pixels[off1+8] = colb(k1[0]);
gac+=pixels[off1+9] = colg(k1[0]);
rac+=pixels[off1+10] = colr(k1[0]);
bac+=pixels[off2+4] = colb(k1[1]);
gac+=pixels[off2+5] = colg(k1[1]);
rac+=pixels[off2+6] = colr(k1[1]);
pixels[off1+4] = bac>>3;
pixels[off1+5] = gac>>3;
pixels[off1+6] = rac>>3;
}
}
}
}
}
}
/*
 * RenderCol: renders a vertical band of the fractal (columns starting at
 * 2*WIDTH/5) into the BGRA `pixels` buffer, three rows / three columns per
 * step with SSE2 two-lane double arithmetic.
 *
 *   mode != 0 : z = z^2 + c (Mandelbrot family)
 *   mode == 0 : burning-ship variant (|Im z| via andnot with the sign bit)
 *   mop  != 0 : Julia-style rendering, constant c = (mcx, mcy)
 *   mop  == 0 : c derived per pixel from screen coordinates
 *
 * For each 3x3 cell the four corner pixels are iterated first; if all four
 * colors agree the cell interior is flood-filled, otherwise the remaining
 * sub-pixel samples are iterated and the center pixel gets the average
 * (sum of 8 samples, >>3) for cheap antialiasing.
 *
 * m, ph, pv          zoom factor and pan offsets
 * iter               maximum iteration count
 * index/index2/index3  interleaving parameters for multi-pass rendering
 *
 * BUGFIXES vs. previous revision:
 *  - off1, off2 and l were missing from every OpenMP private() clause, and
 *    rac/bac/gac were missing from the two mop-branch clauses. All of them
 *    are written per pixel by every thread, so sharing them was a data race
 *    that corrupted pixel addressing and iteration counts.
 *  - smooth/smooth1 were consumed by _mm_blendv_pd before ever being
 *    written (lanes escaping on iteration 0 kept garbage); they are now
 *    zero-initialized together with avg/k, which leaves all other lanes'
 *    results unchanged.
 */
inline static void RenderCol(unsigned char *pixels,double m,double ph, double pv,int iter, char mode,char mop,double mcx,
double mcy,char index,char index2,char index3
)
{
double prex=(m*ph-HWIDTH),prey=(-HHEIGHT-m*pv);
__m128d zx,zy,cx,cy,x,y,four,mask,inv,sum,avg,avg1,smooth,smooth1;
__m128d zx1,zy1,cx1,cy1,x1,y1,mask1,sum1,k1,xy,xy1;
__m128d k,iterace,one;
__m128d zero=_mm_set1_pd(0.0);
__m128d mask3=_mm_set1_pd(-0.);          /* sign-bit mask: andnot(mask3,v) == fabs(v) */
__m128d iter20=_mm_set1_pd(iter/100.0);
iterace=_mm_set1_pd(iter);
one=_mm_set1_pd(1.0);
four= _mm_set1_pd(100.0);                /* NOTE: named "four" but holds the escape radius^2 = 100 */
inv= _mm_set1_pd(1.0/(360.0*m));
int off,i,j,off1,l,iters=iter*0.85,off2;
unsigned char tcb,tcg,tcr;
short int rac,bac,gac;
if(mode)
{
    if(mop)
    {
        /* Julia mode: c is the same constant for every pixel. */
        cx1=cx=_mm_set1_pd(mcx);
        cy1=cy=_mm_set1_pd(mcy);
        /* off1, off2, l, rac, bac, gac added to private() — data-race fix. */
        #pragma omp parallel for simd collapse (2) schedule(dynamic,100) shared(pixels) private(off,off1,off2,l,rac,bac,gac,i,j,k,zx,zy,x,y,sum,zx1,zy1,x1,y1,sum1,k1,mask,mask1,avg,avg1,smooth,smooth1,xy,xy1,tcb,tcg,tcr)
        for(i=3*index*index2+3; i<HEIGHT-3; i+=3*(1+index2))
        {
            for(j=(index*index2+index3)*3+2*WIDTH/5-3; j<WIDTH-3; j+=3+index2*3)
            {
                off = 4*WIDTH*i+(j<<2);
                off1 = 4*WIDTH*(i+1)+(j<<2);
                off2 = 4*WIDTH*(i+2)+(j<<2);
                /* Two SIMD lanes = the (j,i) and (j+2,i) corners; the *1
                 * registers carry the (j,i+2)/(j+2,i+2) corners. */
                x=_mm_setr_pd(j+prex,j+2+prex);
                x1=x=_mm_mul_pd(x,inv);
                y=_mm_setr_pd(i+prey,i+prey);
                y1=_mm_setr_pd(i+prey+2,i+prey+2);
                y=_mm_mul_pd(y,inv);
                y1=_mm_mul_pd(y1,inv);
                smooth=smooth1=avg=avg1=k=k1=_mm_setzero_pd();  /* smooth init — UB fix */
                l=0;
                rac=bac=gac=0;
                do
                {
                    zx=_mm_mul_pd(x,x);
                    zx1=_mm_mul_pd(x1,x1);
                    zy=_mm_mul_pd(y,y);
                    zy1=_mm_mul_pd(y1,y1);
                    sum=_mm_add_pd(zy,zx);
                    sum1=_mm_add_pd(zy1,zx1);
                    xy=_mm_mul_pd(y,x);
                    xy1=_mm_mul_pd(y1,x1);
                    y=_mm_add_pd(xy,xy);
                    y1=_mm_add_pd(xy1,xy1);
                    y=_mm_add_pd(y,cy);
                    y1=_mm_add_pd(y1,cy1);
                    x=_mm_sub_pd(zx,zy);
                    x1=_mm_sub_pd(zx1,zy1);
                    x=_mm_add_pd(x,cx);
                    x1=_mm_add_pd(x1,cx1);
                    mask= _mm_cmplt_pd(sum,four);
                    mask1= _mm_cmplt_pd(sum1,four);
                    /* count one iteration per still-bounded lane */
                    k=_mm_add_pd(k,_mm_and_pd(one,mask));
                    k1=_mm_add_pd(k1,_mm_and_pd(one,mask1));
                    if(l>iters)
                    {
                        avg=_mm_max_pd(sum,avg),avg1=_mm_max_pd(sum1,avg1);
                    }
                    if(l<500)
                    {
                        /* keep the last |z|^2 seen while still bounded */
                        smooth=_mm_blendv_pd(smooth,sum,mask);
                        smooth1=_mm_blendv_pd(smooth1,sum1,mask1);
                    }
                }
                while(++l<iter&&(_mm_movemask_pd(mask)||_mm_movemask_pd(mask1)));
                /* interior lanes (never escaped) get the avg-based color */
                mask=_mm_cmpeq_pd(k,iterace);
                mask1=_mm_cmpeq_pd(k1,iterace);
                k-=0.69*normalize(smooth);
                k1-=0.69*normalize(smooth1);
                k=_mm_div_pd(k,iterace);
                k1=_mm_div_pd(k1,iterace);
                k=_mm_blendv_pd(k,0.25*avg,mask);
                k1=_mm_blendv_pd(k1,0.25*avg1,mask1);
                k=_mm_min_pd(k,one);
                k1=_mm_min_pd(k1,one);
                k=_mm_max_pd(k,zero);k*=8000.0;
                k1=_mm_max_pd(k1,zero);k1*=8000.0;
                bac+=tcb=pixels[off] = colb(k[0]);
                gac+=tcg=pixels[off+1] = colg(k[0]);
                rac+=tcr=pixels[off+2] = colr(k[0]);
                bac+=pixels[off+8] = colb(k[1]);
                gac+=pixels[off+9] = colg(k[1]);
                rac+=pixels[off+10] = colr(k[1]);
                bac+=pixels[off2] = colb(k1[0]);
                gac+=pixels[off2+1] = colg(k1[0]);
                rac+=pixels[off2+2] = colr(k1[0]);
                bac+=pixels[off2+8] = colb(k1[1]);
                gac+=pixels[off2+9] = colg(k1[1]);
                rac+=pixels[off2+10] = colr(k1[1]);
                if(tcb==pixels[off+8]&&tcb==pixels[off2]&&tcb==pixels[off2+8]&&
                   tcg==pixels[off+9]&&tcg==pixels[off2+1]&&tcg==pixels[off2+9]&&
                   tcr==pixels[off+10]&&tcr==pixels[off2+2]&&tcr==pixels[off2+10])
                {
                    /* all four corners identical: flood-fill the 3x3 cell */
                    pixels[off+4]=pixels[off1]=pixels[off1+4]=pixels[off1+8]=pixels[off2+4]=tcb;
                    pixels[off+5]=pixels[off1+1]=pixels[off1+5]=pixels[off1+9]=pixels[off2+5]=tcg;
                    pixels[off+6]=pixels[off1+2]=pixels[off1+6]=pixels[off1+10]=pixels[off2+6]=tcr;
                }
                else
                {
                    /* corners differ: iterate the four remaining edge samples */
                    x=_mm_setr_pd(j+1+prex,j+prex);
                    x=_mm_mul_pd(x,inv);
                    x1=_mm_setr_pd(j+2+prex,j+prex+1);
                    x1=_mm_mul_pd(x1,inv);
                    y=_mm_setr_pd(i+prey,i+prey+1);
                    y1=_mm_setr_pd(i+prey+1,i+prey+2);
                    y=_mm_mul_pd(y,inv);
                    y1=_mm_mul_pd(y1,inv);
                    smooth=smooth1=avg=avg1=k=k1=_mm_setzero_pd();
                    l=0;
                    do
                    {
                        zx=_mm_mul_pd(x,x);
                        zx1=_mm_mul_pd(x1,x1);
                        zy=_mm_mul_pd(y,y);
                        zy1=_mm_mul_pd(y1,y1);
                        sum=_mm_add_pd(zy,zx);
                        sum1=_mm_add_pd(zy1,zx1);
                        xy=_mm_mul_pd(y,x);
                        xy1=_mm_mul_pd(y1,x1);
                        y=_mm_add_pd(xy,xy);
                        y1=_mm_add_pd(xy1,xy1);
                        y=_mm_add_pd(y,cy);
                        y1=_mm_add_pd(y1,cy1);
                        x=_mm_sub_pd(zx,zy);
                        x1=_mm_sub_pd(zx1,zy1);
                        x=_mm_add_pd(x,cx);
                        x1=_mm_add_pd(x1,cx1);
                        mask= _mm_cmplt_pd(sum,four);
                        mask1= _mm_cmplt_pd(sum1,four);
                        k=_mm_add_pd(k,_mm_and_pd(one,mask));
                        k1=_mm_add_pd(k1,_mm_and_pd(one,mask1));
                        if(l>iters)
                        {
                            avg=_mm_max_pd(sum,avg),avg1=_mm_max_pd(sum1,avg1);
                        }
                        if(l<500)
                        {
                            smooth=_mm_blendv_pd(smooth,sum,mask);
                            smooth1=_mm_blendv_pd(smooth1,sum1,mask1);
                        }
                    }
                    while(++l<iter&&(_mm_movemask_pd(mask)||_mm_movemask_pd(mask1)));
                    mask=_mm_cmpeq_pd(k,iterace);
                    mask1=_mm_cmpeq_pd(k1,iterace);
                    k-=0.69*normalize(smooth);
                    k1-=0.69*normalize(smooth1);
                    k=_mm_div_pd(k,iterace);
                    k1=_mm_div_pd(k1,iterace);
                    k=_mm_blendv_pd(k,0.25*avg,mask);
                    k1=_mm_blendv_pd(k1,0.25*avg1,mask1);
                    k=_mm_min_pd(k,one);
                    k1=_mm_min_pd(k1,one);
                    k=_mm_max_pd(k,zero);k*=8000.0;
                    k1=_mm_max_pd(k1,zero);k1*=8000.0;
                    bac+=pixels[off+4] = colb(k[0]);
                    gac+=pixels[off+5] = colg(k[0]);
                    rac+=pixels[off+6] = colr(k[0]);
                    bac+=pixels[off1] = colb(k[1]);
                    gac+=pixels[off1+1] = colg(k[1]);
                    rac+=pixels[off1+2] = colr(k[1]);
                    bac+=pixels[off1+8] = colb(k1[0]);
                    gac+=pixels[off1+9] = colg(k1[0]);
                    rac+=pixels[off1+10] = colr(k1[0]);
                    bac+=pixels[off2+4] = colb(k1[1]);
                    gac+=pixels[off2+5] = colg(k1[1]);
                    rac+=pixels[off2+6] = colr(k1[1]);
                    /* center pixel = average of the 8 surrounding samples */
                    pixels[off1+4] = bac>>3;
                    pixels[off1+5] = gac>>3;
                    pixels[off1+6] = rac>>3;
                }
            }
        }
    }
    if(!mop)
    {
        /* Mandelbrot mode: c follows the pixel coordinate. */
        /* off1, off2, l added to private() — data-race fix. */
        #pragma omp parallel for simd collapse (2) schedule(dynamic,100) shared(pixels) private(off,off1,off2,l,i,j,k,zx,zy,x,y,cy,cx,sum,zx1,zy1,x1,y1,cy1,cx1,sum1,k1,mask,mask1,avg,avg1,smooth,smooth1,xy,xy1,tcb,tcg,tcr,rac,bac,gac)
        for(i=3*index*index2+3; i<HEIGHT-3; i+=3*(1+index2))
        {
            for(j=(index*index2+index3)*3+2*WIDTH/5-3; j<WIDTH-3; j+=3+index2*3)
            {
                off = 4*WIDTH*i+(j<<2);
                off1 = 4*WIDTH*(i+1)+(j<<2);
                off2 = 4*WIDTH*(i+2)+(j<<2);
                cx=_mm_setr_pd(j+prex,j+2+prex);
                x1=cx1=x=cx=_mm_mul_pd(cx,inv);
                cy=_mm_setr_pd(i+prey,i+prey);
                cy1=_mm_setr_pd(i+prey+2,i+prey+2);
                y=cy=_mm_mul_pd(cy,inv);
                y1=cy1=_mm_mul_pd(cy1,inv);
                smooth=smooth1=avg=avg1=k=k1=_mm_setzero_pd();  /* smooth init — UB fix */
                l=0;
                rac=bac=gac=0;
                do
                {
                    zx=_mm_mul_pd(x,x);
                    zx1=_mm_mul_pd(x1,x1);
                    zy=_mm_mul_pd(y,y);
                    zy1=_mm_mul_pd(y1,y1);
                    sum=_mm_add_pd(zy,zx);
                    sum1=_mm_add_pd(zy1,zx1);
                    xy=_mm_mul_pd(y,x);
                    xy1=_mm_mul_pd(y1,x1);
                    y=_mm_add_pd(xy,xy);
                    y1=_mm_add_pd(xy1,xy1);
                    y=_mm_add_pd(y,cy);
                    y1=_mm_add_pd(y1,cy1);
                    x=_mm_sub_pd(zx,zy);
                    x1=_mm_sub_pd(zx1,zy1);
                    x=_mm_add_pd(x,cx);
                    x1=_mm_add_pd(x1,cx1);
                    mask= _mm_cmplt_pd(sum,four);
                    mask1= _mm_cmplt_pd(sum1,four);
                    k=_mm_add_pd(k,_mm_and_pd(one,mask));
                    k1=_mm_add_pd(k1,_mm_and_pd(one,mask1));
                    if(l>iters)
                    {
                        avg=_mm_max_pd(sum,avg),avg1=_mm_max_pd(sum1,avg1);
                    }
                    if(l<500)
                    {
                        smooth=_mm_blendv_pd(smooth,sum,mask);
                        smooth1=_mm_blendv_pd(smooth1,sum1,mask1);
                    }
                }
                while(++l<iter&&(_mm_movemask_pd(mask)||_mm_movemask_pd(mask1)));
                mask=_mm_cmpeq_pd(k,iterace);
                mask1=_mm_cmpeq_pd(k1,iterace);
                k-=0.69*normalize(smooth);
                k1-=0.69*normalize(smooth1);
                k=_mm_div_pd(k,iterace);
                k1=_mm_div_pd(k1,iterace);
                k=_mm_blendv_pd(k,0.25*avg,mask);
                k1=_mm_blendv_pd(k1,0.25*avg1,mask1);
                k=_mm_min_pd(k,one);
                k1=_mm_min_pd(k1,one);
                k=_mm_max_pd(k,zero);k*=8000.0;
                k1=_mm_max_pd(k1,zero);k1*=8000.0;
                bac+=tcb=pixels[off] = colb(k[0]);
                gac+=tcg=pixels[off+1] = colg(k[0]);
                rac+=tcr=pixels[off+2] = colr(k[0]);
                bac+=pixels[off+8] = colb(k[1]);
                gac+=pixels[off+9] = colg(k[1]);
                rac+=pixels[off+10] = colr(k[1]);
                bac+=pixels[off2] = colb(k1[0]);
                gac+=pixels[off2+1] = colg(k1[0]);
                rac+=pixels[off2+2] = colr(k1[0]);
                bac+=pixels[off2+8] = colb(k1[1]);
                gac+=pixels[off2+9] = colg(k1[1]);
                rac+=pixels[off2+10] = colr(k1[1]);
                if(tcb==pixels[off+8]&&tcb==pixels[off2]&&tcb==pixels[off2+8]&&
                   tcg==pixels[off+9]&&tcg==pixels[off2+1]&&tcg==pixels[off2+9]&&
                   tcr==pixels[off+10]&&tcr==pixels[off2+2]&&tcr==pixels[off2+10])
                {
                    pixels[off+4]=pixels[off1]=pixels[off1+4]=pixels[off1+8]=pixels[off2+4]=tcb;
                    pixels[off+5]=pixels[off1+1]=pixels[off1+5]=pixels[off1+9]=pixels[off2+5]=tcg;
                    pixels[off+6]=pixels[off1+2]=pixels[off1+6]=pixels[off1+10]=pixels[off2+6]=tcr;
                }
                else
                {
                    cx=_mm_setr_pd(j+1+prex,j+prex);
                    x=cx=_mm_mul_pd(cx,inv);
                    cx1=_mm_setr_pd(j+2+prex,j+prex+1);
                    x1=cx1=_mm_mul_pd(cx1,inv);
                    cy=_mm_setr_pd(i+prey,i+prey+1);
                    cy1=_mm_setr_pd(i+prey+1,i+prey+2);
                    y=cy=_mm_mul_pd(cy,inv);
                    y1=cy1=_mm_mul_pd(cy1,inv);
                    smooth=smooth1=avg=avg1=k=k1=_mm_setzero_pd();
                    l=0;
                    do
                    {
                        zx=_mm_mul_pd(x,x);
                        zx1=_mm_mul_pd(x1,x1);
                        zy=_mm_mul_pd(y,y);
                        zy1=_mm_mul_pd(y1,y1);
                        sum=_mm_add_pd(zy,zx);
                        sum1=_mm_add_pd(zy1,zx1);
                        xy=_mm_mul_pd(y,x);
                        xy1=_mm_mul_pd(y1,x1);
                        y=_mm_add_pd(xy,xy);
                        y1=_mm_add_pd(xy1,xy1);
                        y=_mm_add_pd(y,cy);
                        y1=_mm_add_pd(y1,cy1);
                        x=_mm_sub_pd(zx,zy);
                        x1=_mm_sub_pd(zx1,zy1);
                        x=_mm_add_pd(x,cx);
                        x1=_mm_add_pd(x1,cx1);
                        mask= _mm_cmplt_pd(sum,four);
                        mask1= _mm_cmplt_pd(sum1,four);
                        k=_mm_add_pd(k,_mm_and_pd(one,mask));
                        k1=_mm_add_pd(k1,_mm_and_pd(one,mask1));
                        if(l>iters)
                        {
                            avg=_mm_max_pd(sum,avg),avg1=_mm_max_pd(sum1,avg1);
                        }
                        if(l<500)
                        {
                            smooth=_mm_blendv_pd(smooth,sum,mask);
                            smooth1=_mm_blendv_pd(smooth1,sum1,mask1);
                        }
                    }
                    while(++l<iter&&(_mm_movemask_pd(mask)||_mm_movemask_pd(mask1)));
                    mask=_mm_cmpeq_pd(k,iterace);
                    mask1=_mm_cmpeq_pd(k1,iterace);
                    k-=0.69*normalize(smooth);
                    k1-=0.69*normalize(smooth1);
                    k=_mm_div_pd(k,iterace);
                    k1=_mm_div_pd(k1,iterace);
                    k=_mm_blendv_pd(k,0.25*avg,mask);
                    k1=_mm_blendv_pd(k1,0.25*avg1,mask1);
                    k=_mm_min_pd(k,one);
                    k1=_mm_min_pd(k1,one);
                    k=_mm_max_pd(k,zero);k*=8000.0;
                    k1=_mm_max_pd(k1,zero);k1*=8000.0;
                    bac+=pixels[off+4] = colb(k[0]);
                    gac+=pixels[off+5] = colg(k[0]);
                    rac+=pixels[off+6] = colr(k[0]);
                    bac+=pixels[off1] = colb(k[1]);
                    gac+=pixels[off1+1] = colg(k[1]);
                    rac+=pixels[off1+2] = colr(k[1]);
                    bac+=pixels[off1+8] = colb(k1[0]);
                    gac+=pixels[off1+9] = colg(k1[0]);
                    rac+=pixels[off1+10] = colr(k1[0]);
                    bac+=pixels[off2+4] = colb(k1[1]);
                    gac+=pixels[off2+5] = colg(k1[1]);
                    rac+=pixels[off2+6] = colr(k1[1]);
                    pixels[off1+4] = bac>>3;
                    pixels[off1+5] = gac>>3;
                    pixels[off1+6] = rac>>3;
                }
            }
        }
    }
}
/* ---------------- burning-ship variants (mode == 0): y -> |y| each step ---------------- */
if(!mode)
{
    if(mop)
    {
        cx1=cx=_mm_set1_pd(mcx);
        cy1=cy=_mm_set1_pd(mcy);
        /* off1, off2, l, rac, bac, gac added to private() — data-race fix. */
        #pragma omp parallel for simd collapse (2) schedule(dynamic,100) shared(pixels) private(off,off1,off2,l,rac,bac,gac,i,j,k,zx,zy,x,y,sum,zx1,zy1,x1,y1,sum1,k1,mask,mask1,avg,avg1,smooth,smooth1,xy,xy1,tcb,tcg,tcr)
        for(i=3*index*index2+3; i<HEIGHT-3; i+=3*(1+index2))
        {
            for(j=(index*index2+index3)*3+2*WIDTH/5-3; j<WIDTH-3; j+=3+index2*3)
            {
                off = 4*WIDTH*i+(j<<2);
                off1 = 4*WIDTH*(i+1)+(j<<2);
                off2 = 4*WIDTH*(i+2)+(j<<2);
                x=_mm_setr_pd(j+prex,j+2+prex);
                x1=x=_mm_mul_pd(x,inv);
                y=_mm_setr_pd(i+prey,i+prey);
                y1=_mm_setr_pd(i+prey+2,i+prey+2);
                y=_mm_mul_pd(y,inv);
                y1=_mm_mul_pd(y1,inv);
                smooth=smooth1=avg=avg1=k=k1=_mm_setzero_pd();  /* smooth init — UB fix */
                l=0;
                rac=bac=gac=0;
                do
                {
                    zx=_mm_mul_pd(x,x);
                    zx1=_mm_mul_pd(x1,x1);
                    zy=_mm_mul_pd(y,y);
                    zy1=_mm_mul_pd(y1,y1);
                    sum=_mm_add_pd(zy,zx);
                    sum1=_mm_add_pd(zy1,zx1);
                    xy=_mm_mul_pd(y,x);
                    xy1=_mm_mul_pd(y1,x1);
                    y=_mm_add_pd(xy,xy);
                    y1=_mm_add_pd(xy1,xy1);
                    /* burning ship: take |2xy| before adding c */
                    y=_mm_andnot_pd(mask3,y);
                    y1=_mm_andnot_pd(mask3,y1);
                    y=_mm_add_pd(y,cy);
                    y1=_mm_add_pd(y1,cy1);
                    x=_mm_sub_pd(zx,zy);
                    x1=_mm_sub_pd(zx1,zy1);
                    x=_mm_add_pd(x,cx);
                    x1=_mm_add_pd(x1,cx1);
                    mask= _mm_cmplt_pd(sum,four);
                    mask1= _mm_cmplt_pd(sum1,four);
                    k=_mm_add_pd(k,_mm_and_pd(one,mask));
                    k1=_mm_add_pd(k1,_mm_and_pd(one,mask1));
                    if(l<50)
                    {
                        avg=_mm_add_pd(avg,sum),avg1=_mm_add_pd(avg1,sum1);
                    }
                    if(l<500)
                    {
                        smooth=_mm_blendv_pd(smooth,sum,mask);
                        smooth1=_mm_blendv_pd(smooth1,sum1,mask1);
                    }
                }
                while(++l<iter&&(_mm_movemask_pd(mask)||_mm_movemask_pd(mask1)));
                mask=_mm_cmpeq_pd(k,iterace);
                mask1=_mm_cmpeq_pd(k1,iterace);
                k-=0.69*normalize(smooth);
                k1-=0.69*normalize(smooth1);
                k=_mm_blendv_pd(k,avg*iter20,mask);
                k1=_mm_blendv_pd(k1,avg1*iter20,mask1);
                k=_mm_div_pd(k,iterace);
                k1=_mm_div_pd(k1,iterace);
                k=_mm_min_pd(k,one);
                k1=_mm_min_pd(k1,one);
                k=_mm_max_pd(k,zero);k*=8000.0;
                k1=_mm_max_pd(k1,zero);k1*=8000.0;
                bac+=tcb=pixels[off] = colb(k[0]);
                gac+=tcg=pixels[off+1] = colg(k[0]);
                rac+=tcr=pixels[off+2] = colr(k[0]);
                bac+=pixels[off+8] = colb(k[1]);
                gac+=pixels[off+9] = colg(k[1]);
                rac+=pixels[off+10] = colr(k[1]);
                bac+=pixels[off2] = colb(k1[0]);
                gac+=pixels[off2+1] = colg(k1[0]);
                rac+=pixels[off2+2] = colr(k1[0]);
                bac+=pixels[off2+8] = colb(k1[1]);
                gac+=pixels[off2+9] = colg(k1[1]);
                rac+=pixels[off2+10] = colr(k1[1]);
                if(tcb==pixels[off+8]&&tcb==pixels[off2]&&tcb==pixels[off2+8]&&
                   tcg==pixels[off+9]&&tcg==pixels[off2+1]&&tcg==pixels[off2+9]&&
                   tcr==pixels[off+10]&&tcr==pixels[off2+2]&&tcr==pixels[off2+10])
                {
                    pixels[off+4]=pixels[off1]=pixels[off1+4]=pixels[off1+8]=pixels[off2+4]=tcb;
                    pixels[off+5]=pixels[off1+1]=pixels[off1+5]=pixels[off1+9]=pixels[off2+5]=tcg;
                    pixels[off+6]=pixels[off1+2]=pixels[off1+6]=pixels[off1+10]=pixels[off2+6]=tcr;
                }
                else
                {
                    x=_mm_setr_pd(j+1+prex,j+prex);
                    x=_mm_mul_pd(x,inv);
                    x1=_mm_setr_pd(j+2+prex,j+prex+1);
                    x1=_mm_mul_pd(x1,inv);
                    y=_mm_setr_pd(i+prey,i+prey+1);
                    y1=_mm_setr_pd(i+prey+1,i+prey+2);
                    y=_mm_mul_pd(y,inv);
                    y1=_mm_mul_pd(y1,inv);
                    smooth=smooth1=avg=avg1=k=k1=_mm_setzero_pd();
                    l=0;
                    do
                    {
                        zx=_mm_mul_pd(x,x);
                        zx1=_mm_mul_pd(x1,x1);
                        zy=_mm_mul_pd(y,y);
                        zy1=_mm_mul_pd(y1,y1);
                        sum=_mm_add_pd(zy,zx);
                        sum1=_mm_add_pd(zy1,zx1);
                        xy=_mm_mul_pd(y,x);
                        xy1=_mm_mul_pd(y1,x1);
                        y=_mm_add_pd(xy,xy);
                        y1=_mm_add_pd(xy1,xy1);
                        y=_mm_andnot_pd(mask3,y);
                        y1=_mm_andnot_pd(mask3,y1);
                        y=_mm_add_pd(y,cy);
                        y1=_mm_add_pd(y1,cy1);
                        x=_mm_sub_pd(zx,zy);
                        x1=_mm_sub_pd(zx1,zy1);
                        x=_mm_add_pd(x,cx);
                        x1=_mm_add_pd(x1,cx1);
                        mask= _mm_cmplt_pd(sum,four);
                        mask1= _mm_cmplt_pd(sum1,four);
                        k=_mm_add_pd(k,_mm_and_pd(one,mask));
                        k1=_mm_add_pd(k1,_mm_and_pd(one,mask1));
                        if(l<50)
                        {
                            avg=_mm_add_pd(avg,sum),avg1=_mm_add_pd(avg1,sum1);
                        }
                        if(l<500)
                        {
                            smooth=_mm_blendv_pd(smooth,sum,mask);
                            smooth1=_mm_blendv_pd(smooth1,sum1,mask1);
                        }
                    }
                    while(++l<iter&&(_mm_movemask_pd(mask)||_mm_movemask_pd(mask1)));
                    mask=_mm_cmpeq_pd(k,iterace);
                    mask1=_mm_cmpeq_pd(k1,iterace);
                    k-=0.69*normalize(smooth);
                    k1-=0.69*normalize(smooth1);
                    k=_mm_blendv_pd(k,avg*iter20,mask);
                    k1=_mm_blendv_pd(k1,avg1*iter20,mask1);
                    k=_mm_div_pd(k,iterace);
                    k1=_mm_div_pd(k1,iterace);
                    k=_mm_min_pd(k,one);
                    k1=_mm_min_pd(k1,one);
                    k=_mm_max_pd(k,zero);k*=8000.0;
                    k1=_mm_max_pd(k1,zero);k1*=8000.0;
                    bac+=pixels[off+4] = colb(k[0]);
                    gac+=pixels[off+5] = colg(k[0]);
                    rac+=pixels[off+6] = colr(k[0]);
                    bac+=pixels[off1] = colb(k[1]);
                    gac+=pixels[off1+1] = colg(k[1]);
                    rac+=pixels[off1+2] = colr(k[1]);
                    bac+=pixels[off1+8] = colb(k1[0]);
                    gac+=pixels[off1+9] = colg(k1[0]);
                    rac+=pixels[off1+10] = colr(k1[0]);
                    bac+=pixels[off2+4] = colb(k1[1]);
                    gac+=pixels[off2+5] = colg(k1[1]);
                    rac+=pixels[off2+6] = colr(k1[1]);
                    pixels[off1+4] = bac>>3;
                    pixels[off1+5] = gac>>3;
                    pixels[off1+6] = rac>>3;
                }
            }
        }
    }
    if(!mop)
    {
        /* off1, off2, l added to private() — data-race fix. */
        #pragma omp parallel for simd collapse (2) schedule(dynamic,100) shared(pixels) private(off,off1,off2,l,i,j,k,zx,zy,x,y,cy,cx,sum,zx1,zy1,x1,y1,cy1,cx1,sum1,k1,mask,mask1,avg,avg1,smooth,smooth1,xy,xy1,tcb,tcg,tcr,rac,bac,gac)
        for(i=3*index*index2+3; i<HEIGHT-3; i+=3*(1+index2))
        {
            for(j=(index*index2+index3)*3+2*WIDTH/5-3; j<WIDTH-3; j+=3+index2*3)
            {
                off = 4*WIDTH*i+(j<<2);
                off1 = 4*WIDTH*(i+1)+(j<<2);
                off2 = 4*WIDTH*(i+2)+(j<<2);
                cx=_mm_setr_pd(j+prex,j+2+prex);
                x1=cx1=x=cx=_mm_mul_pd(cx,inv);
                cy=_mm_setr_pd(i+prey,i+prey);
                cy1=_mm_setr_pd(i+prey+2,i+prey+2);
                y=cy=_mm_mul_pd(cy,inv);
                y1=cy1=_mm_mul_pd(cy1,inv);
                smooth=smooth1=avg=avg1=k=k1=_mm_setzero_pd();  /* smooth init — UB fix */
                l=0;
                rac=bac=gac=0;
                do
                {
                    zx=_mm_mul_pd(x,x);
                    zx1=_mm_mul_pd(x1,x1);
                    zy=_mm_mul_pd(y,y);
                    zy1=_mm_mul_pd(y1,y1);
                    sum=_mm_add_pd(zy,zx);
                    sum1=_mm_add_pd(zy1,zx1);
                    xy=_mm_mul_pd(y,x);
                    xy1=_mm_mul_pd(y1,x1);
                    y=_mm_add_pd(xy,xy);
                    y1=_mm_add_pd(xy1,xy1);
                    y=_mm_andnot_pd(mask3,y);
                    y1=_mm_andnot_pd(mask3,y1);
                    y=_mm_add_pd(y,cy);
                    y1=_mm_add_pd(y1,cy1);
                    x=_mm_sub_pd(zx,zy);
                    x1=_mm_sub_pd(zx1,zy1);
                    x=_mm_add_pd(x,cx);
                    x1=_mm_add_pd(x1,cx1);
                    mask= _mm_cmplt_pd(sum,four);
                    mask1= _mm_cmplt_pd(sum1,four);
                    k=_mm_add_pd(k,_mm_and_pd(one,mask));
                    k1=_mm_add_pd(k1,_mm_and_pd(one,mask1));
                    if(l<50)
                    {
                        avg=_mm_add_pd(avg,sum),avg1=_mm_add_pd(avg1,sum1);
                    }
                    if(l<500)
                    {
                        smooth=_mm_blendv_pd(smooth,sum,mask);
                        smooth1=_mm_blendv_pd(smooth1,sum1,mask1);
                    }
                }
                while(++l<iter&&(_mm_movemask_pd(mask)||_mm_movemask_pd(mask1)));
                mask=_mm_cmpeq_pd(k,iterace);
                mask1=_mm_cmpeq_pd(k1,iterace);
                k-=0.69*normalize(smooth);
                k1-=0.69*normalize(smooth1);
                k=_mm_blendv_pd(k,avg*iter20,mask);
                k1=_mm_blendv_pd(k1,avg1*iter20,mask1);
                k=_mm_div_pd(k,iterace);
                k1=_mm_div_pd(k1,iterace);
                k=_mm_min_pd(k,one);
                k1=_mm_min_pd(k1,one);
                k=_mm_max_pd(k,zero);k*=8000.0;
                k1=_mm_max_pd(k1,zero);k1*=8000.0;
                bac+=tcb=pixels[off] = colb(k[0]);
                gac+=tcg=pixels[off+1] = colg(k[0]);
                rac+=tcr=pixels[off+2] = colr(k[0]);
                bac+=pixels[off+8] = colb(k[1]);
                gac+=pixels[off+9] = colg(k[1]);
                rac+=pixels[off+10] = colr(k[1]);
                bac+=pixels[off2] = colb(k1[0]);
                gac+=pixels[off2+1] = colg(k1[0]);
                rac+=pixels[off2+2] = colr(k1[0]);
                bac+=pixels[off2+8] = colb(k1[1]);
                gac+=pixels[off2+9] = colg(k1[1]);
                rac+=pixels[off2+10] = colr(k1[1]);
                if(tcb==pixels[off+8]&&tcb==pixels[off2]&&tcb==pixels[off2+8]&&
                   tcg==pixels[off+9]&&tcg==pixels[off2+1]&&tcg==pixels[off2+9]&&
                   tcr==pixels[off+10]&&tcr==pixels[off2+2]&&tcr==pixels[off2+10])
                {
                    pixels[off+4]=pixels[off1]=pixels[off1+4]=pixels[off1+8]=pixels[off2+4]=tcb;
                    pixels[off+5]=pixels[off1+1]=pixels[off1+5]=pixels[off1+9]=pixels[off2+5]=tcg;
                    pixels[off+6]=pixels[off1+2]=pixels[off1+6]=pixels[off1+10]=pixels[off2+6]=tcr;
                }
                else
                {
                    cx=_mm_setr_pd(j+1+prex,j+prex);
                    x=cx=_mm_mul_pd(cx,inv);
                    cx1=_mm_setr_pd(j+2+prex,j+prex+1);
                    x1=cx1=_mm_mul_pd(cx1,inv);
                    cy=_mm_setr_pd(i+prey,i+prey+1);
                    cy1=_mm_setr_pd(i+prey+1,i+prey+2);
                    y=cy=_mm_mul_pd(cy,inv);
                    y1=cy1=_mm_mul_pd(cy1,inv);
                    smooth=smooth1=avg=avg1=k=k1=_mm_setzero_pd();
                    l=0;
                    do
                    {
                        zx=_mm_mul_pd(x,x);
                        zx1=_mm_mul_pd(x1,x1);
                        zy=_mm_mul_pd(y,y);
                        zy1=_mm_mul_pd(y1,y1);
                        sum=_mm_add_pd(zy,zx);
                        sum1=_mm_add_pd(zy1,zx1);
                        xy=_mm_mul_pd(y,x);
                        xy1=_mm_mul_pd(y1,x1);
                        y=_mm_add_pd(xy,xy);
                        y1=_mm_add_pd(xy1,xy1);
                        y=_mm_andnot_pd(mask3,y);
                        y1=_mm_andnot_pd(mask3,y1);
                        y=_mm_add_pd(y,cy);
                        y1=_mm_add_pd(y1,cy1);
                        x=_mm_sub_pd(zx,zy);
                        x1=_mm_sub_pd(zx1,zy1);
                        x=_mm_add_pd(x,cx);
                        x1=_mm_add_pd(x1,cx1);
                        mask= _mm_cmplt_pd(sum,four);
                        mask1= _mm_cmplt_pd(sum1,four);
                        k=_mm_add_pd(k,_mm_and_pd(one,mask));
                        k1=_mm_add_pd(k1,_mm_and_pd(one,mask1));
                        if(l<50)
                        {
                            avg=_mm_add_pd(avg,sum),avg1=_mm_add_pd(avg1,sum1);
                        }
                        if(l<500)
                        {
                            smooth=_mm_blendv_pd(smooth,sum,mask);
                            smooth1=_mm_blendv_pd(smooth1,sum1,mask1);
                        }
                    }
                    while(++l<iter&&(_mm_movemask_pd(mask)||_mm_movemask_pd(mask1)));
                    mask=_mm_cmpeq_pd(k,iterace);
                    mask1=_mm_cmpeq_pd(k1,iterace);
                    k-=0.69*normalize(smooth);
                    k1-=0.69*normalize(smooth1);
                    k=_mm_blendv_pd(k,avg*iter20,mask);
                    k1=_mm_blendv_pd(k1,avg1*iter20,mask1);
                    k=_mm_div_pd(k,iterace);
                    k1=_mm_div_pd(k1,iterace);
                    k=_mm_min_pd(k,one);
                    k1=_mm_min_pd(k1,one);
                    k=_mm_max_pd(k,zero);k*=8000.0;
                    k1=_mm_max_pd(k1,zero);k1*=8000.0;
                    bac+=pixels[off+4] = colb(k[0]);
                    gac+=pixels[off+5] = colg(k[0]);
                    rac+=pixels[off+6] = colr(k[0]);
                    bac+=pixels[off1] = colb(k[1]);
                    gac+=pixels[off1+1] = colg(k[1]);
                    rac+=pixels[off1+2] = colr(k[1]);
                    bac+=pixels[off1+8] = colb(k1[0]);
                    gac+=pixels[off1+9] = colg(k1[0]);
                    rac+=pixels[off1+10] = colr(k1[0]);
                    bac+=pixels[off2+4] = colb(k1[1]);
                    gac+=pixels[off2+5] = colg(k1[1]);
                    rac+=pixels[off2+6] = colr(k1[1]);
                    pixels[off1+4] = bac>>3;
                    pixels[off1+5] = gac>>3;
                    pixels[off1+6] = rac>>3;
                }
            }
        }
    }
}
}
#endif // COMPUTE_H_INCLUDED
|
convolution_3x3_pack8.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// 3x3 stride-1 convolution for pack8 layout (8 channels interleaved per
// 8-float lane), AVX implementation.
// Kernel memory layout (established by the kptr arithmetic below): for each
// (output channel p, input channel q) pair there are 9 consecutive 8x8 blocks
// of 64 floats, one per 3x3 tap in row-major tap order — kptr is advanced by
// 64 eight times and rewound by 64*8 at the end of each pixel group.
// Assumes bottom_blob is already padded so that every output row can read
// three input rows of width outw+2 pixels — TODO confirm against the caller.
static void conv3x3s1_pack8_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const float* bias = _bias;
// one thread per output channel; each thread owns its output plane entirely
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
Mat out = top_blob.channel(p);
// seed the whole output plane with the bias (zeros when no bias is given)
__m256 _bias0 = bias ? _mm256_loadu_ps(bias + p * 8) : _mm256_setzero_ps();
out.fill(_bias0);
// accumulate the contribution of every input channel into `out`
for (int q = 0; q < inch; q++)
{
float* outptr = out;
const Mat img0 = bottom_blob.channel(q);
// three consecutive input rows feed one output row (3x3 kernel, stride 1)
const float* r0 = img0.row(0);
const float* r1 = img0.row(1);
const float* r2 = img0.row(2);
const float* kptr = kernel.channel(p).row(q);
int i = 0;
for (; i < outh; i++)
{
int j = 0;
// main loop: two output pixels per iteration. Adjacent pixels share
// broadcast input groups, so each 8-float input pixel is broadcast once
// and multiplied against two different kernel tap blocks.
for (; j + 1 < outw; j += 2)
{
// _sum0x accumulates output pixel j, _sum1x accumulates pixel j+1;
// the second accumulator of each pair breaks the FMA dependency chain
// and the pairs are merged just before the store.
__m256 _sum00 = _mm256_loadu_ps(outptr);
__m256 _sum01 = _mm256_setzero_ps();
__m256 _sum10 = _mm256_loadu_ps(outptr + 8);
__m256 _sum11 = _mm256_setzero_ps();
// r0[0..7]: row-0 input pixel j (8 packed channels)
__m256 _r000 = _mm256_broadcast_ss(r0 + 0);
__m256 _r001 = _mm256_broadcast_ss(r0 + 1);
__m256 _r002 = _mm256_broadcast_ss(r0 + 2);
__m256 _r003 = _mm256_broadcast_ss(r0 + 3);
__m256 _r004 = _mm256_broadcast_ss(r0 + 4);
__m256 _r005 = _mm256_broadcast_ss(r0 + 5);
__m256 _r006 = _mm256_broadcast_ss(r0 + 6);
__m256 _r007 = _mm256_broadcast_ss(r0 + 7);
// kernel tap (0,0): 8x8 block mapping 8 input channels -> 8 output channels
__m256 _k00 = _mm256_loadu_ps(kptr);
__m256 _k01 = _mm256_loadu_ps(kptr + 8);
__m256 _k02 = _mm256_loadu_ps(kptr + 16);
__m256 _k03 = _mm256_loadu_ps(kptr + 24);
__m256 _k04 = _mm256_loadu_ps(kptr + 32);
__m256 _k05 = _mm256_loadu_ps(kptr + 40);
__m256 _k06 = _mm256_loadu_ps(kptr + 48);
__m256 _k07 = _mm256_loadu_ps(kptr + 56);
kptr += 64;
// pixel j, tap (0,0)
_sum00 = _mm256_comp_fmadd_ps(_r000, _k00, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r001, _k01, _sum01);
_sum00 = _mm256_comp_fmadd_ps(_r002, _k02, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r003, _k03, _sum01);
_sum00 = _mm256_comp_fmadd_ps(_r004, _k04, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r005, _k05, _sum01);
_sum00 = _mm256_comp_fmadd_ps(_r006, _k06, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r007, _k07, _sum01);
// r0[8..15]: row-0 input pixel j+1 — tap (0,0) for pixel j+1,
// and tap (0,1) for pixel j (reused below with the k1 block)
__m256 _r010 = _mm256_broadcast_ss(r0 + 8);
__m256 _r011 = _mm256_broadcast_ss(r0 + 9);
__m256 _r012 = _mm256_broadcast_ss(r0 + 10);
__m256 _r013 = _mm256_broadcast_ss(r0 + 11);
__m256 _r014 = _mm256_broadcast_ss(r0 + 12);
__m256 _r015 = _mm256_broadcast_ss(r0 + 13);
__m256 _r016 = _mm256_broadcast_ss(r0 + 14);
__m256 _r017 = _mm256_broadcast_ss(r0 + 15);
_sum10 = _mm256_comp_fmadd_ps(_r010, _k00, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r011, _k01, _sum11);
_sum10 = _mm256_comp_fmadd_ps(_r012, _k02, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r013, _k03, _sum11);
_sum10 = _mm256_comp_fmadd_ps(_r014, _k04, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r015, _k05, _sum11);
_sum10 = _mm256_comp_fmadd_ps(_r016, _k06, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r017, _k07, _sum11);
// kernel tap (0,1)
__m256 _k10 = _mm256_loadu_ps(kptr);
__m256 _k11 = _mm256_loadu_ps(kptr + 8);
__m256 _k12 = _mm256_loadu_ps(kptr + 16);
__m256 _k13 = _mm256_loadu_ps(kptr + 24);
__m256 _k14 = _mm256_loadu_ps(kptr + 32);
__m256 _k15 = _mm256_loadu_ps(kptr + 40);
__m256 _k16 = _mm256_loadu_ps(kptr + 48);
__m256 _k17 = _mm256_loadu_ps(kptr + 56);
kptr += 64;
_sum00 = _mm256_comp_fmadd_ps(_r010, _k10, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r011, _k11, _sum01);
_sum00 = _mm256_comp_fmadd_ps(_r012, _k12, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r013, _k13, _sum01);
_sum00 = _mm256_comp_fmadd_ps(_r014, _k14, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r015, _k15, _sum01);
_sum00 = _mm256_comp_fmadd_ps(_r016, _k16, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r017, _k17, _sum01);
// r0[16..23]: row-0 input pixel j+2 (tap (0,1) for pixel j+1, tap (0,2) for pixel j)
__m256 _r020 = _mm256_broadcast_ss(r0 + 16);
__m256 _r021 = _mm256_broadcast_ss(r0 + 17);
__m256 _r022 = _mm256_broadcast_ss(r0 + 18);
__m256 _r023 = _mm256_broadcast_ss(r0 + 19);
__m256 _r024 = _mm256_broadcast_ss(r0 + 20);
__m256 _r025 = _mm256_broadcast_ss(r0 + 21);
__m256 _r026 = _mm256_broadcast_ss(r0 + 22);
__m256 _r027 = _mm256_broadcast_ss(r0 + 23);
_sum10 = _mm256_comp_fmadd_ps(_r020, _k10, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r021, _k11, _sum11);
_sum10 = _mm256_comp_fmadd_ps(_r022, _k12, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r023, _k13, _sum11);
_sum10 = _mm256_comp_fmadd_ps(_r024, _k14, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r025, _k15, _sum11);
_sum10 = _mm256_comp_fmadd_ps(_r026, _k16, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r027, _k17, _sum11);
// kernel tap (0,2)
__m256 _k20 = _mm256_loadu_ps(kptr);
__m256 _k21 = _mm256_loadu_ps(kptr + 8);
__m256 _k22 = _mm256_loadu_ps(kptr + 16);
__m256 _k23 = _mm256_loadu_ps(kptr + 24);
__m256 _k24 = _mm256_loadu_ps(kptr + 32);
__m256 _k25 = _mm256_loadu_ps(kptr + 40);
__m256 _k26 = _mm256_loadu_ps(kptr + 48);
__m256 _k27 = _mm256_loadu_ps(kptr + 56);
kptr += 64;
_sum00 = _mm256_comp_fmadd_ps(_r020, _k20, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r021, _k21, _sum01);
_sum00 = _mm256_comp_fmadd_ps(_r022, _k22, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r023, _k23, _sum01);
_sum00 = _mm256_comp_fmadd_ps(_r024, _k24, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r025, _k25, _sum01);
_sum00 = _mm256_comp_fmadd_ps(_r026, _k26, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r027, _k27, _sum01);
// r0[24..31]: row-0 input pixel j+3 — tap (0,2) for pixel j+1 only
__m256 _r030 = _mm256_broadcast_ss(r0 + 24);
__m256 _r031 = _mm256_broadcast_ss(r0 + 25);
__m256 _r032 = _mm256_broadcast_ss(r0 + 26);
__m256 _r033 = _mm256_broadcast_ss(r0 + 27);
__m256 _r034 = _mm256_broadcast_ss(r0 + 28);
__m256 _r035 = _mm256_broadcast_ss(r0 + 29);
__m256 _r036 = _mm256_broadcast_ss(r0 + 30);
__m256 _r037 = _mm256_broadcast_ss(r0 + 31);
_sum10 = _mm256_comp_fmadd_ps(_r030, _k20, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r031, _k21, _sum11);
_sum10 = _mm256_comp_fmadd_ps(_r032, _k22, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r033, _k23, _sum11);
_sum10 = _mm256_comp_fmadd_ps(_r034, _k24, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r035, _k25, _sum11);
_sum10 = _mm256_comp_fmadd_ps(_r036, _k26, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r037, _k27, _sum11);
// same pattern for input row 1, kernel taps (1,0)..(1,2)
__m256 _r100 = _mm256_broadcast_ss(r1 + 0);
__m256 _r101 = _mm256_broadcast_ss(r1 + 1);
__m256 _r102 = _mm256_broadcast_ss(r1 + 2);
__m256 _r103 = _mm256_broadcast_ss(r1 + 3);
__m256 _r104 = _mm256_broadcast_ss(r1 + 4);
__m256 _r105 = _mm256_broadcast_ss(r1 + 5);
__m256 _r106 = _mm256_broadcast_ss(r1 + 6);
__m256 _r107 = _mm256_broadcast_ss(r1 + 7);
__m256 _k30 = _mm256_loadu_ps(kptr);
__m256 _k31 = _mm256_loadu_ps(kptr + 8);
__m256 _k32 = _mm256_loadu_ps(kptr + 16);
__m256 _k33 = _mm256_loadu_ps(kptr + 24);
__m256 _k34 = _mm256_loadu_ps(kptr + 32);
__m256 _k35 = _mm256_loadu_ps(kptr + 40);
__m256 _k36 = _mm256_loadu_ps(kptr + 48);
__m256 _k37 = _mm256_loadu_ps(kptr + 56);
kptr += 64;
_sum00 = _mm256_comp_fmadd_ps(_r100, _k30, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r101, _k31, _sum01);
_sum00 = _mm256_comp_fmadd_ps(_r102, _k32, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r103, _k33, _sum01);
_sum00 = _mm256_comp_fmadd_ps(_r104, _k34, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r105, _k35, _sum01);
_sum00 = _mm256_comp_fmadd_ps(_r106, _k36, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r107, _k37, _sum01);
__m256 _r110 = _mm256_broadcast_ss(r1 + 8);
__m256 _r111 = _mm256_broadcast_ss(r1 + 9);
__m256 _r112 = _mm256_broadcast_ss(r1 + 10);
__m256 _r113 = _mm256_broadcast_ss(r1 + 11);
__m256 _r114 = _mm256_broadcast_ss(r1 + 12);
__m256 _r115 = _mm256_broadcast_ss(r1 + 13);
__m256 _r116 = _mm256_broadcast_ss(r1 + 14);
__m256 _r117 = _mm256_broadcast_ss(r1 + 15);
_sum10 = _mm256_comp_fmadd_ps(_r110, _k30, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r111, _k31, _sum11);
_sum10 = _mm256_comp_fmadd_ps(_r112, _k32, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r113, _k33, _sum11);
_sum10 = _mm256_comp_fmadd_ps(_r114, _k34, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r115, _k35, _sum11);
_sum10 = _mm256_comp_fmadd_ps(_r116, _k36, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r117, _k37, _sum11);
__m256 _k40 = _mm256_loadu_ps(kptr);
__m256 _k41 = _mm256_loadu_ps(kptr + 8);
__m256 _k42 = _mm256_loadu_ps(kptr + 16);
__m256 _k43 = _mm256_loadu_ps(kptr + 24);
__m256 _k44 = _mm256_loadu_ps(kptr + 32);
__m256 _k45 = _mm256_loadu_ps(kptr + 40);
__m256 _k46 = _mm256_loadu_ps(kptr + 48);
__m256 _k47 = _mm256_loadu_ps(kptr + 56);
kptr += 64;
_sum00 = _mm256_comp_fmadd_ps(_r110, _k40, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r111, _k41, _sum01);
_sum00 = _mm256_comp_fmadd_ps(_r112, _k42, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r113, _k43, _sum01);
_sum00 = _mm256_comp_fmadd_ps(_r114, _k44, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r115, _k45, _sum01);
_sum00 = _mm256_comp_fmadd_ps(_r116, _k46, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r117, _k47, _sum01);
__m256 _r120 = _mm256_broadcast_ss(r1 + 16);
__m256 _r121 = _mm256_broadcast_ss(r1 + 17);
__m256 _r122 = _mm256_broadcast_ss(r1 + 18);
__m256 _r123 = _mm256_broadcast_ss(r1 + 19);
__m256 _r124 = _mm256_broadcast_ss(r1 + 20);
__m256 _r125 = _mm256_broadcast_ss(r1 + 21);
__m256 _r126 = _mm256_broadcast_ss(r1 + 22);
__m256 _r127 = _mm256_broadcast_ss(r1 + 23);
_sum10 = _mm256_comp_fmadd_ps(_r120, _k40, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r121, _k41, _sum11);
_sum10 = _mm256_comp_fmadd_ps(_r122, _k42, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r123, _k43, _sum11);
_sum10 = _mm256_comp_fmadd_ps(_r124, _k44, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r125, _k45, _sum11);
_sum10 = _mm256_comp_fmadd_ps(_r126, _k46, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r127, _k47, _sum11);
__m256 _k50 = _mm256_loadu_ps(kptr);
__m256 _k51 = _mm256_loadu_ps(kptr + 8);
__m256 _k52 = _mm256_loadu_ps(kptr + 16);
__m256 _k53 = _mm256_loadu_ps(kptr + 24);
__m256 _k54 = _mm256_loadu_ps(kptr + 32);
__m256 _k55 = _mm256_loadu_ps(kptr + 40);
__m256 _k56 = _mm256_loadu_ps(kptr + 48);
__m256 _k57 = _mm256_loadu_ps(kptr + 56);
kptr += 64;
_sum00 = _mm256_comp_fmadd_ps(_r120, _k50, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r121, _k51, _sum01);
_sum00 = _mm256_comp_fmadd_ps(_r122, _k52, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r123, _k53, _sum01);
_sum00 = _mm256_comp_fmadd_ps(_r124, _k54, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r125, _k55, _sum01);
_sum00 = _mm256_comp_fmadd_ps(_r126, _k56, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r127, _k57, _sum01);
__m256 _r130 = _mm256_broadcast_ss(r1 + 24);
__m256 _r131 = _mm256_broadcast_ss(r1 + 25);
__m256 _r132 = _mm256_broadcast_ss(r1 + 26);
__m256 _r133 = _mm256_broadcast_ss(r1 + 27);
__m256 _r134 = _mm256_broadcast_ss(r1 + 28);
__m256 _r135 = _mm256_broadcast_ss(r1 + 29);
__m256 _r136 = _mm256_broadcast_ss(r1 + 30);
__m256 _r137 = _mm256_broadcast_ss(r1 + 31);
_sum10 = _mm256_comp_fmadd_ps(_r130, _k50, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r131, _k51, _sum11);
_sum10 = _mm256_comp_fmadd_ps(_r132, _k52, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r133, _k53, _sum11);
_sum10 = _mm256_comp_fmadd_ps(_r134, _k54, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r135, _k55, _sum11);
_sum10 = _mm256_comp_fmadd_ps(_r136, _k56, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r137, _k57, _sum11);
// same pattern for input row 2, kernel taps (2,0)..(2,2)
__m256 _r200 = _mm256_broadcast_ss(r2 + 0);
__m256 _r201 = _mm256_broadcast_ss(r2 + 1);
__m256 _r202 = _mm256_broadcast_ss(r2 + 2);
__m256 _r203 = _mm256_broadcast_ss(r2 + 3);
__m256 _r204 = _mm256_broadcast_ss(r2 + 4);
__m256 _r205 = _mm256_broadcast_ss(r2 + 5);
__m256 _r206 = _mm256_broadcast_ss(r2 + 6);
__m256 _r207 = _mm256_broadcast_ss(r2 + 7);
__m256 _k60 = _mm256_loadu_ps(kptr);
__m256 _k61 = _mm256_loadu_ps(kptr + 8);
__m256 _k62 = _mm256_loadu_ps(kptr + 16);
__m256 _k63 = _mm256_loadu_ps(kptr + 24);
__m256 _k64 = _mm256_loadu_ps(kptr + 32);
__m256 _k65 = _mm256_loadu_ps(kptr + 40);
__m256 _k66 = _mm256_loadu_ps(kptr + 48);
__m256 _k67 = _mm256_loadu_ps(kptr + 56);
kptr += 64;
_sum00 = _mm256_comp_fmadd_ps(_r200, _k60, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r201, _k61, _sum01);
_sum00 = _mm256_comp_fmadd_ps(_r202, _k62, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r203, _k63, _sum01);
_sum00 = _mm256_comp_fmadd_ps(_r204, _k64, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r205, _k65, _sum01);
_sum00 = _mm256_comp_fmadd_ps(_r206, _k66, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r207, _k67, _sum01);
__m256 _r210 = _mm256_broadcast_ss(r2 + 8);
__m256 _r211 = _mm256_broadcast_ss(r2 + 9);
__m256 _r212 = _mm256_broadcast_ss(r2 + 10);
__m256 _r213 = _mm256_broadcast_ss(r2 + 11);
__m256 _r214 = _mm256_broadcast_ss(r2 + 12);
__m256 _r215 = _mm256_broadcast_ss(r2 + 13);
__m256 _r216 = _mm256_broadcast_ss(r2 + 14);
__m256 _r217 = _mm256_broadcast_ss(r2 + 15);
_sum10 = _mm256_comp_fmadd_ps(_r210, _k60, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r211, _k61, _sum11);
_sum10 = _mm256_comp_fmadd_ps(_r212, _k62, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r213, _k63, _sum11);
_sum10 = _mm256_comp_fmadd_ps(_r214, _k64, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r215, _k65, _sum11);
_sum10 = _mm256_comp_fmadd_ps(_r216, _k66, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r217, _k67, _sum11);
__m256 _k70 = _mm256_loadu_ps(kptr);
__m256 _k71 = _mm256_loadu_ps(kptr + 8);
__m256 _k72 = _mm256_loadu_ps(kptr + 16);
__m256 _k73 = _mm256_loadu_ps(kptr + 24);
__m256 _k74 = _mm256_loadu_ps(kptr + 32);
__m256 _k75 = _mm256_loadu_ps(kptr + 40);
__m256 _k76 = _mm256_loadu_ps(kptr + 48);
__m256 _k77 = _mm256_loadu_ps(kptr + 56);
kptr += 64;
_sum00 = _mm256_comp_fmadd_ps(_r210, _k70, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r211, _k71, _sum01);
_sum00 = _mm256_comp_fmadd_ps(_r212, _k72, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r213, _k73, _sum01);
_sum00 = _mm256_comp_fmadd_ps(_r214, _k74, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r215, _k75, _sum01);
_sum00 = _mm256_comp_fmadd_ps(_r216, _k76, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r217, _k77, _sum01);
__m256 _r220 = _mm256_broadcast_ss(r2 + 16);
__m256 _r221 = _mm256_broadcast_ss(r2 + 17);
__m256 _r222 = _mm256_broadcast_ss(r2 + 18);
__m256 _r223 = _mm256_broadcast_ss(r2 + 19);
__m256 _r224 = _mm256_broadcast_ss(r2 + 20);
__m256 _r225 = _mm256_broadcast_ss(r2 + 21);
__m256 _r226 = _mm256_broadcast_ss(r2 + 22);
__m256 _r227 = _mm256_broadcast_ss(r2 + 23);
_sum10 = _mm256_comp_fmadd_ps(_r220, _k70, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r221, _k71, _sum11);
_sum10 = _mm256_comp_fmadd_ps(_r222, _k72, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r223, _k73, _sum11);
_sum10 = _mm256_comp_fmadd_ps(_r224, _k74, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r225, _k75, _sum11);
_sum10 = _mm256_comp_fmadd_ps(_r226, _k76, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r227, _k77, _sum11);
// last tap group (2,2): no kptr increment — kptr is rewound below instead
__m256 _k80 = _mm256_loadu_ps(kptr);
__m256 _k81 = _mm256_loadu_ps(kptr + 8);
__m256 _k82 = _mm256_loadu_ps(kptr + 16);
__m256 _k83 = _mm256_loadu_ps(kptr + 24);
__m256 _k84 = _mm256_loadu_ps(kptr + 32);
__m256 _k85 = _mm256_loadu_ps(kptr + 40);
__m256 _k86 = _mm256_loadu_ps(kptr + 48);
__m256 _k87 = _mm256_loadu_ps(kptr + 56);
_sum00 = _mm256_comp_fmadd_ps(_r220, _k80, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r221, _k81, _sum01);
_sum00 = _mm256_comp_fmadd_ps(_r222, _k82, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r223, _k83, _sum01);
_sum00 = _mm256_comp_fmadd_ps(_r224, _k84, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r225, _k85, _sum01);
_sum00 = _mm256_comp_fmadd_ps(_r226, _k86, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r227, _k87, _sum01);
__m256 _r230 = _mm256_broadcast_ss(r2 + 24);
__m256 _r231 = _mm256_broadcast_ss(r2 + 25);
__m256 _r232 = _mm256_broadcast_ss(r2 + 26);
__m256 _r233 = _mm256_broadcast_ss(r2 + 27);
__m256 _r234 = _mm256_broadcast_ss(r2 + 28);
__m256 _r235 = _mm256_broadcast_ss(r2 + 29);
__m256 _r236 = _mm256_broadcast_ss(r2 + 30);
__m256 _r237 = _mm256_broadcast_ss(r2 + 31);
_sum10 = _mm256_comp_fmadd_ps(_r230, _k80, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r231, _k81, _sum11);
_sum10 = _mm256_comp_fmadd_ps(_r232, _k82, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r233, _k83, _sum11);
_sum10 = _mm256_comp_fmadd_ps(_r234, _k84, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r235, _k85, _sum11);
_sum10 = _mm256_comp_fmadd_ps(_r236, _k86, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r237, _k87, _sum11);
// rewind to the first of the 9 tap blocks for the next pixel pair
kptr -= 64 * 8;
// fold the chain-splitting accumulators and store both output pixels
_sum00 = _mm256_add_ps(_sum00, _sum01);
_sum10 = _mm256_add_ps(_sum10, _sum11);
_mm256_storeu_ps(outptr, _sum00);
_mm256_storeu_ps(outptr + 8, _sum10);
// advance two pack8 pixels (2 * 8 floats)
r0 += 16;
r1 += 16;
r2 += 16;
outptr += 16;
}
// tail loop: one output pixel per iteration (odd outw), same 9-tap scheme
for (; j < outw; j++)
{
__m256 _sum0 = _mm256_loadu_ps(outptr);
__m256 _sum1 = _mm256_setzero_ps();
__m256 _r000 = _mm256_broadcast_ss(r0 + 0);
__m256 _r001 = _mm256_broadcast_ss(r0 + 1);
__m256 _r002 = _mm256_broadcast_ss(r0 + 2);
__m256 _r003 = _mm256_broadcast_ss(r0 + 3);
__m256 _r004 = _mm256_broadcast_ss(r0 + 4);
__m256 _r005 = _mm256_broadcast_ss(r0 + 5);
__m256 _r006 = _mm256_broadcast_ss(r0 + 6);
__m256 _r007 = _mm256_broadcast_ss(r0 + 7);
__m256 _k00 = _mm256_loadu_ps(kptr);
__m256 _k01 = _mm256_loadu_ps(kptr + 8);
__m256 _k02 = _mm256_loadu_ps(kptr + 16);
__m256 _k03 = _mm256_loadu_ps(kptr + 24);
__m256 _k04 = _mm256_loadu_ps(kptr + 32);
__m256 _k05 = _mm256_loadu_ps(kptr + 40);
__m256 _k06 = _mm256_loadu_ps(kptr + 48);
__m256 _k07 = _mm256_loadu_ps(kptr + 56);
kptr += 64;
_sum0 = _mm256_comp_fmadd_ps(_r000, _k00, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r001, _k01, _sum1);
_sum0 = _mm256_comp_fmadd_ps(_r002, _k02, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r003, _k03, _sum1);
_sum0 = _mm256_comp_fmadd_ps(_r004, _k04, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r005, _k05, _sum1);
_sum0 = _mm256_comp_fmadd_ps(_r006, _k06, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r007, _k07, _sum1);
__m256 _r010 = _mm256_broadcast_ss(r0 + 8);
__m256 _r011 = _mm256_broadcast_ss(r0 + 9);
__m256 _r012 = _mm256_broadcast_ss(r0 + 10);
__m256 _r013 = _mm256_broadcast_ss(r0 + 11);
__m256 _r014 = _mm256_broadcast_ss(r0 + 12);
__m256 _r015 = _mm256_broadcast_ss(r0 + 13);
__m256 _r016 = _mm256_broadcast_ss(r0 + 14);
__m256 _r017 = _mm256_broadcast_ss(r0 + 15);
__m256 _k10 = _mm256_loadu_ps(kptr);
__m256 _k11 = _mm256_loadu_ps(kptr + 8);
__m256 _k12 = _mm256_loadu_ps(kptr + 16);
__m256 _k13 = _mm256_loadu_ps(kptr + 24);
__m256 _k14 = _mm256_loadu_ps(kptr + 32);
__m256 _k15 = _mm256_loadu_ps(kptr + 40);
__m256 _k16 = _mm256_loadu_ps(kptr + 48);
__m256 _k17 = _mm256_loadu_ps(kptr + 56);
kptr += 64;
_sum0 = _mm256_comp_fmadd_ps(_r010, _k10, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r011, _k11, _sum1);
_sum0 = _mm256_comp_fmadd_ps(_r012, _k12, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r013, _k13, _sum1);
_sum0 = _mm256_comp_fmadd_ps(_r014, _k14, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r015, _k15, _sum1);
_sum0 = _mm256_comp_fmadd_ps(_r016, _k16, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r017, _k17, _sum1);
__m256 _r020 = _mm256_broadcast_ss(r0 + 16);
__m256 _r021 = _mm256_broadcast_ss(r0 + 17);
__m256 _r022 = _mm256_broadcast_ss(r0 + 18);
__m256 _r023 = _mm256_broadcast_ss(r0 + 19);
__m256 _r024 = _mm256_broadcast_ss(r0 + 20);
__m256 _r025 = _mm256_broadcast_ss(r0 + 21);
__m256 _r026 = _mm256_broadcast_ss(r0 + 22);
__m256 _r027 = _mm256_broadcast_ss(r0 + 23);
__m256 _k20 = _mm256_loadu_ps(kptr);
__m256 _k21 = _mm256_loadu_ps(kptr + 8);
__m256 _k22 = _mm256_loadu_ps(kptr + 16);
__m256 _k23 = _mm256_loadu_ps(kptr + 24);
__m256 _k24 = _mm256_loadu_ps(kptr + 32);
__m256 _k25 = _mm256_loadu_ps(kptr + 40);
__m256 _k26 = _mm256_loadu_ps(kptr + 48);
__m256 _k27 = _mm256_loadu_ps(kptr + 56);
kptr += 64;
_sum0 = _mm256_comp_fmadd_ps(_r020, _k20, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r021, _k21, _sum1);
_sum0 = _mm256_comp_fmadd_ps(_r022, _k22, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r023, _k23, _sum1);
_sum0 = _mm256_comp_fmadd_ps(_r024, _k24, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r025, _k25, _sum1);
_sum0 = _mm256_comp_fmadd_ps(_r026, _k26, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r027, _k27, _sum1);
__m256 _r100 = _mm256_broadcast_ss(r1 + 0);
__m256 _r101 = _mm256_broadcast_ss(r1 + 1);
__m256 _r102 = _mm256_broadcast_ss(r1 + 2);
__m256 _r103 = _mm256_broadcast_ss(r1 + 3);
__m256 _r104 = _mm256_broadcast_ss(r1 + 4);
__m256 _r105 = _mm256_broadcast_ss(r1 + 5);
__m256 _r106 = _mm256_broadcast_ss(r1 + 6);
__m256 _r107 = _mm256_broadcast_ss(r1 + 7);
__m256 _k30 = _mm256_loadu_ps(kptr);
__m256 _k31 = _mm256_loadu_ps(kptr + 8);
__m256 _k32 = _mm256_loadu_ps(kptr + 16);
__m256 _k33 = _mm256_loadu_ps(kptr + 24);
__m256 _k34 = _mm256_loadu_ps(kptr + 32);
__m256 _k35 = _mm256_loadu_ps(kptr + 40);
__m256 _k36 = _mm256_loadu_ps(kptr + 48);
__m256 _k37 = _mm256_loadu_ps(kptr + 56);
kptr += 64;
_sum0 = _mm256_comp_fmadd_ps(_r100, _k30, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r101, _k31, _sum1);
_sum0 = _mm256_comp_fmadd_ps(_r102, _k32, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r103, _k33, _sum1);
_sum0 = _mm256_comp_fmadd_ps(_r104, _k34, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r105, _k35, _sum1);
_sum0 = _mm256_comp_fmadd_ps(_r106, _k36, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r107, _k37, _sum1);
__m256 _r110 = _mm256_broadcast_ss(r1 + 8);
__m256 _r111 = _mm256_broadcast_ss(r1 + 9);
__m256 _r112 = _mm256_broadcast_ss(r1 + 10);
__m256 _r113 = _mm256_broadcast_ss(r1 + 11);
__m256 _r114 = _mm256_broadcast_ss(r1 + 12);
__m256 _r115 = _mm256_broadcast_ss(r1 + 13);
__m256 _r116 = _mm256_broadcast_ss(r1 + 14);
__m256 _r117 = _mm256_broadcast_ss(r1 + 15);
__m256 _k40 = _mm256_loadu_ps(kptr);
__m256 _k41 = _mm256_loadu_ps(kptr + 8);
__m256 _k42 = _mm256_loadu_ps(kptr + 16);
__m256 _k43 = _mm256_loadu_ps(kptr + 24);
__m256 _k44 = _mm256_loadu_ps(kptr + 32);
__m256 _k45 = _mm256_loadu_ps(kptr + 40);
__m256 _k46 = _mm256_loadu_ps(kptr + 48);
__m256 _k47 = _mm256_loadu_ps(kptr + 56);
kptr += 64;
_sum0 = _mm256_comp_fmadd_ps(_r110, _k40, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r111, _k41, _sum1);
_sum0 = _mm256_comp_fmadd_ps(_r112, _k42, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r113, _k43, _sum1);
_sum0 = _mm256_comp_fmadd_ps(_r114, _k44, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r115, _k45, _sum1);
_sum0 = _mm256_comp_fmadd_ps(_r116, _k46, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r117, _k47, _sum1);
__m256 _r120 = _mm256_broadcast_ss(r1 + 16);
__m256 _r121 = _mm256_broadcast_ss(r1 + 17);
__m256 _r122 = _mm256_broadcast_ss(r1 + 18);
__m256 _r123 = _mm256_broadcast_ss(r1 + 19);
__m256 _r124 = _mm256_broadcast_ss(r1 + 20);
__m256 _r125 = _mm256_broadcast_ss(r1 + 21);
__m256 _r126 = _mm256_broadcast_ss(r1 + 22);
__m256 _r127 = _mm256_broadcast_ss(r1 + 23);
__m256 _k50 = _mm256_loadu_ps(kptr);
__m256 _k51 = _mm256_loadu_ps(kptr + 8);
__m256 _k52 = _mm256_loadu_ps(kptr + 16);
__m256 _k53 = _mm256_loadu_ps(kptr + 24);
__m256 _k54 = _mm256_loadu_ps(kptr + 32);
__m256 _k55 = _mm256_loadu_ps(kptr + 40);
__m256 _k56 = _mm256_loadu_ps(kptr + 48);
__m256 _k57 = _mm256_loadu_ps(kptr + 56);
kptr += 64;
_sum0 = _mm256_comp_fmadd_ps(_r120, _k50, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r121, _k51, _sum1);
_sum0 = _mm256_comp_fmadd_ps(_r122, _k52, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r123, _k53, _sum1);
_sum0 = _mm256_comp_fmadd_ps(_r124, _k54, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r125, _k55, _sum1);
_sum0 = _mm256_comp_fmadd_ps(_r126, _k56, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r127, _k57, _sum1);
__m256 _r200 = _mm256_broadcast_ss(r2 + 0);
__m256 _r201 = _mm256_broadcast_ss(r2 + 1);
__m256 _r202 = _mm256_broadcast_ss(r2 + 2);
__m256 _r203 = _mm256_broadcast_ss(r2 + 3);
__m256 _r204 = _mm256_broadcast_ss(r2 + 4);
__m256 _r205 = _mm256_broadcast_ss(r2 + 5);
__m256 _r206 = _mm256_broadcast_ss(r2 + 6);
__m256 _r207 = _mm256_broadcast_ss(r2 + 7);
__m256 _k60 = _mm256_loadu_ps(kptr);
__m256 _k61 = _mm256_loadu_ps(kptr + 8);
__m256 _k62 = _mm256_loadu_ps(kptr + 16);
__m256 _k63 = _mm256_loadu_ps(kptr + 24);
__m256 _k64 = _mm256_loadu_ps(kptr + 32);
__m256 _k65 = _mm256_loadu_ps(kptr + 40);
__m256 _k66 = _mm256_loadu_ps(kptr + 48);
__m256 _k67 = _mm256_loadu_ps(kptr + 56);
kptr += 64;
_sum0 = _mm256_comp_fmadd_ps(_r200, _k60, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r201, _k61, _sum1);
_sum0 = _mm256_comp_fmadd_ps(_r202, _k62, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r203, _k63, _sum1);
_sum0 = _mm256_comp_fmadd_ps(_r204, _k64, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r205, _k65, _sum1);
_sum0 = _mm256_comp_fmadd_ps(_r206, _k66, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r207, _k67, _sum1);
__m256 _r210 = _mm256_broadcast_ss(r2 + 8);
__m256 _r211 = _mm256_broadcast_ss(r2 + 9);
__m256 _r212 = _mm256_broadcast_ss(r2 + 10);
__m256 _r213 = _mm256_broadcast_ss(r2 + 11);
__m256 _r214 = _mm256_broadcast_ss(r2 + 12);
__m256 _r215 = _mm256_broadcast_ss(r2 + 13);
__m256 _r216 = _mm256_broadcast_ss(r2 + 14);
__m256 _r217 = _mm256_broadcast_ss(r2 + 15);
__m256 _k70 = _mm256_loadu_ps(kptr);
__m256 _k71 = _mm256_loadu_ps(kptr + 8);
__m256 _k72 = _mm256_loadu_ps(kptr + 16);
__m256 _k73 = _mm256_loadu_ps(kptr + 24);
__m256 _k74 = _mm256_loadu_ps(kptr + 32);
__m256 _k75 = _mm256_loadu_ps(kptr + 40);
__m256 _k76 = _mm256_loadu_ps(kptr + 48);
__m256 _k77 = _mm256_loadu_ps(kptr + 56);
kptr += 64;
_sum0 = _mm256_comp_fmadd_ps(_r210, _k70, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r211, _k71, _sum1);
_sum0 = _mm256_comp_fmadd_ps(_r212, _k72, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r213, _k73, _sum1);
_sum0 = _mm256_comp_fmadd_ps(_r214, _k74, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r215, _k75, _sum1);
_sum0 = _mm256_comp_fmadd_ps(_r216, _k76, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r217, _k77, _sum1);
__m256 _r220 = _mm256_broadcast_ss(r2 + 16);
__m256 _r221 = _mm256_broadcast_ss(r2 + 17);
__m256 _r222 = _mm256_broadcast_ss(r2 + 18);
__m256 _r223 = _mm256_broadcast_ss(r2 + 19);
__m256 _r224 = _mm256_broadcast_ss(r2 + 20);
__m256 _r225 = _mm256_broadcast_ss(r2 + 21);
__m256 _r226 = _mm256_broadcast_ss(r2 + 22);
__m256 _r227 = _mm256_broadcast_ss(r2 + 23);
// last tap group (2,2): no kptr increment — rewound below
__m256 _k80 = _mm256_loadu_ps(kptr);
__m256 _k81 = _mm256_loadu_ps(kptr + 8);
__m256 _k82 = _mm256_loadu_ps(kptr + 16);
__m256 _k83 = _mm256_loadu_ps(kptr + 24);
__m256 _k84 = _mm256_loadu_ps(kptr + 32);
__m256 _k85 = _mm256_loadu_ps(kptr + 40);
__m256 _k86 = _mm256_loadu_ps(kptr + 48);
__m256 _k87 = _mm256_loadu_ps(kptr + 56);
_sum0 = _mm256_comp_fmadd_ps(_r220, _k80, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r221, _k81, _sum1);
_sum0 = _mm256_comp_fmadd_ps(_r222, _k82, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r223, _k83, _sum1);
_sum0 = _mm256_comp_fmadd_ps(_r224, _k84, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r225, _k85, _sum1);
_sum0 = _mm256_comp_fmadd_ps(_r226, _k86, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r227, _k87, _sum1);
// rewind to the first tap block for the next pixel
kptr -= 64 * 8;
_sum0 = _mm256_add_ps(_sum0, _sum1);
_mm256_storeu_ps(outptr, _sum0);
// advance one pack8 pixel
r0 += 8;
r1 += 8;
r2 += 8;
outptr += 8;
}
// skip the 2 extra border pixels (2 * 8 floats) to reach the next input row
r0 += 16;
r1 += 16;
r2 += 16;
}
}
}
}
// Transform 3x3 kernels into the Winograd F(6x6, 3x3) domain and interleave
// them into the pack8 layout expected by conv3x3s1_winograd63_pack8_avx.
// Output layout: 8b-8a-inch/8a-64-outch/8b (8 output channels per block,
// 8 input channels interleaved, one row per transform point k in 0..63).
static void conv3x3s1_winograd63_transform_kernel_pack8_avx(const Mat& kernel, Mat& kernel_tm_pack8, int inch, int outch, const Option& opt)
{
    // winograd63 transform kernel
    Mat kernel_tm;
    kernel_tm.create(8 * 8, inch, outch);

    // G matrix for F(6x6, 3x3): maps each 3-tap row/column onto 8 samples
    const float ktm[8][3] = {
        {1.0f, 0.0f, 0.0f},
        {-2.0f / 9, -2.0f / 9, -2.0f / 9},
        {-2.0f / 9, 2.0f / 9, -2.0f / 9},
        {1.0f / 90, 1.0f / 45, 2.0f / 45},
        {1.0f / 90, -1.0f / 45, 2.0f / 45},
        {1.0f / 45, 1.0f / 90, 1.0f / 180},
        {1.0f / 45, -1.0f / 90, 1.0f / 180},
        {0.0f, 0.0f, 1.0f}
    };

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        for (int q = 0; q < inch; q++)
        {
            // the q-th 3x3 kernel of output channel p, stored as 9 floats
            const float* k33 = (const float*)kernel + p * inch * 9 + q * 9;
            float* kernel_tm0 = kernel_tm.channel(p).row(q);

            // horizontal pass: tmp[i][c] = (G * g)[i] for kernel row c
            float tmp[8][3];
            for (int i = 0; i < 8; i++)
            {
                for (int c = 0; c < 3; c++)
                {
                    const float* kr = k33 + c * 3;
                    tmp[i][c] = kr[0] * ktm[i][0] + kr[1] * ktm[i][1] + kr[2] * ktm[i][2];
                }
            }

            // vertical pass, stored transposed: out[j][i] = (tmp * G^T)[j][i]
            for (int j = 0; j < 8; j++)
            {
                const float* t = tmp[j];
                for (int i = 0; i < 8; i++)
                {
                    kernel_tm0[j * 8 + i] = t[0] * ktm[i][0] + t[1] * ktm[i][1] + t[2] * ktm[i][2];
                }
            }
        }
    }

    // interleave
    // src = 64-inch-outch
    // dst = 8b-8a-inch/8a-64-outch/8b
    kernel_tm_pack8.create(inch / 8, 64, outch / 8, (size_t)4u * 64, 64);

    for (int q = 0; q + 7 < outch; q += 8)
    {
        Mat g0 = kernel_tm_pack8.channel(q / 8);
        for (int k = 0; k < 64; k++)
        {
            float* g00 = g0.row(k);
            for (int p = 0; p + 7 < inch; p += 8)
            {
                // one 8x8 block: input-channel offset (outer) x output channel (inner)
                for (int i = 0; i < 8; i++)
                {
                    for (int j = 0; j < 8; j++)
                    {
                        const float* kk = kernel_tm.channel(q + j).row(p + i);
                        *g00++ = kk[k];
                    }
                }
            }
        }
    }
}
// Winograd F(6x6, 3x3) convolution, stride 1, pack8 layout (8 floats per AVX lane).
// Pipeline: pad input to 6n+2 -> transform input into 8x8 frequency tiles ->
// per-frequency-component GEMM against pre-transformed kernels (kernel_tm) ->
// inverse-transform 8x8 tiles back to 6x6 output blocks with bias -> crop padding.
// NOTE(review): kernel_tm layout is assumed to match the companion
// conv3x3s1_winograd63_transform_kernel_pack8_avx packer — confirm against caller.
static void conv3x3s1_winograd63_pack8_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// pad to 6n+2
// (each 8x8 input tile produces a 6x6 output block; tiles overlap by 2)
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 5) / 6 * 6;
outh = (outh + 5) / 6 * 6;
w = outw + 2;
h = outh + 2;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt);
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tiles = outw / 6;
int h_tiles = outh / 6;
const int tiles = w_tiles * h_tiles;
// 64 rows = the 8x8 frequency components of each tile
bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator);
conv3x3s1_winograd63_transform_input_pack8_avx(bottom_blob_bordered, bottom_blob_tm, opt);
}
bottom_blob_bordered = Mat();
// END transform input
// BEGIN dot
Mat top_blob_tm;
{
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
const int tiles = h_tm / 8 * w_tm / 8;
// Repack tiles into GEMM-friendly batches of 12/8/4/2/1 columns so the
// inner products below can keep 12 (or fewer) accumulators live at once.
// Row count is the number of batches produced by the greedy 12/8/4/2/1 split.
Mat bottom_blob_tm2;
if (tiles >= 12)
bottom_blob_tm2.create(12 * inch, tiles / 12 + (tiles % 12) / 8 + (tiles % 12 % 8) / 4 + (tiles % 12 % 4) / 2 + tiles % 12 % 2, 64, elemsize, elempack, opt.workspace_allocator);
else if (tiles >= 8)
bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 64, elemsize, elempack, opt.workspace_allocator);
else if (tiles >= 4)
bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 64, elemsize, elempack, opt.workspace_allocator);
else if (tiles >= 2)
bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 64, elemsize, elempack, opt.workspace_allocator);
else // if (tiles >= 1)
bottom_blob_tm2.create(1 * inch, tiles, 64, elemsize, elempack, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int r = 0; r < 64; r++)
{
Mat tm2 = bottom_blob_tm2.channel(r);
// tile
int i = 0;
// 12-tile batches: transpose pack8 tile data from [tile][lane] to [lane][tile]
for (; i + 11 < tiles; i += 12)
{
float* tmpptr = tm2.row(i / 12);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 8;
for (int q = 0; q < inch; q++)
{
// transpose 8x12
__m256 _r0 = _mm256_load_ps(r0);
__m256 _r1 = _mm256_load_ps(r0 + 8);
__m256 _r2 = _mm256_load_ps(r0 + 16);
__m256 _r3 = _mm256_load_ps(r0 + 24);
__m256 _r4 = _mm256_load_ps(r0 + 32);
__m256 _r5 = _mm256_load_ps(r0 + 40);
__m256 _r6 = _mm256_load_ps(r0 + 48);
__m256 _r7 = _mm256_load_ps(r0 + 56);
__m256 _r8 = _mm256_load_ps(r0 + 64);
__m256 _r9 = _mm256_load_ps(r0 + 72);
__m256 _ra = _mm256_load_ps(r0 + 80);
__m256 _rb = _mm256_load_ps(r0 + 88);
// standard AVX 8x12 transpose: unpack pairs, shuffle quads, then swap 128-bit halves
__m256 _tmp0 = _mm256_unpacklo_ps(_r0, _r1);
__m256 _tmp1 = _mm256_unpackhi_ps(_r0, _r1);
__m256 _tmp2 = _mm256_unpacklo_ps(_r2, _r3);
__m256 _tmp3 = _mm256_unpackhi_ps(_r2, _r3);
__m256 _tmp4 = _mm256_unpacklo_ps(_r4, _r5);
__m256 _tmp5 = _mm256_unpackhi_ps(_r4, _r5);
__m256 _tmp6 = _mm256_unpacklo_ps(_r6, _r7);
__m256 _tmp7 = _mm256_unpackhi_ps(_r6, _r7);
__m256 _tmp8 = _mm256_unpacklo_ps(_r8, _r9);
__m256 _tmp9 = _mm256_unpackhi_ps(_r8, _r9);
__m256 _tmpa = _mm256_unpacklo_ps(_ra, _rb);
__m256 _tmpb = _mm256_unpackhi_ps(_ra, _rb);
__m256 _tmpc = _mm256_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(1, 0, 1, 0));
__m256 _tmpd = _mm256_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(3, 2, 3, 2));
__m256 _tmpe = _mm256_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(1, 0, 1, 0));
__m256 _tmpf = _mm256_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(3, 2, 3, 2));
__m256 _tmpg = _mm256_shuffle_ps(_tmp4, _tmp6, _MM_SHUFFLE(1, 0, 1, 0));
__m256 _tmph = _mm256_shuffle_ps(_tmp4, _tmp6, _MM_SHUFFLE(3, 2, 3, 2));
__m256 _tmpi = _mm256_shuffle_ps(_tmp5, _tmp7, _MM_SHUFFLE(1, 0, 1, 0));
__m256 _tmpj = _mm256_shuffle_ps(_tmp5, _tmp7, _MM_SHUFFLE(3, 2, 3, 2));
__m256 _tmpk = _mm256_shuffle_ps(_tmp8, _tmpa, _MM_SHUFFLE(1, 0, 1, 0));
__m256 _tmpl = _mm256_shuffle_ps(_tmp8, _tmpa, _MM_SHUFFLE(3, 2, 3, 2));
__m256 _tmpm = _mm256_shuffle_ps(_tmp9, _tmpb, _MM_SHUFFLE(1, 0, 1, 0));
__m256 _tmpn = _mm256_shuffle_ps(_tmp9, _tmpb, _MM_SHUFFLE(3, 2, 3, 2));
_r0 = _mm256_permute2f128_ps(_tmpc, _tmpg, _MM_SHUFFLE(0, 2, 0, 0));
_r1 = _mm256_permute2f128_ps(_tmpk, _tmpd, _MM_SHUFFLE(0, 2, 0, 0));
_r2 = _mm256_permute2f128_ps(_tmph, _tmpl, _MM_SHUFFLE(0, 2, 0, 0));
_r3 = _mm256_permute2f128_ps(_tmpe, _tmpi, _MM_SHUFFLE(0, 2, 0, 0));
_r4 = _mm256_permute2f128_ps(_tmpm, _tmpf, _MM_SHUFFLE(0, 2, 0, 0));
_r5 = _mm256_permute2f128_ps(_tmpj, _tmpn, _MM_SHUFFLE(0, 2, 0, 0));
_r6 = _mm256_permute2f128_ps(_tmpc, _tmpg, _MM_SHUFFLE(0, 3, 0, 1));
_r7 = _mm256_permute2f128_ps(_tmpk, _tmpd, _MM_SHUFFLE(0, 3, 0, 1));
_r8 = _mm256_permute2f128_ps(_tmph, _tmpl, _MM_SHUFFLE(0, 3, 0, 1));
_r9 = _mm256_permute2f128_ps(_tmpe, _tmpi, _MM_SHUFFLE(0, 3, 0, 1));
_ra = _mm256_permute2f128_ps(_tmpm, _tmpf, _MM_SHUFFLE(0, 3, 0, 1));
_rb = _mm256_permute2f128_ps(_tmpj, _tmpn, _MM_SHUFFLE(0, 3, 0, 1));
_mm256_store_ps(tmpptr, _r0);
_mm256_store_ps(tmpptr + 8, _r1);
_mm256_store_ps(tmpptr + 8 * 2, _r2);
_mm256_store_ps(tmpptr + 8 * 3, _r3);
_mm256_store_ps(tmpptr + 8 * 4, _r4);
_mm256_store_ps(tmpptr + 8 * 5, _r5);
_mm256_store_ps(tmpptr + 8 * 6, _r6);
_mm256_store_ps(tmpptr + 8 * 7, _r7);
_mm256_store_ps(tmpptr + 8 * 8, _r8);
_mm256_store_ps(tmpptr + 8 * 9, _r9);
_mm256_store_ps(tmpptr + 8 * 10, _ra);
_mm256_store_ps(tmpptr + 8 * 11, _rb);
tmpptr += 96;
r0 += bottom_blob_tm.cstep * 8; // next input channel, same frequency row
}
}
// 8-tile batches
for (; i + 7 < tiles; i += 8)
{
float* tmpptr = tm2.row(i / 12 + (i % 12) / 8);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 8;
for (int q = 0; q < inch; q++)
{
// transpose 8x8
__m256 _r0 = _mm256_load_ps(r0);
__m256 _r1 = _mm256_load_ps(r0 + 8);
__m256 _r2 = _mm256_load_ps(r0 + 8 * 2);
__m256 _r3 = _mm256_load_ps(r0 + 8 * 3);
__m256 _r4 = _mm256_load_ps(r0 + 8 * 4);
__m256 _r5 = _mm256_load_ps(r0 + 8 * 5);
__m256 _r6 = _mm256_load_ps(r0 + 8 * 6);
__m256 _r7 = _mm256_load_ps(r0 + 8 * 7);
__m256 _tmp0 = _mm256_unpacklo_ps(_r0, _r1);
__m256 _tmp1 = _mm256_unpackhi_ps(_r0, _r1);
__m256 _tmp2 = _mm256_unpacklo_ps(_r2, _r3);
__m256 _tmp3 = _mm256_unpackhi_ps(_r2, _r3);
__m256 _tmp4 = _mm256_unpacklo_ps(_r4, _r5);
__m256 _tmp5 = _mm256_unpackhi_ps(_r4, _r5);
__m256 _tmp6 = _mm256_unpacklo_ps(_r6, _r7);
__m256 _tmp7 = _mm256_unpackhi_ps(_r6, _r7);
__m256 _tmp8 = _mm256_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(1, 0, 1, 0));
__m256 _tmp9 = _mm256_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(3, 2, 3, 2));
__m256 _tmpa = _mm256_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(1, 0, 1, 0));
__m256 _tmpb = _mm256_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(3, 2, 3, 2));
__m256 _tmpc = _mm256_shuffle_ps(_tmp4, _tmp6, _MM_SHUFFLE(1, 0, 1, 0));
__m256 _tmpd = _mm256_shuffle_ps(_tmp4, _tmp6, _MM_SHUFFLE(3, 2, 3, 2));
__m256 _tmpe = _mm256_shuffle_ps(_tmp5, _tmp7, _MM_SHUFFLE(1, 0, 1, 0));
__m256 _tmpf = _mm256_shuffle_ps(_tmp5, _tmp7, _MM_SHUFFLE(3, 2, 3, 2));
_r0 = _mm256_permute2f128_ps(_tmp8, _tmpc, _MM_SHUFFLE(0, 2, 0, 0));
_r1 = _mm256_permute2f128_ps(_tmp9, _tmpd, _MM_SHUFFLE(0, 2, 0, 0));
_r2 = _mm256_permute2f128_ps(_tmpa, _tmpe, _MM_SHUFFLE(0, 2, 0, 0));
_r3 = _mm256_permute2f128_ps(_tmpb, _tmpf, _MM_SHUFFLE(0, 2, 0, 0));
_r4 = _mm256_permute2f128_ps(_tmp8, _tmpc, _MM_SHUFFLE(0, 3, 0, 1));
_r5 = _mm256_permute2f128_ps(_tmp9, _tmpd, _MM_SHUFFLE(0, 3, 0, 1));
_r6 = _mm256_permute2f128_ps(_tmpa, _tmpe, _MM_SHUFFLE(0, 3, 0, 1));
_r7 = _mm256_permute2f128_ps(_tmpb, _tmpf, _MM_SHUFFLE(0, 3, 0, 1));
_mm256_store_ps(tmpptr, _r0);
_mm256_store_ps(tmpptr + 8, _r1);
_mm256_store_ps(tmpptr + 8 * 2, _r2);
_mm256_store_ps(tmpptr + 8 * 3, _r3);
_mm256_store_ps(tmpptr + 8 * 4, _r4);
_mm256_store_ps(tmpptr + 8 * 5, _r5);
_mm256_store_ps(tmpptr + 8 * 6, _r6);
_mm256_store_ps(tmpptr + 8 * 7, _r7);
tmpptr += 64;
r0 += bottom_blob_tm.cstep * 8;
}
}
// 4-tile batches
for (; i + 3 < tiles; i += 4)
{
float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 8;
for (int q = 0; q < inch; q++)
{
// transpose 8x4
__m256 _r0 = _mm256_load_ps(r0);
__m256 _r1 = _mm256_load_ps(r0 + 8);
__m256 _r2 = _mm256_load_ps(r0 + 8 * 2);
__m256 _r3 = _mm256_load_ps(r0 + 8 * 3);
__m256 _tmp0 = _mm256_unpacklo_ps(_r0, _r1);
__m256 _tmp1 = _mm256_unpackhi_ps(_r0, _r1);
__m256 _tmp2 = _mm256_unpacklo_ps(_r2, _r3);
__m256 _tmp3 = _mm256_unpackhi_ps(_r2, _r3);
__m256 _tmp4 = _mm256_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(1, 0, 1, 0));
__m256 _tmp5 = _mm256_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(3, 2, 3, 2));
__m256 _tmp6 = _mm256_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(1, 0, 1, 0));
__m256 _tmp7 = _mm256_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(3, 2, 3, 2));
_r0 = _mm256_permute2f128_ps(_tmp4, _tmp5, _MM_SHUFFLE(0, 2, 0, 0));
_r1 = _mm256_permute2f128_ps(_tmp6, _tmp7, _MM_SHUFFLE(0, 2, 0, 0));
_r2 = _mm256_permute2f128_ps(_tmp4, _tmp5, _MM_SHUFFLE(0, 3, 0, 1));
_r3 = _mm256_permute2f128_ps(_tmp6, _tmp7, _MM_SHUFFLE(0, 3, 0, 1));
_mm256_store_ps(tmpptr, _r0);
_mm256_store_ps(tmpptr + 8, _r1);
_mm256_store_ps(tmpptr + 8 * 2, _r2);
_mm256_store_ps(tmpptr + 8 * 3, _r3);
tmpptr += 32;
r0 += bottom_blob_tm.cstep * 8;
}
}
// 2-tile batches
for (; i + 1 < tiles; i += 2)
{
float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 8;
for (int q = 0; q < inch; q++)
{
// transpose 8x2
__m256 _r0 = _mm256_load_ps(r0);
__m256 _r1 = _mm256_load_ps(r0 + 8);
__m256 _tmp0 = _mm256_unpacklo_ps(_r0, _r1);
__m256 _tmp1 = _mm256_unpackhi_ps(_r0, _r1);
_r0 = _mm256_permute2f128_ps(_tmp0, _tmp1, _MM_SHUFFLE(0, 2, 0, 0));
_r1 = _mm256_permute2f128_ps(_tmp0, _tmp1, _MM_SHUFFLE(0, 3, 0, 1));
_mm256_store_ps(tmpptr, _r0);
_mm256_store_ps(tmpptr + 8, _r1);
tmpptr += 16;
r0 += bottom_blob_tm.cstep * 8;
}
}
// single leftover tiles: straight copy, no transpose needed
for (; i < tiles; i++)
{
float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 8;
for (int q = 0; q < inch; q++)
{
__m256 _val = _mm256_load_ps(r0);
_mm256_store_ps(tmpptr, _val);
tmpptr += 8;
r0 += bottom_blob_tm.cstep * 8;
}
}
}
bottom_blob_tm = Mat();
// permute end
top_blob_tm.create(tiles, 64, outch, elemsize, elempack, opt.workspace_allocator);
// GEMM: for each output channel and each of the 64 frequency components,
// accumulate inch*8 scalar-times-vector FMAs per output tile.
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
float* output0_tm = top_blob_tm.channel(p);
const Mat kernel0_tm = kernel_tm.channel(p);
for (int r = 0; r < 64; r++)
{
const Mat bb2 = bottom_blob_tm2.channel(r);
int i = 0;
// 12 tiles at once: 12 accumulator registers
for (; i + 11 < tiles; i += 12)
{
const float* r0 = bb2.row(i / 12);
const float* k0 = kernel0_tm.row(r);
int nn = inch * 8; // inch always > 0
__m256 _sum0 = _mm256_setzero_ps();
__m256 _sum1 = _mm256_setzero_ps();
__m256 _sum2 = _mm256_setzero_ps();
__m256 _sum3 = _mm256_setzero_ps();
__m256 _sum4 = _mm256_setzero_ps();
__m256 _sum5 = _mm256_setzero_ps();
__m256 _sum6 = _mm256_setzero_ps();
__m256 _sum7 = _mm256_setzero_ps();
__m256 _sum8 = _mm256_setzero_ps();
__m256 _sum9 = _mm256_setzero_ps();
__m256 _suma = _mm256_setzero_ps();
__m256 _sumb = _mm256_setzero_ps();
for (int j = 0; j < nn; j++)
{
__m256 _w0 = _mm256_load_ps(k0);
__m256 _val0 = _mm256_broadcast_ss(r0);
__m256 _val1 = _mm256_broadcast_ss(r0 + 1);
_sum0 = _mm256_comp_fmadd_ps(_val0, _w0, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_val1, _w0, _sum1);
__m256 _val2 = _mm256_broadcast_ss(r0 + 2);
__m256 _val3 = _mm256_broadcast_ss(r0 + 3);
_sum2 = _mm256_comp_fmadd_ps(_val2, _w0, _sum2);
_sum3 = _mm256_comp_fmadd_ps(_val3, _w0, _sum3);
__m256 _val4 = _mm256_broadcast_ss(r0 + 4);
__m256 _val5 = _mm256_broadcast_ss(r0 + 5);
_sum4 = _mm256_comp_fmadd_ps(_val4, _w0, _sum4);
_sum5 = _mm256_comp_fmadd_ps(_val5, _w0, _sum5);
__m256 _val6 = _mm256_broadcast_ss(r0 + 6);
__m256 _val7 = _mm256_broadcast_ss(r0 + 7);
_sum6 = _mm256_comp_fmadd_ps(_val6, _w0, _sum6);
_sum7 = _mm256_comp_fmadd_ps(_val7, _w0, _sum7);
__m256 _val8 = _mm256_broadcast_ss(r0 + 8);
__m256 _val9 = _mm256_broadcast_ss(r0 + 9);
_sum8 = _mm256_comp_fmadd_ps(_val8, _w0, _sum8);
_sum9 = _mm256_comp_fmadd_ps(_val9, _w0, _sum9);
__m256 _vala = _mm256_broadcast_ss(r0 + 10);
__m256 _valb = _mm256_broadcast_ss(r0 + 11);
_suma = _mm256_comp_fmadd_ps(_vala, _w0, _suma);
_sumb = _mm256_comp_fmadd_ps(_valb, _w0, _sumb);
r0 += 12;
k0 += 8;
}
_mm256_store_ps(output0_tm, _sum0);
_mm256_store_ps(output0_tm + 8, _sum1);
_mm256_store_ps(output0_tm + 8 * 2, _sum2);
_mm256_store_ps(output0_tm + 8 * 3, _sum3);
_mm256_store_ps(output0_tm + 8 * 4, _sum4);
_mm256_store_ps(output0_tm + 8 * 5, _sum5);
_mm256_store_ps(output0_tm + 8 * 6, _sum6);
_mm256_store_ps(output0_tm + 8 * 7, _sum7);
_mm256_store_ps(output0_tm + 8 * 8, _sum8);
_mm256_store_ps(output0_tm + 8 * 9, _sum9);
_mm256_store_ps(output0_tm + 8 * 10, _suma);
_mm256_store_ps(output0_tm + 8 * 11, _sumb);
output0_tm += 8 * 12;
}
// 8 tiles at once
for (; i + 7 < tiles; i += 8)
{
const float* r0 = bb2.row(i / 12 + (i % 12) / 8);
const float* k0 = kernel0_tm.row(r);
int nn = inch * 8; // inch always > 0
__m256 _sum0 = _mm256_setzero_ps();
__m256 _sum1 = _mm256_setzero_ps();
__m256 _sum2 = _mm256_setzero_ps();
__m256 _sum3 = _mm256_setzero_ps();
__m256 _sum4 = _mm256_setzero_ps();
__m256 _sum5 = _mm256_setzero_ps();
__m256 _sum6 = _mm256_setzero_ps();
__m256 _sum7 = _mm256_setzero_ps();
for (int j = 0; j < nn; j++)
{
__m256 _w0 = _mm256_load_ps(k0);
__m256 _val0 = _mm256_broadcast_ss(r0);
__m256 _val1 = _mm256_broadcast_ss(r0 + 1);
_sum0 = _mm256_comp_fmadd_ps(_val0, _w0, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_val1, _w0, _sum1);
__m256 _val2 = _mm256_broadcast_ss(r0 + 2);
__m256 _val3 = _mm256_broadcast_ss(r0 + 3);
_sum2 = _mm256_comp_fmadd_ps(_val2, _w0, _sum2);
_sum3 = _mm256_comp_fmadd_ps(_val3, _w0, _sum3);
__m256 _val4 = _mm256_broadcast_ss(r0 + 4);
__m256 _val5 = _mm256_broadcast_ss(r0 + 5);
_sum4 = _mm256_comp_fmadd_ps(_val4, _w0, _sum4);
_sum5 = _mm256_comp_fmadd_ps(_val5, _w0, _sum5);
__m256 _val6 = _mm256_broadcast_ss(r0 + 6);
__m256 _val7 = _mm256_broadcast_ss(r0 + 7);
_sum6 = _mm256_comp_fmadd_ps(_val6, _w0, _sum6);
_sum7 = _mm256_comp_fmadd_ps(_val7, _w0, _sum7);
r0 += 8;
k0 += 8;
}
_mm256_store_ps(output0_tm, _sum0);
_mm256_store_ps(output0_tm + 8, _sum1);
_mm256_store_ps(output0_tm + 8 * 2, _sum2);
_mm256_store_ps(output0_tm + 8 * 3, _sum3);
_mm256_store_ps(output0_tm + 8 * 4, _sum4);
_mm256_store_ps(output0_tm + 8 * 5, _sum5);
_mm256_store_ps(output0_tm + 8 * 6, _sum6);
_mm256_store_ps(output0_tm + 8 * 7, _sum7);
output0_tm += 8 * 8;
}
// 4 tiles at once
for (; i + 3 < tiles; i += 4)
{
const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
const float* k0 = kernel0_tm.row(r);
int nn = inch * 8; // inch always > 0
__m256 _sum0 = _mm256_setzero_ps();
__m256 _sum1 = _mm256_setzero_ps();
__m256 _sum2 = _mm256_setzero_ps();
__m256 _sum3 = _mm256_setzero_ps();
for (int j = 0; j < nn; j++)
{
__m256 _w0 = _mm256_load_ps(k0);
__m256 _val0 = _mm256_broadcast_ss(r0);
__m256 _val1 = _mm256_broadcast_ss(r0 + 1);
_sum0 = _mm256_comp_fmadd_ps(_val0, _w0, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_val1, _w0, _sum1);
__m256 _val2 = _mm256_broadcast_ss(r0 + 2);
__m256 _val3 = _mm256_broadcast_ss(r0 + 3);
_sum2 = _mm256_comp_fmadd_ps(_val2, _w0, _sum2);
_sum3 = _mm256_comp_fmadd_ps(_val3, _w0, _sum3);
r0 += 4;
k0 += 8;
}
_mm256_store_ps(output0_tm, _sum0);
_mm256_store_ps(output0_tm + 8, _sum1);
_mm256_store_ps(output0_tm + 8 * 2, _sum2);
_mm256_store_ps(output0_tm + 8 * 3, _sum3);
output0_tm += 8 * 4;
}
// 2 tiles at once
for (; i + 1 < tiles; i += 2)
{
const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
const float* k0 = kernel0_tm.row(r);
int nn = inch * 8; // inch always > 0
__m256 _sum0 = _mm256_setzero_ps();
__m256 _sum1 = _mm256_setzero_ps();
for (int j = 0; j < nn; j++)
{
__m256 _w0 = _mm256_load_ps(k0);
__m256 _val0 = _mm256_broadcast_ss(r0);
__m256 _val1 = _mm256_broadcast_ss(r0 + 1);
_sum0 = _mm256_comp_fmadd_ps(_val0, _w0, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_val1, _w0, _sum1);
r0 += 2;
k0 += 8;
}
_mm256_store_ps(output0_tm, _sum0);
_mm256_store_ps(output0_tm + 8, _sum1);
output0_tm += 8 * 2;
}
// leftover single tiles
for (; i < tiles; i++)
{
const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
const float* k0 = kernel0_tm.row(r);
int nn = inch * 8; // inch always > 0
__m256 _sum0 = _mm256_setzero_ps();
for (int j = 0; j < nn; j++)
{
__m256 _w0 = _mm256_load_ps(k0);
__m256 _val0 = _mm256_broadcast_ss(r0);
_sum0 = _mm256_comp_fmadd_ps(_val0, _w0, _sum0);
r0 += 1;
k0 += 8;
}
_mm256_store_ps(output0_tm, _sum0);
output0_tm += 8;
}
}
}
}
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
// Write directly into top_blob when the padded size already matches,
// otherwise use a bordered intermediate that is cropped below.
Mat top_blob_bordered;
if (outw == top_blob.w && outh == top_blob.h)
{
top_blob_bordered = top_blob;
}
else
{
top_blob_bordered.create(outw, outh, outch, elemsize, elempack, opt.workspace_allocator);
}
{
conv3x3s1_winograd63_transform_output_pack8_avx(top_blob_tm, top_blob_bordered, bias, opt);
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
static void conv3x3s1_winograd43_transform_kernel_pack8_avx(const Mat& kernel, Mat& kernel_tm_pack4, int inch, int outch, const Option& opt)
{
// winograd43 transform kernel
Mat kernel_tm(6 * 6, inch, outch);
const float ktm[6][3] = {
{1.0f / 4, 0.0f, 0.0f},
{-1.0f / 6, -1.0f / 6, -1.0f / 6},
{-1.0f / 6, 1.0f / 6, -1.0f / 6},
{1.0f / 24, 1.0f / 12, 1.0f / 6},
{1.0f / 24, -1.0f / 12, 1.0f / 6},
{0.0f, 0.0f, 1.0f}
};
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
for (int q = 0; q < inch; q++)
{
const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9;
float* kernel_tm0 = kernel_tm.channel(p).row(q);
// transform kernel
const float* k0 = kernel0;
const float* k1 = kernel0 + 3;
const float* k2 = kernel0 + 6;
// h
float tmp[6][3];
for (int i = 0; i < 6; i++)
{
tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
}
// U
for (int j = 0; j < 6; j++)
{
float* tmpp = &tmp[j][0];
for (int i = 0; i < 6; i++)
{
kernel_tm0[j * 6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
}
}
}
}
// interleave
// src = 36-inch-outch
// dst = 8b-8a-inch/8a-36-outch/8b
kernel_tm_pack4.create(inch / 8, 36, outch / 8, (size_t)4u * 64, 64);
for (int q = 0; q + (8 - 1) < outch; q += 8)
{
Mat g0 = kernel_tm_pack4.channel(q / 8);
for (int k = 0; k < 36; k++)
{
float* g00 = g0.row<float>(k);
for (int p = 0; p + (8 - 1) < inch; p += 8)
{
for (int i = 0; i < 8; i++)
{
for (int j = 0; j < 8; j++)
{
const float* k00 = kernel_tm.channel(q + j).row(p + i);
g00[0] = k00[k];
g00++;
}
}
}
}
}
}
static void conv3x3s1_winograd43_pack8_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// pad to 4n+2
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 3) / 4 * 4;
outh = (outh + 3) / 4 * 4;
w = outw + 2;
h = outh + 2;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt);
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tiles = outw / 4;
int h_tiles = outh / 4;
const int tiles = w_tiles * h_tiles;
bottom_blob_tm.create(tiles, 36, inch, elemsize, elempack, opt.workspace_allocator);
conv3x3s1_winograd43_transform_input_pack8_avx(bottom_blob_bordered, bottom_blob_tm, opt);
}
bottom_blob_bordered = Mat();
// END transform input
// BEGIN dot
Mat top_blob_tm;
{
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
const int tiles = h_tm / 6 * w_tm / 6;
// permute
// bottom_blob_tm.create(tiles, 36, inch, elemsize, elempack, opt.workspace_allocator);
Mat bottom_blob_tm2;
if (tiles >= 12)
bottom_blob_tm2.create(12 * inch, tiles / 12 + (tiles % 12) / 8 + (tiles % 12 % 8) / 4 + (tiles % 12 % 4) / 2 + tiles % 12 % 2, 36, 4u * elempack, elempack, opt.workspace_allocator);
else if (tiles >= 8)
bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 36, 4u * elempack, elempack, opt.workspace_allocator);
else if (tiles >= 4)
bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 36, 4u * elempack, elempack, opt.workspace_allocator);
else if (tiles >= 2)
bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 36, 4u * elempack, elempack, opt.workspace_allocator);
else // if (tiles >= 1)
bottom_blob_tm2.create(1 * inch, tiles, 36, 4u * elempack, elempack, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int r = 0; r < 36; r++)
{
Mat tm2 = bottom_blob_tm2.channel(r);
// tile
int i = 0;
for (; i + 11 < tiles; i += 12)
{
float* tmpptr = tm2.row(i / 12);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 8;
for (int q = 0; q < inch; q++)
{
// transpose 8x12
__m256 _r0 = _mm256_load_ps(r0);
__m256 _r1 = _mm256_load_ps(r0 + 8);
__m256 _r2 = _mm256_load_ps(r0 + 8 * 2);
__m256 _r3 = _mm256_load_ps(r0 + 8 * 3);
__m256 _r4 = _mm256_load_ps(r0 + 8 * 4);
__m256 _r5 = _mm256_load_ps(r0 + 8 * 5);
__m256 _r6 = _mm256_load_ps(r0 + 8 * 6);
__m256 _r7 = _mm256_load_ps(r0 + 8 * 7);
__m256 _r8 = _mm256_load_ps(r0 + 8 * 8);
__m256 _r9 = _mm256_load_ps(r0 + 8 * 9);
__m256 _ra = _mm256_load_ps(r0 + 8 * 10);
__m256 _rb = _mm256_load_ps(r0 + 8 * 11);
__m256 _tmp0 = _mm256_unpacklo_ps(_r0, _r1);
__m256 _tmp1 = _mm256_unpackhi_ps(_r0, _r1);
__m256 _tmp2 = _mm256_unpacklo_ps(_r2, _r3);
__m256 _tmp3 = _mm256_unpackhi_ps(_r2, _r3);
__m256 _tmp4 = _mm256_unpacklo_ps(_r4, _r5);
__m256 _tmp5 = _mm256_unpackhi_ps(_r4, _r5);
__m256 _tmp6 = _mm256_unpacklo_ps(_r6, _r7);
__m256 _tmp7 = _mm256_unpackhi_ps(_r6, _r7);
__m256 _tmp8 = _mm256_unpacklo_ps(_r8, _r9);
__m256 _tmp9 = _mm256_unpackhi_ps(_r8, _r9);
__m256 _tmpa = _mm256_unpacklo_ps(_ra, _rb);
__m256 _tmpb = _mm256_unpackhi_ps(_ra, _rb);
__m256 _tmpc = _mm256_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(1, 0, 1, 0));
__m256 _tmpd = _mm256_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(3, 2, 3, 2));
__m256 _tmpe = _mm256_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(1, 0, 1, 0));
__m256 _tmpf = _mm256_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(3, 2, 3, 2));
__m256 _tmpg = _mm256_shuffle_ps(_tmp4, _tmp6, _MM_SHUFFLE(1, 0, 1, 0));
__m256 _tmph = _mm256_shuffle_ps(_tmp4, _tmp6, _MM_SHUFFLE(3, 2, 3, 2));
__m256 _tmpi = _mm256_shuffle_ps(_tmp5, _tmp7, _MM_SHUFFLE(1, 0, 1, 0));
__m256 _tmpj = _mm256_shuffle_ps(_tmp5, _tmp7, _MM_SHUFFLE(3, 2, 3, 2));
__m256 _tmpk = _mm256_shuffle_ps(_tmp8, _tmpa, _MM_SHUFFLE(1, 0, 1, 0));
__m256 _tmpl = _mm256_shuffle_ps(_tmp8, _tmpa, _MM_SHUFFLE(3, 2, 3, 2));
__m256 _tmpm = _mm256_shuffle_ps(_tmp9, _tmpb, _MM_SHUFFLE(1, 0, 1, 0));
__m256 _tmpn = _mm256_shuffle_ps(_tmp9, _tmpb, _MM_SHUFFLE(3, 2, 3, 2));
_r0 = _mm256_permute2f128_ps(_tmpc, _tmpg, _MM_SHUFFLE(0, 2, 0, 0));
_r1 = _mm256_permute2f128_ps(_tmpk, _tmpd, _MM_SHUFFLE(0, 2, 0, 0));
_r2 = _mm256_permute2f128_ps(_tmph, _tmpl, _MM_SHUFFLE(0, 2, 0, 0));
_r3 = _mm256_permute2f128_ps(_tmpe, _tmpi, _MM_SHUFFLE(0, 2, 0, 0));
_r4 = _mm256_permute2f128_ps(_tmpm, _tmpf, _MM_SHUFFLE(0, 2, 0, 0));
_r5 = _mm256_permute2f128_ps(_tmpj, _tmpn, _MM_SHUFFLE(0, 2, 0, 0));
_r6 = _mm256_permute2f128_ps(_tmpc, _tmpg, _MM_SHUFFLE(0, 3, 0, 1));
_r7 = _mm256_permute2f128_ps(_tmpk, _tmpd, _MM_SHUFFLE(0, 3, 0, 1));
_r8 = _mm256_permute2f128_ps(_tmph, _tmpl, _MM_SHUFFLE(0, 3, 0, 1));
_r9 = _mm256_permute2f128_ps(_tmpe, _tmpi, _MM_SHUFFLE(0, 3, 0, 1));
_ra = _mm256_permute2f128_ps(_tmpm, _tmpf, _MM_SHUFFLE(0, 3, 0, 1));
_rb = _mm256_permute2f128_ps(_tmpj, _tmpn, _MM_SHUFFLE(0, 3, 0, 1));
_mm256_store_ps(tmpptr, _r0);
_mm256_store_ps(tmpptr + 8, _r1);
_mm256_store_ps(tmpptr + 8 * 2, _r2);
_mm256_store_ps(tmpptr + 8 * 3, _r3);
_mm256_store_ps(tmpptr + 8 * 4, _r4);
_mm256_store_ps(tmpptr + 8 * 5, _r5);
_mm256_store_ps(tmpptr + 8 * 6, _r6);
_mm256_store_ps(tmpptr + 8 * 7, _r7);
_mm256_store_ps(tmpptr + 8 * 8, _r8);
_mm256_store_ps(tmpptr + 8 * 9, _r9);
_mm256_store_ps(tmpptr + 8 * 10, _ra);
_mm256_store_ps(tmpptr + 8 * 11, _rb);
r0 += bottom_blob_tm.cstep * 8;
tmpptr += 96;
}
}
for (; i + 7 < tiles; i += 8)
{
float* tmpptr = tm2.row(i / 12 + (i % 12) / 8);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 8;
for (int q = 0; q < inch; q++)
{
// transpose 8x8
__m256 _r0 = _mm256_load_ps(r0);
__m256 _r1 = _mm256_load_ps(r0 + 8);
__m256 _r2 = _mm256_load_ps(r0 + 8 * 2);
__m256 _r3 = _mm256_load_ps(r0 + 8 * 3);
__m256 _r4 = _mm256_load_ps(r0 + 8 * 4);
__m256 _r5 = _mm256_load_ps(r0 + 8 * 5);
__m256 _r6 = _mm256_load_ps(r0 + 8 * 6);
__m256 _r7 = _mm256_load_ps(r0 + 8 * 7);
__m256 _tmp0 = _mm256_unpacklo_ps(_r0, _r1);
__m256 _tmp1 = _mm256_unpackhi_ps(_r0, _r1);
__m256 _tmp2 = _mm256_unpacklo_ps(_r2, _r3);
__m256 _tmp3 = _mm256_unpackhi_ps(_r2, _r3);
__m256 _tmp4 = _mm256_unpacklo_ps(_r4, _r5);
__m256 _tmp5 = _mm256_unpackhi_ps(_r4, _r5);
__m256 _tmp6 = _mm256_unpacklo_ps(_r6, _r7);
__m256 _tmp7 = _mm256_unpackhi_ps(_r6, _r7);
__m256 _tmp8 = _mm256_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(1, 0, 1, 0));
__m256 _tmp9 = _mm256_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(3, 2, 3, 2));
__m256 _tmpa = _mm256_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(1, 0, 1, 0));
__m256 _tmpb = _mm256_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(3, 2, 3, 2));
__m256 _tmpc = _mm256_shuffle_ps(_tmp4, _tmp6, _MM_SHUFFLE(1, 0, 1, 0));
__m256 _tmpd = _mm256_shuffle_ps(_tmp4, _tmp6, _MM_SHUFFLE(3, 2, 3, 2));
__m256 _tmpe = _mm256_shuffle_ps(_tmp5, _tmp7, _MM_SHUFFLE(1, 0, 1, 0));
__m256 _tmpf = _mm256_shuffle_ps(_tmp5, _tmp7, _MM_SHUFFLE(3, 2, 3, 2));
_r0 = _mm256_permute2f128_ps(_tmp8, _tmpc, _MM_SHUFFLE(0, 2, 0, 0));
_r1 = _mm256_permute2f128_ps(_tmp9, _tmpd, _MM_SHUFFLE(0, 2, 0, 0));
_r2 = _mm256_permute2f128_ps(_tmpa, _tmpe, _MM_SHUFFLE(0, 2, 0, 0));
_r3 = _mm256_permute2f128_ps(_tmpb, _tmpf, _MM_SHUFFLE(0, 2, 0, 0));
_r4 = _mm256_permute2f128_ps(_tmp8, _tmpc, _MM_SHUFFLE(0, 3, 0, 1));
_r5 = _mm256_permute2f128_ps(_tmp9, _tmpd, _MM_SHUFFLE(0, 3, 0, 1));
_r6 = _mm256_permute2f128_ps(_tmpa, _tmpe, _MM_SHUFFLE(0, 3, 0, 1));
_r7 = _mm256_permute2f128_ps(_tmpb, _tmpf, _MM_SHUFFLE(0, 3, 0, 1));
_mm256_store_ps(tmpptr, _r0);
_mm256_store_ps(tmpptr + 8, _r1);
_mm256_store_ps(tmpptr + 8 * 2, _r2);
_mm256_store_ps(tmpptr + 8 * 3, _r3);
_mm256_store_ps(tmpptr + 8 * 4, _r4);
_mm256_store_ps(tmpptr + 8 * 5, _r5);
_mm256_store_ps(tmpptr + 8 * 6, _r6);
_mm256_store_ps(tmpptr + 8 * 7, _r7);
r0 += bottom_blob_tm.cstep * 8;
tmpptr += 64;
}
}
for (; i + 3 < tiles; i += 4)
{
float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 8;
for (int q = 0; q < inch; q++)
{
// transpose 8x4
__m256 _r0 = _mm256_load_ps(r0);
__m256 _r1 = _mm256_load_ps(r0 + 8);
__m256 _r2 = _mm256_load_ps(r0 + 8 * 2);
__m256 _r3 = _mm256_load_ps(r0 + 8 * 3);
__m256 _tmp0 = _mm256_unpacklo_ps(_r0, _r1);
__m256 _tmp1 = _mm256_unpackhi_ps(_r0, _r1);
__m256 _tmp2 = _mm256_unpacklo_ps(_r2, _r3);
__m256 _tmp3 = _mm256_unpackhi_ps(_r2, _r3);
__m256 _tmp4 = _mm256_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(1, 0, 1, 0));
__m256 _tmp5 = _mm256_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(3, 2, 3, 2));
__m256 _tmp6 = _mm256_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(1, 0, 1, 0));
__m256 _tmp7 = _mm256_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(3, 2, 3, 2));
_r0 = _mm256_permute2f128_ps(_tmp4, _tmp5, _MM_SHUFFLE(0, 2, 0, 0));
_r1 = _mm256_permute2f128_ps(_tmp6, _tmp7, _MM_SHUFFLE(0, 2, 0, 0));
_r2 = _mm256_permute2f128_ps(_tmp4, _tmp5, _MM_SHUFFLE(0, 3, 0, 1));
_r3 = _mm256_permute2f128_ps(_tmp6, _tmp7, _MM_SHUFFLE(0, 3, 0, 1));
_mm256_store_ps(tmpptr, _r0);
_mm256_store_ps(tmpptr + 8, _r1);
_mm256_store_ps(tmpptr + 8 * 2, _r2);
_mm256_store_ps(tmpptr + 8 * 3, _r3);
r0 += bottom_blob_tm.cstep * 8;
tmpptr += 32;
}
}
for (; i + 1 < tiles; i += 2)
{
float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 8;
for (int q = 0; q < inch; q++)
{
// transpose 8x2
__m256 _r0 = _mm256_load_ps(r0);
__m256 _r1 = _mm256_load_ps(r0 + 8);
__m256 _tmp0 = _mm256_unpacklo_ps(_r0, _r1);
__m256 _tmp1 = _mm256_unpackhi_ps(_r0, _r1);
_r0 = _mm256_permute2f128_ps(_tmp0, _tmp1, _MM_SHUFFLE(0, 2, 0, 0));
_r1 = _mm256_permute2f128_ps(_tmp0, _tmp1, _MM_SHUFFLE(0, 3, 0, 1));
_mm256_store_ps(tmpptr, _r0);
_mm256_store_ps(tmpptr + 8, _r1);
r0 += bottom_blob_tm.cstep * 8;
tmpptr += 16;
}
}
for (; i < tiles; i++)
{
float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 8;
for (int q = 0; q < inch; q++)
{
__m256 _val = _mm256_load_ps(r0);
_mm256_store_ps(tmpptr, _val);
r0 += bottom_blob_tm.cstep * 8;
tmpptr += 8;
}
}
}
bottom_blob_tm = Mat();
// permute end
top_blob_tm.create(tiles, 36, outch, 4u * elempack, elempack, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
float* output0_tm = top_blob_tm.channel(p);
const Mat kernel0_tm = kernel_tm.channel(p);
for (int r = 0; r < 36; r++)
{
const Mat bb2 = bottom_blob_tm2.channel(r);
int i = 0;
for (; i + 11 < tiles; i += 12)
{
const float* r0 = bb2.row(i / 12);
const float* k0 = kernel0_tm.row(r);
int nn = inch * 8; // inch always > 0
__m256 _sum0 = _mm256_setzero_ps();
__m256 _sum1 = _mm256_setzero_ps();
__m256 _sum2 = _mm256_setzero_ps();
__m256 _sum3 = _mm256_setzero_ps();
__m256 _sum4 = _mm256_setzero_ps();
__m256 _sum5 = _mm256_setzero_ps();
__m256 _sum6 = _mm256_setzero_ps();
__m256 _sum7 = _mm256_setzero_ps();
__m256 _sum8 = _mm256_setzero_ps();
__m256 _sum9 = _mm256_setzero_ps();
__m256 _suma = _mm256_setzero_ps();
__m256 _sumb = _mm256_setzero_ps();
for (int j = 0; j < nn; j++)
{
__m256 _w0 = _mm256_load_ps(k0);
__m256 _val0 = _mm256_broadcast_ss(r0);
__m256 _val1 = _mm256_broadcast_ss(r0 + 1);
_sum0 = _mm256_comp_fmadd_ps(_val0, _w0, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_val1, _w0, _sum1);
__m256 _val2 = _mm256_broadcast_ss(r0 + 2);
__m256 _val3 = _mm256_broadcast_ss(r0 + 3);
_sum2 = _mm256_comp_fmadd_ps(_val2, _w0, _sum2);
_sum3 = _mm256_comp_fmadd_ps(_val3, _w0, _sum3);
__m256 _val4 = _mm256_broadcast_ss(r0 + 4);
__m256 _val5 = _mm256_broadcast_ss(r0 + 5);
_sum4 = _mm256_comp_fmadd_ps(_val4, _w0, _sum4);
_sum5 = _mm256_comp_fmadd_ps(_val5, _w0, _sum5);
__m256 _val6 = _mm256_broadcast_ss(r0 + 6);
__m256 _val7 = _mm256_broadcast_ss(r0 + 7);
_sum6 = _mm256_comp_fmadd_ps(_val6, _w0, _sum6);
_sum7 = _mm256_comp_fmadd_ps(_val7, _w0, _sum7);
__m256 _val8 = _mm256_broadcast_ss(r0 + 8);
__m256 _val9 = _mm256_broadcast_ss(r0 + 9);
_sum8 = _mm256_comp_fmadd_ps(_val8, _w0, _sum8);
_sum9 = _mm256_comp_fmadd_ps(_val9, _w0, _sum9);
__m256 _vala = _mm256_broadcast_ss(r0 + 10);
__m256 _valb = _mm256_broadcast_ss(r0 + 11);
_suma = _mm256_comp_fmadd_ps(_vala, _w0, _suma);
_sumb = _mm256_comp_fmadd_ps(_valb, _w0, _sumb);
r0 += 12;
k0 += 8;
}
_mm256_store_ps(output0_tm, _sum0);
_mm256_store_ps(output0_tm + 8, _sum1);
_mm256_store_ps(output0_tm + 8 * 2, _sum2);
_mm256_store_ps(output0_tm + 8 * 3, _sum3);
_mm256_store_ps(output0_tm + 8 * 4, _sum4);
_mm256_store_ps(output0_tm + 8 * 5, _sum5);
_mm256_store_ps(output0_tm + 8 * 6, _sum6);
_mm256_store_ps(output0_tm + 8 * 7, _sum7);
_mm256_store_ps(output0_tm + 8 * 8, _sum8);
_mm256_store_ps(output0_tm + 8 * 9, _sum9);
_mm256_store_ps(output0_tm + 8 * 10, _suma);
_mm256_store_ps(output0_tm + 8 * 11, _sumb);
output0_tm += 8 * 12;
}
for (; i + 7 < tiles; i += 8)
{
const float* r0 = bb2.row(i / 12 + (i % 12) / 8);
const float* k0 = kernel0_tm.row(r);
int nn = inch * 8; // inch always > 0
__m256 _sum0 = _mm256_setzero_ps();
__m256 _sum1 = _mm256_setzero_ps();
__m256 _sum2 = _mm256_setzero_ps();
__m256 _sum3 = _mm256_setzero_ps();
__m256 _sum4 = _mm256_setzero_ps();
__m256 _sum5 = _mm256_setzero_ps();
__m256 _sum6 = _mm256_setzero_ps();
__m256 _sum7 = _mm256_setzero_ps();
for (int j = 0; j < nn; j++)
{
__m256 _w0 = _mm256_load_ps(k0);
__m256 _val0 = _mm256_broadcast_ss(r0);
__m256 _val1 = _mm256_broadcast_ss(r0 + 1);
_sum0 = _mm256_comp_fmadd_ps(_val0, _w0, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_val1, _w0, _sum1);
__m256 _val2 = _mm256_broadcast_ss(r0 + 2);
__m256 _val3 = _mm256_broadcast_ss(r0 + 3);
_sum2 = _mm256_comp_fmadd_ps(_val2, _w0, _sum2);
_sum3 = _mm256_comp_fmadd_ps(_val3, _w0, _sum3);
__m256 _val4 = _mm256_broadcast_ss(r0 + 4);
__m256 _val5 = _mm256_broadcast_ss(r0 + 5);
_sum4 = _mm256_comp_fmadd_ps(_val4, _w0, _sum4);
_sum5 = _mm256_comp_fmadd_ps(_val5, _w0, _sum5);
__m256 _val6 = _mm256_broadcast_ss(r0 + 6);
__m256 _val7 = _mm256_broadcast_ss(r0 + 7);
_sum6 = _mm256_comp_fmadd_ps(_val6, _w0, _sum6);
_sum7 = _mm256_comp_fmadd_ps(_val7, _w0, _sum7);
r0 += 8;
k0 += 8;
}
_mm256_store_ps(output0_tm, _sum0);
_mm256_store_ps(output0_tm + 8, _sum1);
_mm256_store_ps(output0_tm + 8 * 2, _sum2);
_mm256_store_ps(output0_tm + 8 * 3, _sum3);
_mm256_store_ps(output0_tm + 8 * 4, _sum4);
_mm256_store_ps(output0_tm + 8 * 5, _sum5);
_mm256_store_ps(output0_tm + 8 * 6, _sum6);
_mm256_store_ps(output0_tm + 8 * 7, _sum7);
output0_tm += 8 * 8;
}
for (; i + 3 < tiles; i += 4)
{
const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
const float* k0 = kernel0_tm.row(r);
int nn = inch * 8; // inch always > 0
__m256 _sum0 = _mm256_setzero_ps();
__m256 _sum1 = _mm256_setzero_ps();
__m256 _sum2 = _mm256_setzero_ps();
__m256 _sum3 = _mm256_setzero_ps();
for (int j = 0; j < nn; j++)
{
__m256 _w0 = _mm256_load_ps(k0);
__m256 _val0 = _mm256_broadcast_ss(r0);
__m256 _val1 = _mm256_broadcast_ss(r0 + 1);
_sum0 = _mm256_comp_fmadd_ps(_val0, _w0, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_val1, _w0, _sum1);
__m256 _val2 = _mm256_broadcast_ss(r0 + 2);
__m256 _val3 = _mm256_broadcast_ss(r0 + 3);
_sum2 = _mm256_comp_fmadd_ps(_val2, _w0, _sum2);
_sum3 = _mm256_comp_fmadd_ps(_val3, _w0, _sum3);
r0 += 4;
k0 += 8;
}
_mm256_store_ps(output0_tm, _sum0);
_mm256_store_ps(output0_tm + 8, _sum1);
_mm256_store_ps(output0_tm + 8 * 2, _sum2);
_mm256_store_ps(output0_tm + 8 * 3, _sum3);
output0_tm += 8 * 4;
}
for (; i + 1 < tiles; i += 2)
{
const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
const float* k0 = kernel0_tm.row(r);
int nn = inch * 8; // inch always > 0
__m256 _sum0 = _mm256_setzero_ps();
__m256 _sum1 = _mm256_setzero_ps();
for (int j = 0; j < nn; j++)
{
__m256 _w0 = _mm256_load_ps(k0);
__m256 _val0 = _mm256_broadcast_ss(r0);
__m256 _val1 = _mm256_broadcast_ss(r0 + 1);
_sum0 = _mm256_comp_fmadd_ps(_val0, _w0, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_val1, _w0, _sum1);
r0 += 2;
k0 += 8;
}
_mm256_store_ps(output0_tm, _sum0);
_mm256_store_ps(output0_tm + 8, _sum1);
output0_tm += 8 * 2;
}
for (; i < tiles; i++)
{
const float* r0 = bb2.row<const float>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
const float* k0 = kernel0_tm.row<const float>(r);
int nn = inch * 8; // inch always > 0
__m256 _sum0 = _mm256_setzero_ps();
for (int j = 0; j < nn; j++)
{
__m256 _w0 = _mm256_load_ps(k0);
__m256 _val0 = _mm256_broadcast_ss(r0);
_sum0 = _mm256_comp_fmadd_ps(_val0, _w0, _sum0);
r0 += 1;
k0 += 8;
}
_mm256_store_ps(output0_tm, _sum0);
output0_tm += 8;
}
}
}
}
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
Mat top_blob_bordered;
if (outw == top_blob.w && outh == top_blob.h)
{
top_blob_bordered = top_blob;
}
else
{
top_blob_bordered.create(outw, outh, outch, elemsize, elempack, opt.workspace_allocator);
}
{
conv3x3s1_winograd43_transform_output_pack8_avx(top_blob_tm, top_blob_bordered, bias, opt);
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
|
SE1P_direct_k0.c | #include "mex.h"
#include "SE_direct.h"
#include "mathint.h"
#define IDX prhs[0]
#define X prhs[1] // Source locations
#define Q prhs[2] // Source strengths
#define OPT prhs[3] // Parameters
#define PHI plhs[0] // Output
#ifndef VERBOSE
#define VERBOSE 0
#endif
/* common option-unpacking */
void unpack_opt(ewald_opts* opt, const mxArray* mx_opt)
{
    /* Pull the mandatory fields out of the MATLAB options struct.
     * A missing field makes mxGetField return NULL, which crashes here
     * (the original relied on the same behavior). */
    opt->xi = mxGetScalar(mxGetField(mx_opt, 0, "xi"));
    if (opt->xi == 0)
        mexErrMsgTxt("xi cannot be zero");
    const double* box_dims = mxGetPr(mxGetField(mx_opt, 0, "box"));
    opt->box[0] = box_dims[0];
}
// MATLAB (one-based, doubles) to C (zero-based, integers) index translation
void index_translation(int* idx, const double* idx_d, int N)
{
    /* Convert N one-based MATLAB indices (stored as doubles) into
     * zero-based C integer indices. */
    for (int k = 0; k < N; k++) {
        idx[k] = (int)idx_d[k] - 1;
    }
}
#ifdef FORCE
void SE1P_direct_k0(double* restrict force,
                    const int* restrict idx, int nidx,
                    const double* restrict x,
                    const double* restrict q, int N,
                    const ewald_opts opt)
{
    /* k=0 (zero-frequency) Fourier-space force contribution of the
     * 1-periodic Ewald sum.  The periodic direction (x) carries no
     * force; only the free y/z components are accumulated. */
    const double xi = opt.xi;
    for (int m = 0; m < nidx; m++)
    {
        double fy = 0, fz = 0;
        const double ym = x[idx[m] + N];
        const double zm = x[idx[m] + 2 * N];
        for (int n = 0; n < N; n++)
        {
            double rho2 = ( (ym - x[n + N])   * (ym - x[n + N]) +
                            (zm - x[n + 2*N]) * (zm - x[n + 2*N]) );
            if (rho2 == 0)
                continue; /* skip self/coincident pairs */
            fy += q[n] * 2. * (ym - x[n + N])   / rho2 * (1 - exp(-rho2 * xi * xi));
            fz += q[n] * 2. * (zm - x[n + 2*N]) / rho2 * (1 - exp(-rho2 * xi * xi));
        }
        /* Output layout: column-major (nidx x 3); x-component is zero. */
        force[m         ] = 0;
        force[m +   nidx] = -fy / opt.box[0];
        force[m + 2*nidx] = -fz / opt.box[0];
    }
}
#else
void SE1P_direct_k0(double* restrict phi,
                    const int* restrict idx, int nidx,
                    const double* restrict x,
                    const double* restrict q, int N,
                    const ewald_opts opt)
{
    /* k=0 contribution to the 1-periodic Ewald potential at the points
     * selected by idx.  Each target is independent, so the outer loop
     * parallelizes trivially (acc is loop-local, hence private). */
    const double xi = opt.xi;
    const double egamma = 0.57721566490153286061; /* Euler-Mascheroni */
#ifdef _OPENMP
#pragma omp parallel for
#endif
    for (int m = 0; m < nidx; m++) {
        double acc = 0;
        const double ym = x[idx[m] + N];
        const double zm = x[idx[m] + 2*N];
        for (int n = 0; n < N; n++) {
            double rho2 = ( (ym - x[n + N])   * (ym - x[n + N]) +
                            (zm - x[n + 2*N]) * (zm - x[n + 2*N]) );
            /* For large arguments E1 underflows: keep only log + gamma. */
            if (rho2 > 34)
                acc += -q[n] * (log(rho2 * xi * xi) + egamma);
            else if (rho2 > __DBL_EPSILON__)
                acc += -q[n] * (gsl_sf_expint_E1(rho2 * xi * xi) + log(rho2 * xi * xi) + egamma);
        }
        phi[m] = acc / opt.box[0];
    }
}
#endif
/* no input checking is done */
void mexFunction(int nlhs, mxArray *plhs[],
                 int nrhs, const mxArray *prhs[] )
{
    // MEX gateway: PHI = SE1P_direct_k0(IDX, X, Q, OPT).
    // No input validation is performed (see comment above the function).
    // input dims
    const int N = mxGetM(X);          // number of sources (rows of X)
    const int num_eval = mxGetN(IDX); // FIXME: indices assumed to be row vec
    const double* idx_d = mxGetPr(IDX);
    int* idx = mxMalloc(num_eval*sizeof(int));
    index_translation(idx, idx_d, num_eval); // 1-based MATLAB -> 0-based C
    const double* x = mxGetPr(X);
    const double* q = mxGetPr(Q);
#ifndef FORCE
    // Potential build: one value per evaluation point.
    PHI = mxCreateDoubleMatrix(num_eval, 1, mxREAL);
    double* restrict phi = mxGetPr(PHI);
#else
    /* This is to allocate 3 vectors for the force.
     * (FIXME) Note that the variable is still called PHI. */
    PHI = mxCreateDoubleMatrix(num_eval, 3, mxREAL);
    double* restrict phi = mxGetPr(PHI);
#endif
    ewald_opts opt;
    unpack_opt(&opt, OPT);
    if(VERBOSE)
    {
        mexPrintf("[EWALD (%s)] MEX N=(%d,%d) ","K01P",N,num_eval);
    }
    // call kernel
    SE1P_direct_k0(phi, idx, num_eval, x, q, N, opt);
    mxFree(idx);
}
|
DRB055-jacobi2d-parallel-no.c | /**
* jacobi-2d-imper.c: This file is part of the PolyBench/C 3.2 test suite.
* Jacobi with array copying, no reduction.
*
* Contact: Louis-Noel Pouchet <pouchet@cse.ohio-state.edu>
* Web address: http://polybench.sourceforge.net
* License: /LICENSE.OSU.txt
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include "polybench/polybench.h"
/* Include benchmark-specific header. */
/* Default data type is double, default size is 20x1000. */
#include "polybench/jacobi-2d-imper.h"
/* Array initialization. */
static void init_array(int n,double A[500 + 0][500 + 0],double B[500 + 0][500 + 0])
{
  /* Fill the upper-left n x n region of A and B with deterministic
   * values derived from the indices (original DataRaceBench pattern:
   * nested parallel-for initialization, no data race). */
  if (n < 1)
    return;
  int r;
  int c;
#pragma omp parallel for private(r, c)
  for (r = 0; r < n; r++) {
#pragma omp parallel for private(c)
    for (c = 0; c < n; c++) {
      A[r][c] = (((double )r) * (c + 2) + 2) / n;
      B[r][c] = (((double )r) * (c + 3) + 3) / n;
    }
  }
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
static void print_array(int n,double A[500 + 0][500 + 0])
{
  /* Dump the n x n live-out region of A to stderr (defeats dead-code
   * elimination; also usable as a correctness check). A newline is
   * inserted after every 20th element, counting in row-major order. */
  for (int r = 0; r < n; r++) {
    for (int c = 0; c < n; c++) {
      fprintf(stderr,"%0.2lf ",A[r][c]);
      if ((r * n + c) % 20 == 0)
        fprintf(stderr,"\n");
    }
  }
  fprintf(stderr,"\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
static void kernel_jacobi_2d_imper(int tsteps,int n,double A[500 + 0][500 + 0],double B[500 + 0][500 + 0])
{
  // Time-skewed, polyhedrally transformed Jacobi 2D stencil: B receives
  // freshly computed 5-point stencil values and A is updated by copying
  // B back one wavefront behind the computation.
  // NOTE(review): loop bounds are specialized for n == 500 and
  // tsteps == 10; the tsteps/n parameters are not read directly -- confirm
  // against the original pre-transformation kernel.
  //int t;
  //int i;
  //int j;
  //#pragma scop
  {
    int c2;
    int c1;
    int c0;
    // Peeled first wavefront: initial stencil sweep of row 1.
    for (c2 = 1; c2 <= 498; c2++) {
      B[1][c2] = 0.2 * (A[1][c2] + A[1][c2 - 1] + A[1][1 + c2] + A[1 + 1][c2] + A[1 - 1][c2]);
    }
    // Main skewed time loop: c0 enumerates wavefronts over (t, i).
    for (c0 = 2; c0 <= 525; c0++) {
      // Prologue wavefronts: only row 1 is touched while the pipeline fills.
      if (c0 <= 28) {
        if ((2 * c0 + 1) % 3 == 0) {
          for (c2 = ((2 * c0 + 1) * 3 < 0?-(-(2 * c0 + 1) / 3) : ((3 < 0?(-(2 * c0 + 1) + - 3 - 1) / - 3 : (2 * c0 + 1 + 3 - 1) / 3))); c2 <= (((2 * c0 + 1492) * 3 < 0?((3 < 0?-((-(2 * c0 + 1492) + 3 + 1) / 3) : -((-(2 * c0 + 1492) + 3 - 1) / 3))) : (2 * c0 + 1492) / 3)); c2++) {
            B[1][(-2 * c0 + 3 * c2 + 2) / 3] = 0.2 * (A[1][(-2 * c0 + 3 * c2 + 2) / 3] + A[1][(-2 * c0 + 3 * c2 + 2) / 3 - 1] + A[1][1 + (-2 * c0 + 3 * c2 + 2) / 3] + A[1 + 1][(-2 * c0 + 3 * c2 + 2) / 3] + A[1 - 1][(-2 * c0 + 3 * c2 + 2) / 3]);
          }
        }
      }
      // Steady state: each c1 iteration works on an independent row strip
      // (compute B for one row, copy the previous row of B back into A),
      // so the strips can run in parallel within a wavefront.
#pragma omp parallel for private(c1, c2)
      for (c1 = ((((2 * c0 + 2) * 3 < 0?-(-(2 * c0 + 2) / 3) : ((3 < 0?(-(2 * c0 + 2) + - 3 - 1) / - 3 : (2 * c0 + 2 + 3 - 1) / 3)))) > c0 + -9?(((2 * c0 + 2) * 3 < 0?-(-(2 * c0 + 2) / 3) : ((3 < 0?(-(2 * c0 + 2) + - 3 - 1) / - 3 : (2 * c0 + 2 + 3 - 1) / 3)))) : c0 + -9); c1 <= (((((2 * c0 + 498) * 3 < 0?((3 < 0?-((-(2 * c0 + 498) + 3 + 1) / 3) : -((-(2 * c0 + 498) + 3 - 1) / 3))) : (2 * c0 + 498) / 3)) < c0?(((2 * c0 + 498) * 3 < 0?((3 < 0?-((-(2 * c0 + 498) + 3 + 1) / 3) : -((-(2 * c0 + 498) + 3 - 1) / 3))) : (2 * c0 + 498) / 3)) : c0)); c1++) {
        B[-2 * c0 + 3 * c1][1] = 0.2 * (A[-2 * c0 + 3 * c1][1] + A[-2 * c0 + 3 * c1][1 - 1] + A[-2 * c0 + 3 * c1][1 + 1] + A[1 + (-2 * c0 + 3 * c1)][1] + A[-2 * c0 + 3 * c1 - 1][1]);
        for (c2 = 2 * c0 + -2 * c1 + 2; c2 <= 2 * c0 + -2 * c1 + 498; c2++) {
          A[-2 * c0 + 3 * c1 + -1][-2 * c0 + 2 * c1 + c2 + -1] = B[-2 * c0 + 3 * c1 + -1][-2 * c0 + 2 * c1 + c2 + -1];
          B[-2 * c0 + 3 * c1][-2 * c0 + 2 * c1 + c2] = 0.2 * (A[-2 * c0 + 3 * c1][-2 * c0 + 2 * c1 + c2] + A[-2 * c0 + 3 * c1][-2 * c0 + 2 * c1 + c2 - 1] + A[-2 * c0 + 3 * c1][1 + (-2 * c0 + 2 * c1 + c2)] + A[1 + (-2 * c0 + 3 * c1)][-2 * c0 + 2 * c1 + c2] + A[-2 * c0 + 3 * c1 - 1][-2 * c0 + 2 * c1 + c2]);
        }
        A[-2 * c0 + 3 * c1 + -1][498] = B[-2 * c0 + 3 * c1 + -1][498];
      }
      // Epilogue wavefronts: remaining copies of the last row (498).
      if (c0 >= 499) {
        if ((2 * c0 + 1) % 3 == 0) {
#pragma omp parallel for private(c2)
          for (c2 = ((2 * c0 + -992) * 3 < 0?-(-(2 * c0 + -992) / 3) : ((3 < 0?(-(2 * c0 + -992) + - 3 - 1) / - 3 : (2 * c0 + -992 + 3 - 1) / 3))); c2 <= (((2 * c0 + 499) * 3 < 0?((3 < 0?-((-(2 * c0 + 499) + 3 + 1) / 3) : -((-(2 * c0 + 499) + 3 - 1) / 3))) : (2 * c0 + 499) / 3)); c2++) {
            A[498][(-2 * c0 + 3 * c2 + 995) / 3] = B[498][(-2 * c0 + 3 * c2 + 995) / 3];
          }
        }
      }
    }
    // Tail: final copy of columns 1..498 of row 498 from B into A.
#pragma omp parallel for private(c2)
    for (c2 = 20; c2 <= 517; c2++) {
      A[498][c2 + -19] = B[498][c2 + -19];
    }
  }
  //#pragma endscop
}
int main(int argc,char **argv)
{
  /* Driver: allocate the two working arrays, initialize them, time the
   * Jacobi kernel, and print the live-out data (prevents DCE). */
  int n = 500;       /* problem size (fixed for this benchmark) */
  int tsteps = 10;   /* number of time steps */

  double (*A)[500 + 0][500 + 0] =
      (double (*)[500 + 0][500 + 0])polybench_alloc_data((500 + 0) * (500 + 0), sizeof(double));
  double (*B)[500 + 0][500 + 0] =
      (double (*)[500 + 0][500 + 0])polybench_alloc_data((500 + 0) * (500 + 0), sizeof(double));

  init_array(n, *A, *B);

  polybench_timer_start();
  kernel_jacobi_2d_imper(tsteps, n, *A, *B);
  polybench_timer_stop();
  polybench_timer_print();

  /* All live-out data must be consumed to defeat dead-code elimination. */
  print_array(n, *A);

  free((void *)A);
  free((void *)B);
  return 0;
}
|
detector.c | #include "darknet.h"
#include <stdio.h>
#include <dirent.h>
#include <unistd.h>
#include <sys/stat.h>
#define class temp
struct stat st;
static int coco_ids[] = {1,2,3,4,5,6,7,8,9,10,11,13,14,15,16,17,18,19,20,21,22,23,24,25,27,28,31,32,33,34,35,36,37,38,39,40,41,42,43,44,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,67,70,72,73,74,75,76,77,78,79,80,81,82,84,85,86,87,88,89,90};
void train_detector(char *datacfg, char *cfgfile, char *weightfile, int *gpus, int ngpus, int clear)
{
    // Train a detection network, optionally replicated across ngpus
    // devices, while a 64-thread loader pipeline keeps batches ready.
    // Periodic weight snapshots are written into the configured backup
    // directory.
    list *options = read_data_cfg(datacfg);
    char *train_images = option_find_str(options, "train", "data/train.list");
    char *backup_directory = option_find_str(options, "backup", "/backup/");
    srand(time(0));
    char *base = basecfg(cfgfile);
    printf("%s\n", base);
    float avg_loss = -1;
    network **nets = (network**)calloc(ngpus, sizeof(network*));
    // Fixed seed per replica so every network loads identically.
    srand(time(0));
    int seed = rand();
    int i;
    for(i = 0; i < ngpus; ++i){
        srand(seed);
#ifdef GPU
        if(gpu_index >= 0) {
            opencl_set_device(i);
        }
#endif
        nets[i] = load_network(cfgfile, weightfile, clear);
        // Scaled so the effective per-sample rate stays the same when
        // gradients are averaged over ngpus replicas.
        nets[i]->learning_rate *= ngpus;
    }
    srand(time(0));
    network *net = nets[0];
    // Images consumed per optimizer step across all replicas.
    int imgs = net->batch * net->subdivisions * ngpus;
#ifndef BENCHMARK
    printf("Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
#endif
    data train, buffer;
    layer l = net->layers[net->n - 1];
    int classes = l.classes;
    float jitter = l.jitter;
    list *plist = get_paths(train_images);
    //int N = plist->size;
    char **paths = (char **)list_to_array(plist);
    // Configure the asynchronous augmentation/loading pipeline.
    load_args args = get_base_args(net);
    args.coords = l.coords;
    args.paths = paths;
    args.n = imgs;
    args.m = plist->size;
    args.classes = classes;
    args.jitter = jitter;
    args.num_boxes = l.max_boxes;
    args.d = &buffer;
    args.type = DETECTION_DATA;
    //args.type = INSTANCE_DATA;
    args.threads = 64;
    pthread_t load_thread = load_data(args);
#ifdef LOSS_ONLY
    double time=what_time_is_it_now();
#else
    double time;
#endif
    int count = 0;
    // Save the initial (pre-training) weights once before the loop.
    if(count == 0) {
#ifdef GPU
        if (gpu_index >= 0) {
            if (ngpus != 1) sync_nets(nets, ngpus, 0);
        }
#endif
        char buff[256];
        sprintf(buff, "%s/%s.start.conv.weights", backup_directory, base);
        save_weights(net, buff);
    }
    //while(i*imgs < N*120){
    while(get_current_batch(net) < net->max_batches){
        // Random-resize models: every 10th batch, pick a new square
        // input size (multiple of 32) and rebuild every replica.
        if(l.random && count++%10 == 0){
#if !defined(BENCHMARK) && !defined(LOSS_ONLY)
            printf("Resizing\n");
#endif
            int dim = (rand() % 10 + 10) * 32;
#ifdef BENCHMARK
            dim = 608;
#endif
            // Lock the resolution for the final batches.
            if (get_current_batch(net)+200 > net->max_batches) dim = 608;
            if (net->w < dim && net->h < dim) dim = net->w;
            //int dim = (rand() % 4 + 16) * 32;
#if !defined(BENCHMARK) && !defined(LOSS_ONLY)
            printf("%d\n", dim);
#endif
            args.w = dim;
            args.h = dim;
            // Discard the batch loaded at the old size and restart the
            // loader at the new resolution.
            pthread_join(load_thread, 0);
            train = buffer;
            free_data(train);
            load_thread = load_data(args);
#pragma omp parallel for
            for(i = 0; i < ngpus; ++i){
                resize_network(nets[i], dim, dim);
            }
            net = nets[0];
        }
#ifndef LOSS_ONLY
        time=what_time_is_it_now();
#endif
        // Take the finished batch, then immediately start loading the next.
        pthread_join(load_thread, 0);
        train = buffer;
        load_thread = load_data(args);
        /*
           int k;
           for(k = 0; k < l.max_boxes; ++k){
           box b = float_to_box(train.y.vals[10] + 1 + k*5);
           if(!b.x) break;
           printf("loaded: %f %f %f %f\n", b.x, b.y, b.w, b.h);
           }
         */
        /*
           int zz;
           for(zz = 0; zz < train.X.cols; ++zz){
           image im = float_to_image(net->w, net->h, 3, train.X.vals[zz]);
           int k;
           for(k = 0; k < l.max_boxes; ++k){
           box b = float_to_box(train.y.vals[zz] + k*5, 1);
           printf("%f %f %f %f\n", b.x, b.y, b.w, b.h);
           draw_bbox(im, b, 1, 1,0,0);
           }
           show_image(im, "truth11");
           cvWaitKey(0);
           save_image(im, "truth11");
           }
         */
#ifndef LOSS_ONLY
        printf("Loaded: %lf seconds\n", what_time_is_it_now()-time);
#endif
#ifndef LOSS_ONLY
        time=what_time_is_it_now();
#endif
        float loss = 0;
#ifdef GPU
        if (gpu_index >= 0) {
            if (ngpus == 1) {
                loss = train_network(net, train);
            } else {
                // Multi-GPU: train replicas and sync every 4 iterations.
                loss = train_networks(nets, ngpus, train, 4, gpus, ngpus);
            }
        }
        else {
            loss = train_network(net, train);
        }
#else
        loss = train_network(net, train);
#endif
        if (avg_loss < 0) avg_loss = loss;
        // Exponentially smoothed loss for display only.
        avg_loss = avg_loss*.9 + loss*.1;
        i = get_current_batch(net);
#ifdef LOSS_ONLY
        printf("%lf\t%f\n", what_time_is_it_now()-time, loss);
#else
        printf("%ld: %f, %f avg, %f rate, %lf seconds, %d images\n", get_current_batch(net), loss, avg_loss, get_current_rate(net), what_time_is_it_now()-time, i*imgs);
#endif
        // Rolling backup every 100 batches.
        if(i%100==0){
#ifdef GPU
            if (gpu_index >= 0) {
                if (ngpus != 1) sync_nets(nets, ngpus, 0);
            }
#endif
            char buff[256];
            sprintf(buff, "%s/%s.backup", backup_directory, base);
            save_weights(net, buff);
        }
        // Numbered snapshots: every 10000 batches (every 100 early on).
        if(i%10000==0 || (i < 1000 && i%100 == 0)){
#ifdef GPU
            if (gpu_index >= 0) {
                if (ngpus != 1) sync_nets(nets, ngpus, 0);
            }
#endif
            char buff[256];
            sprintf(buff, "%s/%s_%d.weights", backup_directory, base, i);
            save_weights(net, buff);
        }
        free_data(train);
#ifdef GPU_STATS
        opencl_dump_mem_stat();
#endif
#ifdef BENCHMARK
        break;
#endif
    }
#ifdef GPU
    if (gpu_index >= 0) {
        if (ngpus != 1) sync_nets(nets, ngpus, 0);
    }
#endif
    // Final weights after the training loop ends.
    char buff[256];
    sprintf(buff, "%s/%s_final.weights", backup_directory, base);
    save_weights(net, buff);
    free(paths);
    free(plist);
    free(base);
    free(nets);
    free(options);
}
// Extract the numeric COCO image id from a file path: the digits after
// the last '_' if present, otherwise after the last '/'.
// Fix: the original dereferenced p+1 when the path contained neither
// separator (strrchr returns NULL) -- undefined behavior. Fall back to
// parsing the whole name in that case.
static int get_coco_image_id(char *filename)
{
    char *p = strrchr(filename, '/');
    char *c = strrchr(filename, '_');
    if(c) p = c;
    if(!p) return atoi(filename);
    return atoi(p+1);
}
static void print_cocos(FILE *fp, char *image_path, detection *dets, int num_boxes, int classes, int w, int h)
{
    // Emit one COCO-format JSON result line per (box, class) pair with a
    // nonzero score, using the dataset's category id mapping.
    int image_id = get_coco_image_id(image_path);
    for (int b = 0; b < num_boxes; ++b) {
        float xmin = dets[b].bbox.x - dets[b].bbox.w/2.;
        float xmax = dets[b].bbox.x + dets[b].bbox.w/2.;
        float ymin = dets[b].bbox.y - dets[b].bbox.h/2.;
        float ymax = dets[b].bbox.y + dets[b].bbox.h/2.;
        // Clamp the corners to the image rectangle.
        if (xmin < 0) xmin = 0;
        if (ymin < 0) ymin = 0;
        if (xmax > w) xmax = w;
        if (ymax > h) ymax = h;
        // COCO boxes are (x, y, width, height).
        float bx = xmin;
        float by = ymin;
        float bw = xmax - xmin;
        float bh = ymax - ymin;
        for (int c = 0; c < classes; ++c) {
            if (dets[b].prob[c]) fprintf(fp, "{\"image_id\":%d, \"category_id\":%d, \"bbox\":[%f, %f, %f, %f], \"score\":%f},\n", image_id, coco_ids[c], bx, by, bw, bh, dets[b].prob[c]);
        }
    }
}
void print_detector_detections(FILE **fps, char *id, detection *dets, int total, int classes, int w, int h)
{
    // VOC-style output: one file per class, one line per detection with a
    // nonzero score. Coordinates are 1-based, clamped to the image.
    for (int b = 0; b < total; ++b) {
        float xmin = dets[b].bbox.x - dets[b].bbox.w/2. + 1;
        float xmax = dets[b].bbox.x + dets[b].bbox.w/2. + 1;
        float ymin = dets[b].bbox.y - dets[b].bbox.h/2. + 1;
        float ymax = dets[b].bbox.y + dets[b].bbox.h/2. + 1;
        if (xmin < 1) xmin = 1;
        if (ymin < 1) ymin = 1;
        if (xmax > w) xmax = w;
        if (ymax > h) ymax = h;
        for (int c = 0; c < classes; ++c) {
            if (dets[b].prob[c]) fprintf(fps[c], "%s %f %f %f %f %f\n", id, dets[b].prob[c],
                    xmin, ymin, xmax, ymax);
        }
    }
}
void print_imagenet_detections(FILE *fp, int id, detection *dets, int total, int classes, int w, int h)
{
    // ImageNet VID-style output: "id class score xmin ymin xmax ymax",
    // with 1-based class ids and boxes clamped to the image.
    for (int b = 0; b < total; ++b) {
        float xmin = dets[b].bbox.x - dets[b].bbox.w/2.;
        float xmax = dets[b].bbox.x + dets[b].bbox.w/2.;
        float ymin = dets[b].bbox.y - dets[b].bbox.h/2.;
        float ymax = dets[b].bbox.y + dets[b].bbox.h/2.;
        if (xmin < 0) xmin = 0;
        if (ymin < 0) ymin = 0;
        if (xmax > w) xmax = w;
        if (ymax > h) ymax = h;
        for (int c = 0; c < classes; ++c) {
            if (dets[b].prob[c]) fprintf(fp, "%d %d %f %f %f %f %f\n", id, c+1, dets[b].prob[c],
                    xmin, ymin, xmax, ymax);
        }
    }
}
void validate_detector_flip(char *datacfg, char *cfgfile, char *weightfile, char *outfile)
{
    // Validation with horizontal-flip test-time augmentation: each image
    // is fed twice (original + mirrored) in one batch of 2, and results
    // are written in the eval format selected by the data cfg
    // ("coco" JSON, "imagenet" text, or VOC-style per-class files).
    int j;
    list *options = read_data_cfg(datacfg);
    char *valid_images = option_find_str(options, "valid", "data/train.list");
    char *name_list = option_find_str(options, "names", "data/names.list");
    char *prefix = option_find_str(options, "results", "results");
    char **names = get_labels(name_list);
    char *mapf = option_find_str(options, "map", 0);
    int *map = 0;
    if (mapf) map = read_map(mapf);
    network *net = load_network(cfgfile, weightfile, 0);
    // Batch of 2: slot 0 is the image, slot 1 its mirror.
    set_batch_network(net, 2);
    fprintf(stderr, "Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
    srand(time(0));
    list *plist = get_paths(valid_images);
    char **paths = (char **)list_to_array(plist);
    layer l = net->layers[net->n-1];
    int classes = l.classes;
    char buff[1024];
    char *type = option_find_str(options, "eval", "voc");
    FILE *fp = 0;
    FILE **fps = 0;
    int coco = 0;
    int imagenet = 0;
    // Open the output file(s) for the chosen eval format.
    // NOTE(review): fp opened in the "imagenet" branch is never fclosed
    // (only the coco branch closes it) -- confirm and fix upstream.
    if(0==strcmp(type, "coco")){
        if(!outfile) outfile = "coco_results";
        snprintf(buff, 1024, "%s/%s.json", prefix, outfile);
        fp = fopen(buff, "w");
        fprintf(fp, "[\n");
        coco = 1;
    } else if(0==strcmp(type, "imagenet")){
        if(!outfile) outfile = "imagenet-detection";
        snprintf(buff, 1024, "%s/%s.txt", prefix, outfile);
        fp = fopen(buff, "w");
        imagenet = 1;
        classes = 200;
    } else {
        if(!outfile) outfile = "comp4_det_test_";
        fps = (FILE**)calloc(classes, sizeof(FILE *));
        for(j = 0; j < classes; ++j){
            snprintf(buff, 1024, "%s/%s%s.txt", prefix, outfile, names[j]);
            fps[j] = fopen(buff, "w");
        }
    }
    int m = plist->size;
    int i=0;
    int t;
    float thresh = .005;
    float nms = .45;
    // Pipeline: nthreads images are loaded in background threads while
    // the previous nthreads are evaluated.
    int nthreads = 4;
    image *val = (image*)calloc(nthreads, sizeof(image));
    image *val_resized = (image*)calloc(nthreads, sizeof(image));
    image *buf = (image*)calloc(nthreads, sizeof(image));
    image *buf_resized = (image*)calloc(nthreads, sizeof(image));
    pthread_t *thr = (pthread_t*)calloc(nthreads, sizeof(pthread_t));
    // Double-depth input buffer: original image followed by its flip.
    image input = make_image(net->w, net->h, net->c*2);
    load_args args = {0};
    args.w = net->w;
    args.h = net->h;
    //args.type = IMAGE_DATA;
    args.type = LETTERBOX_DATA;
    // Kick off the first round of async loads.
    for(t = 0; t < nthreads; ++t){
        args.path = paths[i+t];
        args.im = &buf[t];
        args.resized = &buf_resized[t];
        thr[t] = load_data_in_thread(args);
    }
    double start = what_time_is_it_now();
    for(i = nthreads; i < m+nthreads; i += nthreads){
        fprintf(stderr, "%d\n", i);
        // Collect the finished loads from the previous round.
        for(t = 0; t < nthreads && i+t-nthreads < m; ++t){
            pthread_join(thr[t], 0);
            val[t] = buf[t];
            val_resized[t] = buf_resized[t];
        }
        // Start the next round of loads.
        for(t = 0; t < nthreads && i+t < m; ++t){
            args.path = paths[i+t];
            args.im = &buf[t];
            args.resized = &buf_resized[t];
            thr[t] = load_data_in_thread(args);
        }
        // Evaluate the collected images.
        for(t = 0; t < nthreads && i+t-nthreads < m; ++t){
            char *path = paths[i+t-nthreads];
            char *id = basecfg(path);
            // Pack image and mirrored image into one 2-image batch.
            copy_cpu(net->w*net->h*net->c, val_resized[t].data, 1, input.data, 1);
            flip_image(val_resized[t]);
            copy_cpu(net->w*net->h*net->c, val_resized[t].data, 1, input.data + net->w*net->h*net->c, 1);
            network_predict(net, input.data);
            int w = val[t].w;
            int h = val[t].h;
            int num = 0;
            detection *dets = get_network_boxes(net, w, h, thresh, .5, map, 0, &num);
            if (nms) do_nms_sort(dets, num, classes, nms);
            if (coco){
                print_cocos(fp, path, dets, num, classes, w, h);
            } else if (imagenet){
                print_imagenet_detections(fp, i+t-nthreads+1, dets, num, classes, w, h);
            } else {
                print_detector_detections(fps, id, dets, num, classes, w, h);
            }
            free_detections(dets, num);
            free(id);
            free_image(val[t]);
            free_image(val_resized[t]);
        }
    }
    for(j = 0; j < classes; ++j){
        if(fps) fclose(fps[j]);
    }
    if(coco){
        // Rewind over the trailing ",\n" and close the JSON array.
        fseek(fp, -2, SEEK_CUR);
        fprintf(fp, "\n]\n");
        fclose(fp);
    }
    fprintf(stderr, "Total Detection Time: %f Seconds\n", what_time_is_it_now() - start);
}
void validate_detector(char *datacfg, char *cfgfile, char *weightfile, char *outfile)
{
    // Validate a detector over the "valid" image list, writing results in
    // the eval format selected by the data cfg ("coco" JSON, "imagenet"
    // text, or VOC-style per-class files). Same structure as
    // validate_detector_flip but with a single forward pass per image.
    int j;
    list *options = read_data_cfg(datacfg);
    char *valid_images = option_find_str(options, "valid", "data/train.list");
    char *name_list = option_find_str(options, "names", "data/names.list");
    char *prefix = option_find_str(options, "results", "results");
    char **names = get_labels(name_list);
    char *mapf = option_find_str(options, "map", 0);
    int *map = 0;
    if (mapf) map = read_map(mapf);
    network *net = load_network(cfgfile, weightfile, 0);
    set_batch_network(net, 1);
    fprintf(stderr, "Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
    srand(time(0));
    list *plist = get_paths(valid_images);
    char **paths = (char **)list_to_array(plist);
    layer l = net->layers[net->n-1];
    int classes = l.classes;
    char buff[1024];
    char *type = option_find_str(options, "eval", "voc");
    FILE *fp = 0;
    FILE **fps = 0;
    int coco = 0;
    int imagenet = 0;
    // Open the output file(s) for the chosen eval format.
    // NOTE(review): fp opened in the "imagenet" branch is never fclosed
    // (only the coco branch closes it) -- confirm and fix upstream.
    if(0==strcmp(type, "coco")){
        if(!outfile) outfile = "coco_results";
        snprintf(buff, 1024, "%s/%s.json", prefix, outfile);
        fp = fopen(buff, "w");
        fprintf(fp, "[\n");
        coco = 1;
    } else if(0==strcmp(type, "imagenet")){
        if(!outfile) outfile = "imagenet-detection";
        snprintf(buff, 1024, "%s/%s.txt", prefix, outfile);
        fp = fopen(buff, "w");
        imagenet = 1;
        classes = 200;
    } else {
        if(!outfile) outfile = "comp4_det_test_";
        fps = (FILE**)calloc(classes, sizeof(FILE *));
        for(j = 0; j < classes; ++j){
            snprintf(buff, 1024, "%s/%s%s.txt", prefix, outfile, names[j]);
            fps[j] = fopen(buff, "w");
        }
    }
    int m = plist->size;
    int i=0;
    int t;
    float thresh = .005;
    float nms = .45;
    // Pipeline: nthreads images load in background threads while the
    // previous nthreads are evaluated.
    int nthreads = 4;
    image *val = (image*)calloc(nthreads, sizeof(image));
    image *val_resized = (image*)calloc(nthreads, sizeof(image));
    image *buf = (image*)calloc(nthreads, sizeof(image));
    image *buf_resized = (image*)calloc(nthreads, sizeof(image));
    pthread_t *thr = (pthread_t*)calloc(nthreads, sizeof(pthread_t));
    load_args args = {0};
    args.w = net->w;
    args.h = net->h;
    //args.type = IMAGE_DATA;
    args.type = LETTERBOX_DATA;
    // Kick off the first round of async loads.
    for(t = 0; t < nthreads; ++t){
        args.path = paths[i+t];
        args.im = &buf[t];
        args.resized = &buf_resized[t];
        thr[t] = load_data_in_thread(args);
    }
    double start = what_time_is_it_now();
    for(i = nthreads; i < m+nthreads; i += nthreads){
        fprintf(stderr, "%d\n", i);
        // Collect the finished loads from the previous round.
        for(t = 0; t < nthreads && i+t-nthreads < m; ++t){
            pthread_join(thr[t], 0);
            val[t] = buf[t];
            val_resized[t] = buf_resized[t];
        }
        // Start the next round of loads.
        for(t = 0; t < nthreads && i+t < m; ++t){
            args.path = paths[i+t];
            args.im = &buf[t];
            args.resized = &buf_resized[t];
            thr[t] = load_data_in_thread(args);
        }
        // Evaluate the collected images.
        for(t = 0; t < nthreads && i+t-nthreads < m; ++t){
            char *path = paths[i+t-nthreads];
            char *id = basecfg(path);
            float *X = val_resized[t].data;
            network_predict(net, X);
            int w = val[t].w;
            int h = val[t].h;
            int nboxes = 0;
            detection *dets = get_network_boxes(net, w, h, thresh, .5, map, 0, &nboxes);
            if (nms) do_nms_sort(dets, nboxes, classes, nms);
            if (coco){
                print_cocos(fp, path, dets, nboxes, classes, w, h);
            } else if (imagenet){
                print_imagenet_detections(fp, i+t-nthreads+1, dets, nboxes, classes, w, h);
            } else {
                print_detector_detections(fps, id, dets, nboxes, classes, w, h);
            }
            free_detections(dets, nboxes);
            free(id);
            free_image(val[t]);
            free_image(val_resized[t]);
        }
    }
    for(j = 0; j < classes; ++j){
        if(fps) fclose(fps[j]);
    }
    if(coco){
        // Rewind over the trailing ",\n" and close the JSON array.
        fseek(fp, -2, SEEK_CUR);
        fprintf(fp, "\n]\n");
        fclose(fp);
    }
    fprintf(stderr, "Total Detection Time: %f Seconds\n", what_time_is_it_now() - start);
}
// Measure proposal count, average IOU and recall of a detector over the
// COCO 5k validation list, printing running totals after every image.
// Fix: the per-image allocations from read_boxes (truth) and
// get_network_boxes (dets) were never released, leaking on every
// iteration; both are now freed at the bottom of the loop.
void validate_detector_recall(char *cfgfile, char *weightfile)
{
    network *net = load_network(cfgfile, weightfile, 0);
    set_batch_network(net, 1);
    fprintf(stderr, "Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
    srand(time(0));
    list *plist = get_paths("data/coco_val_5k.list");
    char **paths = (char **)list_to_array(plist);
    layer l = net->layers[net->n-1];
    int j, k;
    int m = plist->size;
    int i=0;
    float thresh = .001;
    float iou_thresh = .5;
    float nms = .4;
    int total = 0;      // ground-truth boxes seen so far
    int correct = 0;    // ground-truth boxes matched above iou_thresh
    int proposals = 0;  // detections above the objectness threshold
    float avg_iou = 0;  // running sum of best IOUs (divided for display)
    for(i = 0; i < m; ++i){
        char *path = paths[i];
        image orig = load_image_color(path, 0, 0);
        image sized = resize_image(orig, net->w, net->h);
        char *id = basecfg(path);
        network_predict(net, sized.data);
        int nboxes = 0;
        detection *dets = get_network_boxes(net, sized.w, sized.h, thresh, .5, 0, 1, &nboxes);
        if (nms) do_nms_obj(dets, nboxes, 1, nms);
        // Derive the label-file path from the image path.
        char labelpath[4096];
        find_replace(path, "images", "labels", labelpath);
        find_replace(labelpath, "JPEGImages", "labels", labelpath);
        find_replace(labelpath, ".jpg", ".txt", labelpath);
        find_replace(labelpath, ".JPEG", ".txt", labelpath);
        int num_labels = 0;
        box_label *truth = read_boxes(labelpath, &num_labels);
        for(k = 0; k < nboxes; ++k){
            if(dets[k].objectness > thresh){
                ++proposals;
            }
        }
        for (j = 0; j < num_labels; ++j) {
            ++total;
            box t = {truth[j].x, truth[j].y, truth[j].w, truth[j].h};
            float best_iou = 0;
            // NOTE(review): iterates l.w*l.h*l.n entries of dets; assumes
            // get_network_boxes returned at least that many -- confirm
            // nboxes matches for this layer type.
            for(k = 0; k < l.w*l.h*l.n; ++k){
                float iou = box_iou(dets[k].bbox, t);
                if(dets[k].objectness > thresh && iou > best_iou){
                    best_iou = iou;
                }
            }
            avg_iou += best_iou;
            if(best_iou > iou_thresh){
                ++correct;
            }
        }
        fprintf(stderr, "%5d %5d %5d\tRPs/Img: %.2f\tIOU: %.2f%%\tRecall:%.2f%%\n", i, correct, total, (float)proposals/(i+1), avg_iou*100/total, 100.*correct/total);
        // Release per-image allocations (previously leaked every iteration).
        free(truth);
        free_detections(dets, nboxes);
        free(id);
        free_image(orig);
        free_image(sized);
    }
}
// Run single-image detection: load the network once, then loop reading
// image paths (from `filename` or stdin), predict, draw boxes, and save
// the annotated image ("predictions" or `outfile`).
// Fixes: `auto t = clock();` is not valid C (implicit int was removed in
// C99) -> use clock_t; and load_network was called with 4 arguments while
// every other call in this file uses the 3-argument form.
void test_detector(char *datacfg, char *cfgfile, char *weightfile, char *filename, float thresh, float hier_thresh, char *outfile, int fullscreen)
{
    list *options = read_data_cfg(datacfg);
    char *name_list = option_find_str(options, "names", "data/names.list");
    char **names = get_labels(name_list);
    image **alphabet = load_alphabet();
    network *net = load_network(cfgfile, weightfile, 0);
    set_batch_network(net, 1);
    srand(2222222);
    double time;
    char buff[256];
    char *input = buff;
    float nms=.45;
    while(1){
        if(filename){
            strncpy(input, filename, 256);
        } else {
            printf("Enter Image Path: ");
            fflush(stdout);
            input = fgets(input, 256, stdin);
            if(!input) return;
            strtok(input, "\n");
        }
        image im = load_image_color(input,0,0);
        // Only letterbox when the image size differs from the net input.
        int resize = im.w != net->w || im.h != net->h;
        image sized = resize ? letterbox_image(im, net->w, net->h) : im;
        layer l = net->layers[net->n-1];
        float *X = sized.data;
        /* Warm-up prediction so the timed run below excludes one-time setup. */
        network_predict(net, X);
        sleep(2);
        // Timed prediction, reported both as wall time and CPU time.
        time=what_time_is_it_now();
        clock_t t = clock();
        network_predict(net, X);
        printf("%s: Predicted in %f seconds.\n", input, what_time_is_it_now()-time);
        printf("%s: Predicted in %f seconds.\n", input, ((double)(clock() - t)) / CLOCKS_PER_SEC);
        int nboxes = 0;
        detection *dets = get_network_boxes(net, im.w, im.h, thresh, hier_thresh, 0, 1, &nboxes);
        if (nms) do_nms_sort(dets, nboxes, l.classes, nms);
        draw_detections(im, dets, nboxes, thresh, names, alphabet, l.classes, 0, 0, "");
        free_detections(dets, nboxes);
        if(outfile){
            save_image(im, outfile);
        }
        else{
            save_image(im, "predictions");
#ifdef OPENCV
            show_image(im, "predictions", 0);
#endif
        }
        free_image(im);
        if (resize) free_image(sized);
        if (filename) break;
    }
}
int exists(const char *fname, const char* ext)
{
    // Return 1 iff fname contains the substring ext AND the file can be
    // opened for reading; 0 otherwise.
    if (strstr(fname, ext) == NULL)
        return 0;
    FILE *file = fopen(fname, "r");
    if (file == NULL)
        return 0;
    fclose(file);
    return 1;
}
int empty(char *dirname) {
    // Return 1 if dirname cannot be opened as a directory or contains
    // nothing besides "." and ".."; 0 otherwise.
    DIR *dir = opendir(dirname);
    if (dir == NULL) // not a dir or doesn't exist
        return 1;
    int entries = 0;
    struct dirent *entry;
    // Stop counting as soon as a third entry proves the dir is non-empty.
    while (entries <= 2 && (entry = readdir(dir)) != NULL)
        ++entries;
    closedir(dir);
    return entries <= 2;
}
#ifdef __cplusplus
__attribute__((noreturn))
#else
_Noreturn
#endif
/* Directory-watching variant of test_detector: polls in_dir until it contains
 * files, runs detection on every ".jpg" found, writes the annotated result to
 * out_dir under the same base name (".jpg" suffix stripped), then deletes the
 * input file. Never returns (infinite polling loop), hence the noreturn
 * attribute above.
 *
 * NOTE(review): `st` used in the stat() loop below is not declared in this
 * function — presumably a file-scope `struct stat`; confirm.
 * NOTE(review): the strcpy/strcat calls below are unbounded; d_name plus
 * in_dir/out_dir must fit in fname[256]/ffname[1024]/ffoname[1024]. */
void test_ddetector(char *datacfg, char *cfgfile, char *weightfile, char *in_dir, float thresh, float hier_thresh, char *out_dir)
{
/* Load class names, label glyphs and the network once, up front. */
list *options = read_data_cfg(datacfg);
char *name_list = option_find_str(options, "names", "data/names.list");
char **names = get_labels(name_list);
image **alphabet = load_alphabet();
network *net = load_network(cfgfile, weightfile, 0);
set_batch_network(net, 1);
srand(2222222);
double time;
char buff[256];
char *input = buff;
float nms=.45;
char fname[256];   /* bare file name of the current entry */
char ffname[1024]; /* full input path: in_dir/fname */
char ffoname[1024]; /* full output path: out_dir/fname minus ".jpg" */
struct dirent *de = NULL;
while(1) {
/* Block until the watched directory has at least one real entry. */
while (empty(in_dir)) {
sleep(1);
}
/* NOTE(review): opendir result is not checked for NULL before readdir. */
DIR *dr = opendir(in_dir);
while ((de = readdir(dr)) != NULL) {
printf("%s\n", de->d_name);
strcpy(fname, de->d_name);
strcpy(ffname, in_dir);
strcat(ffname, "/");
strcat(ffname, fname);
if (!exists(ffname, ".jpg")) continue;
if (1) {
/* Build the output path and drop the 4-char ".jpg" extension. */
strcpy(ffoname, out_dir);
strcat(ffoname, "/");
strcat(ffoname, fname);
int len = strlen(ffoname) - 4;
ffoname[len] = '\0';
strncpy(input, ffname, 256);
} else {
/* Dead interactive branch kept from the original test_detector. */
printf("Enter Image Path: ");
fflush(stdout);
input = fgets(input, 256, stdin);
if (!input) continue;
strtok(input, "\n");
}
/* Wait until the file size stops changing, i.e. the producer has
 * finished writing it. */
off_t size = 0;
off_t offs = 0;
do {
offs = size;
stat(input, &st);
size = st.st_size;
if (offs != size) sleep(1); else break;
} while (1);
image im = load_image_color(input, 0, 0);
/* Only letterbox when the image does not already match the net size. */
int resize = im.w != net->w || im.h != net->h;
image sized = resize ? letterbox_image(im, net->w, net->h) : im;
//image sized = resize_image(im, net->w, net->h);
//image sized2 = resize_max(im, net->w);
//image sized = crop_image(sized2, -((net->w - sized2.w)/2), -((net->h - sized2.h)/2), net->w, net->h);
//resize_network(net, sized.w, sized.h);
layer l = net->layers[net->n - 1];
float *X = sized.data;
time = what_time_is_it_now();
network_predict(net, X);
printf("%s: Predicted in %f seconds.\n", input, what_time_is_it_now() - time);
int nboxes = 0;
detection *dets = get_network_boxes(net, im.w, im.h, thresh, hier_thresh, 0, 1, &nboxes);
//printf("%d\n", nboxes);
//if (nms) do_nms_obj(boxes, probs, l.w*l.h*l.n, l.classes, nms);
if (nms) do_nms_sort(dets, nboxes, l.classes, nms);
/* Last two args: save the annotated image to ffoname instead of showing. */
draw_detections(im, dets, nboxes, thresh, names, alphabet, l.classes, 0, 1, ffoname);
free_detections(dets, nboxes);
free_image(im);
/* 'sized' aliases 'im' when no resize happened; only free it if distinct. */
if (resize) free_image(sized);
// if (filename) break;
/* Consume the input: processed files are removed from the watch dir. */
remove(input);
}
closedir(dr);
}
}
/*
void censor_detector(char *datacfg, char *cfgfile, char *weightfile, int cam_index, const char *filename, int class, float thresh, int skip)
{
#ifdef OPENCV
char *base = basecfg(cfgfile);
network *net = load_network(cfgfile, weightfile, 0);
set_batch_network(net, 1);
srand(2222222);
CvCapture * cap;
int w = 1280;
int h = 720;
if(filename){
cap = cvCaptureFromFile(filename);
}else{
cap = cvCaptureFromCAM(cam_index);
}
if(w){
cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_WIDTH, w);
}
if(h){
cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_HEIGHT, h);
}
if(!cap) error("Couldn't connect to webcam.\n");
cvNamedWindow(base, CV_WINDOW_NORMAL);
cvResizeWindow(base, 512, 512);
float fps = 0;
int i;
float nms = .45;
while(1){
image in = get_image_from_stream_cv(cap);
//image in_s = resize_image(in, net->w, net->h);
image in_s = letterbox_image(in, net->w, net->h);
layer l = net->layers[net->n-1];
float *X = in_s.data;
network_predict(net, X);
int nboxes = 0;
detection *dets = get_network_boxes(net, in.w, in.h, thresh, 0, 0, 0, &nboxes);
//if (nms) do_nms_obj(boxes, probs, l.w*l.h*l.n, l.classes, nms);
if (nms) do_nms_sort(dets, nboxes, l.classes, nms);
for(i = 0; i < nboxes; ++i){
if(dets[i].prob[class] > thresh){
box b = dets[i].bbox;
int left = b.x-b.w/2.;
int top = b.y-b.h/2.;
censor_image(in, left, top, b.w, b.h);
}
}
show_image(in, base);
cvWaitKey(10);
free_detections(dets, nboxes);
free_image(in_s);
free_image(in);
float curr = 0;
fps = .9*fps + .1*curr;
for(i = 0; i < skip; ++i){
image in = get_image_from_stream_cv(cap);
free_image(in);
}
}
#endif
}
void extract_detector(char *datacfg, char *cfgfile, char *weightfile, int cam_index, const char *filename, int class, float thresh, int skip)
{
#ifdef OPENCV
char *base = basecfg(cfgfile);
network *net = load_network(cfgfile, weightfile, 0);
set_batch_network(net, 1);
srand(2222222);
CvCapture * cap;
int w = 1280;
int h = 720;
if(filename){
cap = cvCaptureFromFile(filename);
}else{
cap = cvCaptureFromCAM(cam_index);
}
if(w){
cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_WIDTH, w);
}
if(h){
cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_HEIGHT, h);
}
if(!cap) error("Couldn't connect to webcam.\n");
cvNamedWindow(base, CV_WINDOW_NORMAL);
cvResizeWindow(base, 512, 512);
float fps = 0;
int i;
int count = 0;
float nms = .45;
while(1){
image in = get_image_from_stream_cv(cap);
//image in_s = resize_image(in, net->w, net->h);
image in_s = letterbox_image(in, net->w, net->h);
layer l = net->layers[net->n-1];
show_image(in, base);
int nboxes = 0;
float *X = in_s.data;
network_predict(net, X);
detection *dets = get_network_boxes(net, in.w, in.h, thresh, 0, 0, 1, &nboxes);
//if (nms) do_nms_obj(boxes, probs, l.w*l.h*l.n, l.classes, nms);
if (nms) do_nms_sort(dets, nboxes, l.classes, nms);
for(i = 0; i < nboxes; ++i){
if(dets[i].prob[class] > thresh){
box b = dets[i].bbox;
int size = b.w*in.w > b.h*in.h ? b.w*in.w : b.h*in.h;
int dx = b.x*in.w-size/2.;
int dy = b.y*in.h-size/2.;
image bim = crop_image(in, dx, dy, size, size);
char buff[2048];
sprintf(buff, "results/extract/%07d", count);
++count;
save_image(bim, buff);
free_image(bim);
}
}
free_detections(dets, nboxes);
free_image(in_s);
free_image(in);
float curr = 0;
fps = .9*fps + .1*curr;
for(i = 0; i < skip; ++i){
image in = get_image_from_stream_cv(cap);
free_image(in);
}
}
#endif
}
*/
/*
void network_detect(network *net, image im, float thresh, float hier_thresh, float nms, detection *dets)
{
network_predict_image(net, im);
layer l = net->layers[net->n-1];
int nboxes = num_boxes(net);
fill_network_boxes(net, im.w, im.h, thresh, hier_thresh, 0, 0, dets);
if (nms) do_nms_sort(dets, nboxes, l.classes, nms);
}
*/
/*
 * Command-line dispatcher for the detector subcommands.
 *
 * Expected argv layout (option flags are stripped by the find_*_arg
 * helpers): argv[2] = subcommand (test/train/valid/valid2/recall/demo),
 * argv[3] = data cfg, argv[4] = network cfg, argv[5] = weights (optional),
 * argv[6] = input file (optional).
 *
 * NOTE(review): the guard only checks argc < 4, so with exactly 4 args
 * argv[4] is the terminating NULL of argv and callees receive a NULL cfg
 * path — confirm whether the guard should be argc < 5.
 */
void run_detector(int argc, char **argv)
{
    char *prefix = find_char_arg(argc, argv, "-prefix", 0);
    float thresh = find_float_arg(argc, argv, "-thresh", .5);
    float hier_thresh = find_float_arg(argc, argv, "-hier", .5);
    int cam_index = find_int_arg(argc, argv, "-c", 0);
    int frame_skip = find_int_arg(argc, argv, "-s", 0);
    int avg = find_int_arg(argc, argv, "-avg", 3);
    if(argc < 4){
        fprintf(stderr, "usage: %s %s [train/test/valid] [cfg] [weights (optional)]\n", argv[0], argv[1]);
        return;
    }
    char *gpu_list = find_char_arg(argc, argv, "-gpus", 0);
    char *outfile = find_char_arg(argc, argv, "-out", 0);
    int *gpus = 0;
    int gpu = 0;
    int ngpus = 0;
    if(gpu_list){
        /* Parse a comma-separated list of GPU indices, e.g. "-gpus 0,1,2". */
        printf("%s\n", gpu_list);
        int len = strlen(gpu_list);
        ngpus = 1;
        int i;
        for(i = 0; i < len; ++i){
            if (gpu_list[i] == ',') ++ngpus;
        }
        gpus = (int*)calloc(ngpus, sizeof(int));
        if(!gpus){
            /* Fix: calloc result was previously used unchecked. */
            fprintf(stderr, "run_detector: failed to allocate gpu index list\n");
            return;
        }
        for(i = 0; i < ngpus; ++i){
            gpus[i] = atoi(gpu_list);
            /* strchr returns NULL on the last token; the +1 pointer is never
             * dereferenced because the loop terminates. */
            gpu_list = strchr(gpu_list, ',')+1;
        }
    } else {
        /* No list given: use the single globally-selected GPU. */
        gpu = gpu_index;
        gpus = &gpu;
        ngpus = 1;
    }

    int clear = find_arg(argc, argv, "-clear");
    int fullscreen = find_arg(argc, argv, "-fullscreen");
    int width = find_int_arg(argc, argv, "-w", 0);
    int height = find_int_arg(argc, argv, "-h", 0);
    int fps = find_int_arg(argc, argv, "-fps", 0);
    //int class = find_int_arg(argc, argv, "-class", 0);

    char *datacfg = argv[3];
    char *cfg = argv[4];
    char *weights = (argc > 5) ? argv[5] : 0;
    char *filename = (argc > 6) ? argv[6]: 0;
    if(0==strcmp(argv[2], "test")) test_detector(datacfg, cfg, weights, filename, thresh, hier_thresh, outfile, fullscreen);
    else if(0==strcmp(argv[2], "train")) train_detector(datacfg, cfg, weights, gpus, ngpus, clear);
    else if(0==strcmp(argv[2], "valid")) validate_detector(datacfg, cfg, weights, outfile);
    else if(0==strcmp(argv[2], "valid2")) validate_detector_flip(datacfg, cfg, weights, outfile);
    else if(0==strcmp(argv[2], "recall")) validate_detector_recall(cfg, weights);
    else if(0==strcmp(argv[2], "demo")) {
        list *options = read_data_cfg(datacfg);
        int classes = option_find_int(options, "classes", 20);
        char *name_list = option_find_str(options, "names", "data/names.list");
        char **names = get_labels(name_list);
        demo(cfg, weights, thresh, cam_index, filename, names, classes, frame_skip, prefix, avg, hier_thresh, width, height, fps, fullscreen);
    }
    //else if(0==strcmp(argv[2], "extract")) extract_detector(datacfg, cfg, weights, cam_index, filename, class, thresh, frame_skip);
    //else if(0==strcmp(argv[2], "censor")) censor_detector(datacfg, cfg, weights, cam_index, filename, class, thresh, frame_skip);

    /* Fix: the heap-allocated gpu list previously leaked. Free it only when
     * it was actually calloc'd (not when gpus aliases the local 'gpu'). */
    if (gpus != &gpu) free(gpus);
}
#undef class |
model_initializer.h | // -----------------------------------------------------------------------------
//
// Copyright (C) The BioDynaMo Project.
// All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
//
// See the LICENSE file distributed with this work for details.
// See the NOTICE file distributed with this work for additional information
// regarding copyright ownership.
//
// -----------------------------------------------------------------------------
#ifndef CORE_MODEL_INITIALIZER_H_
#define CORE_MODEL_INITIALIZER_H_
#include <ctime>
#include <string>
#include <vector>
#include "core/container/math_array.h"
#include "core/diffusion_grid.h"
#include "core/resource_manager.h"
#include "core/simulation.h"
#include "core/util/random.h"
namespace bdm {
struct ModelInitializer {
/// Creates a 3D cubic grid of agents and adds them to the
/// ExecutionContext. Type of the agent is determined by the return
/// type of parameter agent_builder.
///
/// ModelInitializer::Grid3D(8, 10, [](const Double3& pos){
/// return Cell(pos); });
/// @param agents_per_dim number of agents on each axis.
/// Number of generated agents =
/// `agents_per_dim ^ 3`
/// @param space space between the positions - e.g space = 10:
/// positions = `{(0, 0, 0), (0, 0, 10), (0, 0,
/// 20), ... }`
/// @param agent_builder function containing the logic to instantiate a
/// new agent. Takes `const
/// Double3&` as input parameter
///
template <typename Function>
static void Grid3D(size_t agents_per_dim, double space,
Function agent_builder) {
#pragma omp parallel
{
// The execution context is fetched inside the parallel region so each
// thread gets its own handle. NOTE(review): assumes GetExecutionContext
// and AddAgent are safe for concurrent per-thread use — confirm.
auto* sim = Simulation::GetActive();
auto* ctxt = sim->GetExecutionContext();
// Only the outer x loop is work-shared; y and z run sequentially per thread.
#pragma omp for
for (size_t x = 0; x < agents_per_dim; x++) {
auto x_pos = x * space;
for (size_t y = 0; y < agents_per_dim; y++) {
auto y_pos = y * space;
for (size_t z = 0; z < agents_per_dim; z++) {
auto* new_agent = agent_builder({x_pos, y_pos, z * space});
ctxt->AddAgent(new_agent);
}
}
}
}
}
/// Creates a 3D grid of agents and adds them to the
/// ExecutionContext. Type of the agent is determined by the return
/// type of parameter agent_builder.
///
/// ModelInitializer::Grid3D({8,6,4}, 10, [](const Double3&
/// pos){ return Cell(pos); });
/// @param agents_per_dim number of agents on each axis.
/// Number of generated agents =
/// `agents_per_dim[0] * agents_per_dim[1] *
/// agents_per_dim[2]`
/// @param space space between the positions - e.g space = 10:
/// positions = `{(0, 0, 0), (0, 0, 10), (0, 0,
/// 20), ... }`
/// @param agent_builder function containing the logic to instantiate a
/// new agent. Takes `const
/// Double3&` as input parameter
///
template <typename Function>
static void Grid3D(const std::array<size_t, 3>& agents_per_dim, double space,
Function agent_builder) {
#pragma omp parallel
{
// Per-thread execution context, as in the cubic overload above.
auto* sim = Simulation::GetActive();
auto* ctxt = sim->GetExecutionContext();
#pragma omp for
for (size_t x = 0; x < agents_per_dim[0]; x++) {
auto x_pos = x * space;
for (size_t y = 0; y < agents_per_dim[1]; y++) {
auto y_pos = y * space;
for (size_t z = 0; z < agents_per_dim[2]; z++) {
auto* new_agent = agent_builder({x_pos, y_pos, z * space});
ctxt->AddAgent(new_agent);
}
}
}
}
}
/// Adds agents to the ExecutionContext. Type of the simulation
/// object is determined by the return type of parameter agent_builder.
///
/// @param positions positions of the agents to be
/// @param agent_builder function containing the logic to instantiate a
/// new agent. Takes `const
/// Double3&` as input parameter
///
template <typename Function>
static void CreateAgents(const std::vector<Double3>& positions,
Function agent_builder) {
#pragma omp parallel
{
// One agent is built per entry of 'positions', in parallel.
auto* sim = Simulation::GetActive();
auto* ctxt = sim->GetExecutionContext();
#pragma omp for
for (size_t i = 0; i < positions.size(); i++) {
auto* new_agent =
agent_builder({positions[i][0], positions[i][1], positions[i][2]});
ctxt->AddAgent(new_agent);
}
}
}
/// Adds agents with random positions to the ExecutionContext.
/// Type of the agent is determined by the return type of
/// parameter agent_builder.
///
/// @param[in] min The minimum position value
/// @param[in] max The maximum position value
/// @param[in] num_agents The number agents
/// @param[in] agent_builder function containing the logic to instantiate a
/// new agent. Takes `const
/// Double3&` as input parameter
///
template <typename Function>
static void CreateAgentsRandom(double min, double max, uint64_t num_agents,
Function agent_builder) {
#pragma omp parallel
{
// Each coordinate is drawn uniformly from [min, max].
// NOTE(review): random is fetched inside the parallel region —
// presumably GetRandom() returns a per-thread generator; confirm.
auto* sim = Simulation::GetActive();
auto* ctxt = sim->GetExecutionContext();
auto* random = sim->GetRandom();
#pragma omp for
for (uint64_t i = 0; i < num_agents; i++) {
auto* new_agent = agent_builder(random->UniformArray<3>(min, max));
ctxt->AddAgent(new_agent);
}
}
}
/// Allows agents to secrete the specified substance. Diffusion throughout the
/// simulation space is automatically taken care of by the DiffusionGrid class
///
/// @param[in] substance_id The substance identifier
/// @param[in] substance_name The substance name
/// @param[in] diffusion_coeff The diffusion coefficient
/// @param[in] decay_constant The decay constant
/// @param[in] resolution The resolution of the diffusion grid
///
static void DefineSubstance(size_t substance_id, std::string substance_name,
double diffusion_coeff, double decay_constant,
int resolution = 10) {
assert(resolution > 0 && "Resolution needs to be a positive integer value");
auto* sim = Simulation::GetActive();
auto* rm = sim->GetResourceManager();
// Ownership of the new grid is handed to the ResourceManager.
DiffusionGrid* d_grid =
new DiffusionGrid(substance_id, substance_name, diffusion_coeff,
decay_constant, resolution);
rm->AddDiffusionGrid(d_grid);
}
/// Registers an initializer function on the diffusion grid of the given
/// substance. The grid must have been created beforehand (see
/// DefineSubstance).
///
/// @param[in] substance_id The substance identifier
/// @param[in] function Initializer passed to DiffusionGrid::AddInitializer
template <typename F>
static void InitializeSubstance(size_t substance_id, F function) {
auto* sim = Simulation::GetActive();
auto* rm = sim->GetResourceManager();
auto diffusion_grid = rm->GetDiffusionGrid(substance_id);
diffusion_grid->AddInitializer(function);
}
};
} // namespace bdm
#endif // CORE_MODEL_INITIALIZER_H_
|
custom_functions.h | //
// Project Name: Kratos
// Last Modified by: $Author: G.Casas (gcasas@cimmne.upc.edu) $
// Date: $Date: 2011-6-13 08:56:42 $
// Revision: $Revision: 1.5 $
//
//
//README::::look to the key word "VERSION" if you want to find all the points where you have to change something so that you can pass from a kdtree to a bin data search structure;
#if !defined(KRATOS_CUSTOM_FUNCTIONS)
#define KRATOS_CUSTOM_FUNCTIONS
// /* External includes */
#ifdef _OPENMP
#include <omp.h>
#endif
// System includes
#include <vector>
// Project includes
#include "includes/model_part.h"
#include "utilities/timer.h"
#include "utilities/openmp_utils.h"
#include "processes/find_elements_neighbours_process.h"
#include "processes/find_nodal_neighbours_process.h"
//Database includes
#include "custom_utilities/search/discrete_particle_configure.h"
#include "includes/define.h"
#include "../../DEMApplication/custom_elements/discrete_element.h"
#include "custom_elements/swimming_particle.h"
#include "custom_utilities/AuxiliaryFunctions.h"
#include "../../DEMApplication/custom_elements/spheric_particle.h"
#include "../swimming_DEM_application.h"
#include "../../../kratos/utilities/geometry_utilities.h"
namespace Kratos
{
template <std::size_t TDim>
class CustomFunctionsCalculator
{
public:
typedef ModelPart::ElementsContainerType::iterator ElementIterator;
typedef ModelPart::NodesContainerType::iterator NodeIterator;
typedef ModelPart::NodesContainerType NodesArrayType;
KRATOS_CLASS_POINTER_DEFINITION(CustomFunctionsCalculator);
/// Default constructor: initializes all internal state flags.
CustomFunctionsCalculator(): mPressuresFilled(false), mFirstGradientRecovery(true), mFirstLaplacianRecovery(true), mSomeCloudsDontWork(false), mCalculatingTheGradient(false), mCalculatingTheLaplacian(false), mFirstTimeAppending(true){}
/// Destructor.
virtual ~CustomFunctionsCalculator(){}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
// Computes a nodal pressure gradient by assembling element-wise constant
// gradients (linear shape functions): each element's gradient is weighted
// with its share of nodal volume and accumulated on its nodes, then the
// result is normalized by the stored NODAL_AREA.
void CalculatePressureGradient(ModelPart& r_model_part)
{
    // Reset the nodal accumulator.
    for (NodeIterator inode = r_model_part.NodesBegin(); inode != r_model_part.NodesEnd(); ++inode){
        noalias(inode->FastGetSolutionStepValue(PRESSURE_GRADIENT)) = ZeroVector(3);
    }

    array_1d <double, 3> gradient = ZeroVector(3); // always 3 components, even for TDim == 2
    array_1d <double, TDim + 1 > nodal_pressures;
    array_1d <double, TDim + 1 > shape_functions;
    BoundedMatrix<double, TDim + 1, TDim> shape_derivatives;

    for (ModelPart::ElementIterator ielem = r_model_part.ElementsBegin(); ielem != r_model_part.ElementsEnd(); ++ielem){
        // Shape function derivatives and element volume.
        Geometry<Node<3> >& geom = ielem->GetGeometry();
        double element_volume;
        GeometryUtils::CalculateGeometryData(geom, shape_derivatives, shape_functions, element_volume);

        // Gather the nodal pressures of this element.
        for (unsigned int i = 0; i < TDim + 1; ++i){
            nodal_pressures[i] = geom[i].FastGetSolutionStepValue(PRESSURE);
        }

        // Element-constant gradient; its dimension may be 2.
        const array_1d <double, TDim> elemental_gradient = prod(trans(shape_derivatives), nodal_pressures);
        for (unsigned int i = 0; i < TDim; ++i){
            gradient[i] = elemental_gradient[i];
        }

        // Weight by this element's share of nodal volume and scatter.
        const double volume_share = element_volume / static_cast<double>(TDim + 1);
        gradient *= volume_share;
        for (unsigned int i = 0; i < TDim + 1; ++i){
            geom[i].FastGetSolutionStepValue(PRESSURE_GRADIENT) += gradient;
        }
    }

    // Normalize the accumulated contributions.
    for (NodeIterator inode = r_model_part.NodesBegin(); inode != r_model_part.NodesEnd(); ++inode){
        inode->FastGetSolutionStepValue(PRESSURE_GRADIENT) /= inode->FastGetSolutionStepValue(NODAL_AREA);
    }
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
// This function assesses the stationarity based on the pressure field variation.
// Its tolerance applies to the non-dimensional pressure variation between consecutive
// measurements.
// Assesses stationarity of the pressure field. The tolerance applies to the
// non-dimensional pressure variation rate between consecutive measurements.
// Returns false on the very first call (which only records the field).
bool AssessStationarity(ModelPart& r_model_part, const double& tol)
{
    if (!mPressuresFilled){
        PerformFirstStepComputations(r_model_part);
        return(false);
    }

    else {
        double max_pressure_change_rate = 0.0; // measure of stationarity
        double mean_celerity = 0.0;            // used to adimensionalize the time step

        // Refill mPressures, computing the mean velocity modulus and the
        // maximum nodal pressure change since the last measurement.
        unsigned int i = 0;

        for (NodeIterator inode = r_model_part.NodesBegin(); inode != r_model_part.NodesEnd(); ++inode){
            const array_1d<double, 3>& velocity = inode->FastGetSolutionStepValue(VELOCITY);
            mean_celerity += SWIMMING_MODULUS_3(velocity);

            const double new_pressure = inode->FastGetSolutionStepValue(PRESSURE);
            double& old_pressure = mPressures[i];
            const double delta_p = std::abs(new_pressure - old_pressure);
            max_pressure_change_rate = std::max(delta_p, max_pressure_change_rate);
            old_pressure = new_pressure;
            ++i;
        }

        mean_celerity /= i;
        const double delta_t = r_model_part.GetProcessInfo()[TIME] - mLastMeasurementTime;

        if (delta_t > 0.0){
            max_pressure_change_rate /= delta_t;

            // calculating coefficients for adimensionalization of the pressure change rate
            const double characteristic_length = std::pow(mTotalDomainVolume, 1.0 / 3); // characteristic length of the model. Should be improved: a hydraulic radius or such
            const double reciprocal_of_characteristic_time = mean_celerity / characteristic_length;
            const double pressure_spatial_variation = GetRangeWithinVector(mPressures);
            // Average the current spatial variation with the one recorded at
            // the previous measurement. BUG FIX: mLastPressureVariation used
            // to be overwritten *before* this average was formed, which made
            // the average trivially equal to the current value.
            const double characteristic_pressure_variation = 0.5 * (pressure_spatial_variation + mLastPressureVariation);
            mLastPressureVariation = pressure_spatial_variation;

            if (characteristic_pressure_variation == 0.0 || reciprocal_of_characteristic_time == 0.0){ // unlikely
                std::cout << "Uniform problem: stationarity check being performed with dimensional values...! " << "\n";
                // BUG FIX: decide fully with the dimensional value here.
                // Previously, when the tolerance was not met, execution fell
                // through to the division below, dividing by zero.
                return max_pressure_change_rate <= tol;
            }

            max_pressure_change_rate /= reciprocal_of_characteristic_time * characteristic_pressure_variation ;
        }

        else {
            KRATOS_THROW_ERROR(std::runtime_error, "Trying to calculate pressure variations between two coincident time steps! (null time variation since last recorded time)","");
        }

        std::cout << "++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++" << "\n";
        std::cout << "The stationarity condition tolerance is " << "\n";
        KRATOS_INFO("SwimmingDEM") << tol << std::endl;
        std::cout << "The stationarity residual is now " << "\n";
        KRATOS_INFO("SwimmingDEM") << max_pressure_change_rate << std::endl;
        std::cout << "++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++" << "\n";

        return max_pressure_change_rate <= tol;
    }
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
// Returns the total volume of the fluid mesh, summed element by element
// in parallel over the per-thread element partitions.
double CalculateDomainVolume(ModelPart& r_fluid_model_part)
{
    OpenMPUtils::CreatePartition(OpenMPUtils::GetNumThreads(), r_fluid_model_part.GetCommunicator().LocalMesh().Elements().size(), mElementsPartition);

    double total_volume = 0.0;

    #pragma omp parallel for reduction(+ : total_volume)
    for (int k = 0; k < OpenMPUtils::GetNumThreads(); ++k){
        for (ElementIterator it = GetElementPartitionBegin(r_fluid_model_part, k); it != GetElementPartitionEnd(r_fluid_model_part, k); ++it){
            total_volume += CalculateElementalVolume(it->GetGeometry());
        }
    }

    return total_volume;
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
// this function assumes linear elements are used
// Sums, over all DEM elements, the HYDRODYNAMIC_FORCE stored on each
// element's first node; the total is returned through 'force'.
void CalculateTotalHydrodynamicForceOnParticles(ModelPart& r_dem_model_part, array_1d <double, 3>& force)
{
    OpenMPUtils::CreatePartition(OpenMPUtils::GetNumThreads(), r_dem_model_part.GetCommunicator().LocalMesh().Elements().size(), mElementsPartition);

    // One accumulator per thread to avoid sharing.
    std::vector<array_1d <double, 3> > per_thread_force;
    per_thread_force.resize(OpenMPUtils::GetNumThreads());

    for (unsigned int k = 0; k < per_thread_force.size(); ++k){
        per_thread_force[k] = ZeroVector(3);
    }

    #pragma omp parallel for
    for (int k = 0; k < OpenMPUtils::GetNumThreads(); ++k){
        for (ElementIterator it = GetElementPartitionBegin(r_dem_model_part, k); it != GetElementPartitionEnd(r_dem_model_part, k); ++it){
            Geometry< Node<3> >& geom = it->GetGeometry();
            array_1d <double, 3> nodal_force;

            if (geom[0].SolutionStepsDataHas(HYDRODYNAMIC_FORCE)){
                nodal_force = geom[0].FastGetSolutionStepValue(HYDRODYNAMIC_FORCE);
            }
            else {
                nodal_force = ZeroVector(3);
            }

            per_thread_force[k] += nodal_force;
        }
    }

    // Reduce the per-thread partial sums.
    force = ZeroVector(3);
    for (unsigned int k = 0; k < per_thread_force.size(); ++k){
        force += per_thread_force[k];
    }
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
// this function assumes linear elements are used
// Integrates the instantaneous (HYDRODYNAMIC_REACTION) and time-averaged
// (MEAN_HYDRODYNAMIC_REACTION) hydrodynamic forces exerted on the fluid
// over the mesh, returning both totals through the output parameters.
void CalculateTotalHydrodynamicForceOnFluid(ModelPart& r_fluid_model_part, array_1d <double, 3>& instantaneous_force, array_1d <double, 3>& mean_force)
{
    OpenMPUtils::CreatePartition(OpenMPUtils::GetNumThreads(), r_fluid_model_part.GetCommunicator().LocalMesh().Elements().size(), mElementsPartition);

    // Per-thread accumulators for both force totals.
    std::vector<array_1d <double, 3> > added_force_vect;
    added_force_vect.resize(OpenMPUtils::GetNumThreads());
    std::vector<array_1d <double, 3> > added_mean_force_vect;
    added_mean_force_vect.resize(OpenMPUtils::GetNumThreads());

    for (unsigned int k = 0; k < added_force_vect.size(); ++k){
        added_force_vect[k] = ZeroVector(3);
        added_mean_force_vect[k] = ZeroVector(3);
    }

    #pragma omp parallel for
    for (int k = 0; k < OpenMPUtils::GetNumThreads(); ++k){
        for (ElementIterator it = GetElementPartitionBegin(r_fluid_model_part, k); it != GetElementPartitionEnd(r_fluid_model_part, k); ++it){
            Geometry< Node<3> >& geom = it->GetGeometry();
            double element_volume;
            array_1d <double, 3> element_force;
            array_1d <double, 3> element_mean_force;

            if (geom[0].SolutionStepsDataHas(HYDRODYNAMIC_REACTION) && geom[0].SolutionStepsDataHas(FLUID_FRACTION)){
                element_force = CalculateVectorIntegralOfLinearInterpolationPerUnitFluidMass(geom, HYDRODYNAMIC_REACTION, element_volume);
            }
            else {
                element_force = ZeroVector(3);
            }

            if (geom[0].SolutionStepsDataHas(MEAN_HYDRODYNAMIC_REACTION) && geom[0].SolutionStepsDataHas(FLUID_FRACTION)){
                element_mean_force = CalculateVectorIntegralOfLinearInterpolationPerUnitFluidMass(geom, MEAN_HYDRODYNAMIC_REACTION, element_volume);
            }
            else {
                element_mean_force = ZeroVector(3);
            }

            added_force_vect[k] += element_force;
            added_mean_force_vect[k] += element_mean_force;
        }
    }

    instantaneous_force = added_force_vect[0];
    // BUG FIX: mean_force was previously seeded from added_force_vect[0]
    // (the instantaneous accumulator) instead of added_mean_force_vect[0],
    // corrupting the reported mean force.
    mean_force = added_mean_force_vect[0];

    for (unsigned int k = 1; k < added_force_vect.size(); ++k){
        instantaneous_force += added_force_vect[k];
        mean_force += added_mean_force_vect[k];
    }
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
// this function assumes linear elements are used
// Integrates FLUID_FRACTION over the mesh to obtain the total fluid volume;
// elements whose first node lacks FLUID_FRACTION contribute their full
// geometric volume instead.
double CalculateGlobalFluidVolume(ModelPart& r_fluid_model_part)
{
    OpenMPUtils::CreatePartition(OpenMPUtils::GetNumThreads(), r_fluid_model_part.GetCommunicator().LocalMesh().Elements().size(), mElementsPartition);

    double fluid_volume = 0.0;

    #pragma omp parallel for reduction(+ : fluid_volume)
    for (int k = 0; k < OpenMPUtils::GetNumThreads(); ++k){
        for (ElementIterator it = GetElementPartitionBegin(r_fluid_model_part, k); it != GetElementPartitionEnd(r_fluid_model_part, k); ++it){
            Geometry< Node<3> >& geom = it->GetGeometry();
            double element_volume;
            double element_fluid_volume;

            if (geom[0].SolutionStepsDataHas(FLUID_FRACTION)){
                element_fluid_volume = CalculateScalarIntegralOfLinearInterpolation(geom, FLUID_FRACTION, element_volume);
            }
            else {
                element_fluid_volume = CalculateElementalVolume(geom);
            }

            fluid_volume += element_fluid_volume;
        }
    }

    return fluid_volume;
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
// Computes the determinant of a square uBLAS matrix expression via LU
// factorization with partial pivoting; returns 0 for singular matrices.
template<class matrix_T>
double determinant(boost::numeric::ublas::matrix_expression<matrix_T> const& mat_r)
{
    matrix_T lu(mat_r());
    boost::numeric::ublas::permutation_matrix<std::size_t> pivot(mat_r().size1());

    if (lu_factorize(lu, pivot)) {
        // non-zero return flags a singular factorization
        return 0.0;
    }

    double det = 1.0;
    for (std::size_t i = 0; i < pivot.size(); ++i) {
        if (pivot(i) != i) {
            det = -det; // each row swap flips the sign
        }
        det *= lu(i, i);
    }

    return det;
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
// Returns the inverse of the square matrix m. Sizes 1-3 use closed-form
// cofactor expressions; larger sizes use recursive blockwise inversion.
// Precondition (assert-checked): m is square and non-singular.
const DenseMatrix<double> Inverse(
const DenseMatrix<double>& m)
{
assert(m.size1() == m.size2() && "Can only calculate the inverse of square matrices");
switch(m.size1())
{
case 1:
{
assert(m.size1() == 1 && m.size2() == 1 && "Only for 1x1 matrices");
const double determinant = CalcDeterminant(m);
assert(determinant != 0.0);
assert(m(0,0) != 0.0 && "Cannot take the inverse of matrix [0]");
DenseMatrix<double> n(1,1);
n(0,0) = 1.0 / determinant;
return n;
}
case 2:
{
// Standard 2x2 adjugate formula.
assert(m.size1() == 2 && m.size2() == 2 && "Only for 2x2 matrices");
const double determinant = CalcDeterminant(m);
assert(determinant != 0.0);
const double a = m(0,0);
const double b = m(0,1);
const double c = m(1,0);
const double d = m(1,1);
DenseMatrix<double> n(2,2);
n(0,0) = d / determinant;
n(0,1) = -b / determinant;
n(1,0) = -c / determinant;
n(1,1) = a / determinant;
return n;
}
case 3:
{
// 3x3 cofactor (adjugate) expansion; note the transposed assignment
// of the new_* cofactors below.
assert(m.size1() == 3 && m.size2() == 3 && "Only for 3x3 matrices");
const double determinant = CalcDeterminant(m);
assert(determinant != 0.0);
const double a = m(0,0);
const double b = m(0,1);
const double c = m(0,2);
const double d = m(1,0);
const double e = m(1,1);
const double f = m(1,2);
const double g = m(2,0);
const double h = m(2,1);
const double k = m(2,2);
DenseMatrix<double> n(3,3);
const double new_a = ((e*k)-(f*h)) / determinant;
const double new_b = -((d*k)-(f*g)) / determinant;
const double new_c = ((d*h)-(e*g)) / determinant;
const double new_d = -((b*k)-(c*h)) / determinant;
const double new_e = ((a*k)-(c*g)) / determinant;
const double new_f = -((a*h)-(b*g)) / determinant;
const double new_g = ((b*f)-(c*e)) / determinant;
const double new_h = -((a*f)-(c*d)) / determinant;
const double new_k = ((a*e)-(b*d)) / determinant;
n(0,0) = new_a;
n(1,0) = new_b;
n(2,0) = new_c;
n(0,1) = new_d;
n(1,1) = new_e;
n(2,1) = new_f;
n(0,2) = new_g;
n(1,2) = new_h;
n(2,2) = new_k;
return n;
}
default:
{
//Use blockwise inversion
//Matrix::Chop returns a std::vector
//[ A at [0] B at [1] ]
//[ C at [2] D at [3] ]
const std::vector<DenseMatrix<double> > v = Chop(m);
const DenseMatrix<double>& a = v[0];
assert(a.size1() == a.size2());
const DenseMatrix<double> a_inv = Inverse(a);
const DenseMatrix<double>& b = v[1];
const DenseMatrix<double>& c = v[2];
const DenseMatrix<double>& d = v[3];
// Schur complement of A: term = D - C * A^-1 * B
const DenseMatrix<double> term
= d
- prod(
DenseMatrix<double>(prod(c,a_inv)),
b
);
const DenseMatrix<double> term_inv = Inverse(term);
// Top-left block: A^-1 + A^-1 B (D - C A^-1 B)^-1 C A^-1
const DenseMatrix<double> new_a
= a_inv
+ DenseMatrix<double>(prod(
DenseMatrix<double>(prod(
DenseMatrix<double>(prod(
DenseMatrix<double>(prod(
a_inv,
b)),
term_inv)),
c)),
a_inv));
// Top-right block: -A^-1 B (D - C A^-1 B)^-1
const DenseMatrix<double> new_b
=
- DenseMatrix<double>(prod(
DenseMatrix<double>(prod(
a_inv,
b)),
term_inv));
// Bottom-left block: -(D - C A^-1 B)^-1 C A^-1
const DenseMatrix<double> new_c
=
- DenseMatrix<double>(prod(
DenseMatrix<double>(prod(
term_inv,
c)),
a_inv));
// Bottom-right block: (D - C A^-1 B)^-1
const DenseMatrix<double> new_d = term_inv;
std::vector<DenseMatrix<double> > w;
w.push_back(new_a);
w.push_back(new_b);
w.push_back(new_c);
w.push_back(new_d);
// Reassemble the four blocks into the full inverse.
const DenseMatrix<double> result = Unchop(w);
return result;
}
}
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
// Copies the nodal value of origin_variable into destination_variable
// (scalar version) on every node of the model part, in parallel.
void CopyValuesFromFirstToSecond(ModelPart& r_model_part, const Variable<double>& origin_variable, const Variable<double>& destination_variable)
{
    #pragma omp parallel for
    for (int i = 0; i < (int)r_model_part.Nodes().size(); ++i){
        ModelPart::NodesContainerType::iterator node_it = r_model_part.NodesBegin() + i;
        Node<3>::Pointer p_node = *(node_it.base());
        p_node->FastGetSolutionStepValue(destination_variable) = p_node->FastGetSolutionStepValue(origin_variable);
    }
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
// Copies the nodal value of origin_variable into destination_variable
// (3-component vector version) on every node of the model part, in parallel.
void CopyValuesFromFirstToSecond(ModelPart& r_model_part, const Variable<array_1d<double, 3>>& origin_variable, const Variable<array_1d<double, 3>>& destination_variable)
{
    #pragma omp parallel for
    for (int i = 0; i < (int)r_model_part.Nodes().size(); ++i){
        ModelPart::NodesContainerType::iterator node_it = r_model_part.NodesBegin() + i;
        Node<3>::Pointer p_node = *(node_it.base());
        noalias(p_node->FastGetSolutionStepValue(destination_variable)) = p_node->FastGetSolutionStepValue(origin_variable);
    }
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
// Assigns the same scalar `value` to destination_variable on every node
// (node-parallel). Note: "Notes" in the name is presumably a typo for "Nodes";
// kept as-is since callers depend on it -- TODO confirm before renaming.
void SetValueOfAllNotes(ModelPart& r_model_part, const double& value, const Variable<double>& destination_variable)
{
    const int n_nodes = (int)r_model_part.Nodes().size();

    #pragma omp parallel for
    for (int i = 0; i < n_nodes; ++i){
        ModelPart::NodesContainerType::iterator node_it = r_model_part.NodesBegin() + i;
        Node<3>::Pointer p_node = *(node_it.base());
        p_node->FastGetSolutionStepValue(destination_variable) = value;
    }
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
// Vector overload: assigns the same array_1d<double,3> `value` to
// destination_variable on every node, in parallel, via noalias assignment.
void SetValueOfAllNotes(ModelPart& r_model_part, const array_1d<double, 3>& value, const Variable<array_1d<double, 3>>& destination_variable)
{
    const int n_nodes = (int)r_model_part.Nodes().size();

    #pragma omp parallel for
    for (int i = 0; i < n_nodes; ++i){
        ModelPart::NodesContainerType::iterator node_it = r_model_part.NodesBegin() + i;
        Node<3>::Pointer p_node = *(node_it.base());
        array_1d<double, 3>& destination_value = p_node->FastGetSolutionStepValue(destination_variable);
        noalias(destination_value) = value;
    }
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
private:
// One-shot / state flags used by the recovery and measurement utilities.
bool mPressuresFilled; // true once mPressures has been filled (see PerformFirstStepComputations)
bool mFirstGradientRecovery;
bool mFirstLaplacianRecovery;
bool mSomeCloudsDontWork;
bool mCalculatingTheGradient;
bool mCalculatingTheLaplacian;
bool mFirstTimeAppending;
double mLastMeasurementTime; // process-info TIME at the last pressure sampling
double mLastPressureVariation; // max - min of mPressures at the last sampling
double mTotalDomainVolume;
std::vector<double> mPressures; // one PRESSURE value per node, in node-iteration order
std::vector<DenseVector<double> > mFirstRowsOfB;
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
// Area of the triangle (x0,y0)-(x1,y1)-(x2,y2) via the 2D cross product
// of its two edge vectors; the absolute value makes it orientation-independent.
inline double CalculateArea(const double x0, const double y0,
                            const double x1, const double y1,
                            const double x2, const double y2)
{
    const double cross = (x1 - x0) * (y2 - y0) - (x2 - x0) * (y1 - y0);
    return 0.5 * std::abs(cross);
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
// Signed volume of the tetrahedron with vertices 0..3: one sixth of the
// scalar triple product of the three edge vectors emanating from vertex 0.
// The sign encodes the orientation of the vertex ordering.
inline double CalculateVol(const double x0, const double y0, const double z0,
                           const double x1, const double y1, const double z1,
                           const double x2, const double y2, const double z2,
                           const double x3, const double y3, const double z3)
{
    const double x10 = x1 - x0, y10 = y1 - y0, z10 = z1 - z0;
    const double x20 = x2 - x0, y20 = y2 - y0, z20 = z2 - z0;
    const double x30 = x3 - x0, y30 = y3 - y0, z30 = z3 - z0;

    const double detJ = x10 * y20 * z30 - x10 * y30 * z20 +
                        y10 * z20 * x30 - y10 * x20 * z30 +
                        z10 * x20 * y30 - z10 * y20 * x30;
    return detJ * 0.1666666666666666666666667; // detJ / 6
}
//***************************************************************************************************************
//***************************************************************************************************************
// Measure of an element: triangle area when TDim == 2, tetrahedron volume
// otherwise. Throws if the geometry is degenerate (zero measure).
double CalculateElementalVolume(const Geometry<Node <3> >& geom)
{
    double vol;

    if (TDim == 2){
        vol = CalculateArea(geom[0].X(), geom[0].Y(),
                            geom[1].X(), geom[1].Y(),
                            geom[2].X(), geom[2].Y());
    }
    else {
        vol = CalculateVol(geom[0].X(), geom[0].Y(), geom[0].Z(),
                           geom[1].X(), geom[1].Y(), geom[1].Z(),
                           geom[2].X(), geom[2].Y(), geom[2].Z(),
                           geom[3].X(), geom[3].Y(), geom[3].Z());
    }

    if (vol == 0.0){
        KRATOS_THROW_ERROR(std::logic_error, "element with zero area found with the current geometry ", geom);
    }
    return vol;
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
// Integral over a tetrahedron of the linear interpolant of nodal scalar r_var.
// The weights N[i] are the (signed) volumes of the four sub-tetrahedra formed
// with the centroid, so the weighted nodal sum presumably equals vol times the
// centroid value -- confirm the sign convention of the sub-volumes.
// Also writes the element volume to `vol`; throws on degenerate geometry.
double CalculateScalarIntegralOfLinearInterpolation(const Geometry<Node < 3 > >& geom, const Variable<double>& r_var, double& vol)
{
    double x[4], y[4], z[4];
    for (unsigned int i = 0; i < 4; ++i){
        x[i] = geom[i].X();
        y[i] = geom[i].Y();
        z[i] = geom[i].Z();
    }

    const double xc = 0.25 * (x[0] + x[1] + x[2] + x[3]);
    const double yc = 0.25 * (y[0] + y[1] + y[2] + y[3]);
    const double zc = 0.25 * (z[0] + z[1] + z[2] + z[3]);

    vol = CalculateVol(x[0], y[0], z[0], x[1], y[1], z[1], x[2], y[2], z[2], x[3], y[3], z[3]);
    if (vol == 0.0){
        KRATOS_THROW_ERROR(std::logic_error, "Element with zero area found. Its geometry is given by", geom);
    }

    array_1d<double, 4> N;
    N[0] = CalculateVol(x[1], y[1], z[1], x[3], y[3], z[3], x[2], y[2], z[2], xc, yc, zc);
    N[1] = CalculateVol(x[0], y[0], z[0], x[1], y[1], z[1], x[2], y[2], z[2], xc, yc, zc);
    N[2] = CalculateVol(x[3], y[3], z[3], x[1], y[1], z[1], x[0], y[0], z[0], xc, yc, zc);
    N[3] = CalculateVol(x[3], y[3], z[3], x[0], y[0], z[0], x[2], y[2], z[2], xc, yc, zc);

    double value_at_gauss_point = N[0] * geom[0].FastGetSolutionStepValue(r_var);
    for (unsigned int i = 1; i != 4; ++i){
        value_at_gauss_point += N[i] * geom[i].FastGetSolutionStepValue(r_var, 0);
    }
    return value_at_gauss_point;
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
// Vector counterpart of CalculateScalarIntegralOfLinearInterpolation: integral
// over a tetrahedron of the linear interpolant of the nodal vector r_var,
// using centroid sub-tetrahedra volumes as weights. Writes the element volume
// to `vol`; throws on degenerate geometry.
array_1d <double, 3> CalculateVectorIntegralOfLinearInterpolation(const Geometry<Node < 3 > >& geom, const Variable<array_1d <double, 3> >& r_var, double& vol)
{
    double x[4], y[4], z[4];
    for (unsigned int i = 0; i < 4; ++i){
        x[i] = geom[i].X();
        y[i] = geom[i].Y();
        z[i] = geom[i].Z();
    }

    const double xc = 0.25 * (x[0] + x[1] + x[2] + x[3]);
    const double yc = 0.25 * (y[0] + y[1] + y[2] + y[3]);
    const double zc = 0.25 * (z[0] + z[1] + z[2] + z[3]);

    vol = CalculateVol(x[0], y[0], z[0], x[1], y[1], z[1], x[2], y[2], z[2], x[3], y[3], z[3]);
    if (vol == 0.0){
        KRATOS_THROW_ERROR(std::logic_error, "Element with zero area found. Its geometry is given by", geom);
    }

    array_1d<double, 4> N;
    N[0] = CalculateVol(x[1], y[1], z[1], x[3], y[3], z[3], x[2], y[2], z[2], xc, yc, zc);
    N[1] = CalculateVol(x[0], y[0], z[0], x[1], y[1], z[1], x[2], y[2], z[2], xc, yc, zc);
    N[2] = CalculateVol(x[3], y[3], z[3], x[1], y[1], z[1], x[0], y[0], z[0], xc, yc, zc);
    N[3] = CalculateVol(x[3], y[3], z[3], x[0], y[0], z[0], x[2], y[2], z[2], xc, yc, zc);

    array_1d <double, 3> value_at_gauss_point = N[0] * geom[0].FastGetSolutionStepValue(r_var);
    for (unsigned int i = 1; i != 4; ++i){
        value_at_gauss_point += N[i] * geom[i].FastGetSolutionStepValue(r_var);
    }
    return value_at_gauss_point;
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
// Same as CalculateVectorIntegralOfLinearInterpolation, but each nodal
// contribution is additionally weighted by that node's DENSITY and
// FLUID_FRACTION (i.e. the local fluid mass per unit volume).
// Writes the element volume to `vol`; throws on degenerate geometry.
array_1d <double, 3> CalculateVectorIntegralOfLinearInterpolationPerUnitFluidMass(const Geometry<Node < 3 > >& geom, const Variable<array_1d <double, 3> >& r_var, double& vol)
{
    double x[4], y[4], z[4];
    for (unsigned int i = 0; i < 4; ++i){
        x[i] = geom[i].X();
        y[i] = geom[i].Y();
        z[i] = geom[i].Z();
    }

    const double xc = 0.25 * (x[0] + x[1] + x[2] + x[3]);
    const double yc = 0.25 * (y[0] + y[1] + y[2] + y[3]);
    const double zc = 0.25 * (z[0] + z[1] + z[2] + z[3]);

    vol = CalculateVol(x[0], y[0], z[0], x[1], y[1], z[1], x[2], y[2], z[2], x[3], y[3], z[3]);
    if (vol == 0.0){
        KRATOS_THROW_ERROR(std::logic_error, "Element with zero area found. Its geometry is given by", geom);
    }

    array_1d<double, 4> N;
    N[0] = CalculateVol(x[1], y[1], z[1], x[3], y[3], z[3], x[2], y[2], z[2], xc, yc, zc);
    N[1] = CalculateVol(x[0], y[0], z[0], x[1], y[1], z[1], x[2], y[2], z[2], xc, yc, zc);
    N[2] = CalculateVol(x[3], y[3], z[3], x[1], y[1], z[1], x[0], y[0], z[0], xc, yc, zc);
    N[3] = CalculateVol(x[3], y[3], z[3], x[0], y[0], z[0], x[2], y[2], z[2], xc, yc, zc);

    array_1d <double, 3> value_at_gauss_point = N[0] * geom[0].FastGetSolutionStepValue(r_var) * geom[0].FastGetSolutionStepValue(DENSITY) * geom[0].FastGetSolutionStepValue(FLUID_FRACTION);
    for (unsigned int i = 1; i != 4; ++i){
        value_at_gauss_point += N[i] * geom[i].FastGetSolutionStepValue(r_var) * geom[i].FastGetSolutionStepValue(DENSITY) * geom[i].FastGetSolutionStepValue(FLUID_FRACTION);
    }
    return value_at_gauss_point;
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
// One-off initialization: caches the total domain volume, the current time,
// a snapshot of all nodal pressures, and the resulting pressure range.
void PerformFirstStepComputations(ModelPart& r_model_part)
{
    mTotalDomainVolume = CalculateDomainVolume(r_model_part);
    mPressures.resize(r_model_part.Nodes().size());
    mLastMeasurementTime = r_model_part.GetProcessInfo()[TIME];

    unsigned int i = 0;
    for (NodeIterator inode = r_model_part.NodesBegin(); inode != r_model_part.NodesEnd(); ++inode) {
        mPressures[i++] = inode->FastGetSolutionStepValue(PRESSURE);
    }

    mPressuresFilled = true;
    mLastPressureVariation = GetRangeWithinVector(mPressures);
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
// Strict weak ordering on (node index, distance) pairs: primarily by distance,
// with the index as a deterministic tie-breaker for equal distances.
struct IsCloser{
    bool operator()(std::pair<unsigned int, double> const& first_pair, std::pair<unsigned int, double> const& second_pair)
    {
        if (first_pair.second != second_pair.second){
            return first_pair.second < second_pair.second;
        }
        return first_pair.first < second_pair.first;
    }
};
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
// Iterative factorial; Factorial(0) == 1. The product is accumulated in
// unsigned arithmetic and returned as int, so results overflow for n > 12
// -- presumably callers only use small n (binomial-style coefficients); confirm.
inline int Factorial(const unsigned int n){
    unsigned int result = 1;
    for (unsigned int i = 2; i <= n; ++i){
        result *= i;
    }
    return result;
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
// Returns the length of the longest element edge over the whole model part.
double CalculateTheMaximumEdgeLength(ModelPart& r_model_part)
{
    double max_distance_yet = 0.0;

    for (ModelPart::ElementIterator ielem = r_model_part.ElementsBegin(); ielem != r_model_part.ElementsEnd(); ++ielem){
        Geometry<Node<3> >& geom = ielem->GetGeometry();
        unsigned int n_nodes = static_cast<unsigned int>(TDim + 1);

        // Visit every distinct node pair (k - 1, i) with i >= k.
        // BUGFIX: the outer bound must be n_nodes (it was n_nodes - 1), which
        // skipped the last pair (n_nodes - 2, n_nodes - 1) and could
        // underestimate the maximum edge length.
        for (unsigned int k = 1; k < n_nodes; ++k){
            for (unsigned int i = k; i < n_nodes; ++i){
                array_1d <double, 3> delta_i = geom[k - 1] - geom[i];
                double distance_2 = DEM_INNER_PRODUCT_3(delta_i, delta_i);
                max_distance_yet = max_distance_yet > distance_2 ? max_distance_yet : distance_2;
            }
        }
    }
    // Compare squared distances throughout; take the root once at the end.
    return(std::sqrt(max_distance_yet));
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
// Returns the length of the shortest element edge over the whole model part.
double CalculateTheMinumumEdgeLength(ModelPart& r_model_part)
{
    double min_distance_yet = 0.0;
    bool first_node = true;

    for (ModelPart::ElementIterator ielem = r_model_part.ElementsBegin(); ielem != r_model_part.ElementsEnd(); ++ielem){
        Geometry<Node<3> >& geom = ielem->GetGeometry();

        if (first_node){ // seed min_distance_yet with the distance (squared) between any two nodes
            array_1d <double, 3> delta = geom[0] - geom[1];
            min_distance_yet = DEM_INNER_PRODUCT_3(delta, delta);
            // BUGFIX: the flag was never cleared, so the seed was re-assigned on
            // every element and the minimum found so far was discarded.
            first_node = false;
        }

        unsigned int n_nodes = static_cast<unsigned int>(TDim + 1);
        // BUGFIX: bound is n_nodes (was n_nodes - 1), which skipped the last
        // node pair (n_nodes - 2, n_nodes - 1) of each element.
        for (unsigned int k = 1; k < n_nodes; ++k){
            for (unsigned int i = k; i < n_nodes; ++i){
                array_1d <double, 3> delta_i = geom[k - 1] - geom[i];
                double distance_2 = DEM_INNER_PRODUCT_3(delta_i, delta_i);
                min_distance_yet = min_distance_yet < distance_2 ? min_distance_yet : distance_2;
            }
        }
    }
    return(std::sqrt(min_distance_yet));
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
// The following block of functions is used to calculate explicit matrix inverses and was taken from
// Richel BilderBeek's website (http://www.richelbilderbeek.nl/CppUblasMatrixExample6.htm), and it is
// transcribed here with a very minor modification

// Determinant of a 1x1, 2x2 or 3x3 square matrix by direct cofactor
// expansion along the first row; larger sizes are not supported.
double CalcDeterminant(const DenseMatrix<double>& m)
{
    assert(m.size1() == m.size2() && "Can only calculate the determinant of square matrices");
    switch(m.size1())
    {
        case 1:
        {
            return m(0,0);
        }
        case 2:
        {
            return m(0,0) * m(1,1) - m(0,1) * m(1,0);
        }
        case 3:
        {
            // 2x2 minors paired with the first-row entries.
            const double minor0 = m(1,1) * m(2,2) - m(1,2) * m(2,1);
            const double minor1 = m(2,2) * m(1,0) - m(1,2) * m(2,0);
            const double minor2 = m(1,0) * m(2,1) - m(1,1) * m(2,0);
            return m(0,0) * minor0 - m(0,1) * minor1 + m(0,2) * minor2;
        }
        default:
            assert(!"Should not get here: unsupported matrix size");
            throw std::runtime_error("Unsupported matrix size");
    }
}
///Chop returns a std::vector of sub-matrices
//[ A at [0] B at [1] ]
//[ C at [2] D at [3] ]
const std::vector<DenseMatrix<double> > Chop(
const DenseMatrix<double>& m)
{
using boost::numeric::ublas::range;
using boost::numeric::ublas::matrix_range;
std::vector<matrix<double> > v;
v.reserve(4);
// Integer division: for odd dimensions the bottom/right blocks get the extra row/column.
const int midy = m.size1() / 2;
const int midx = m.size2() / 2;
const matrix_range<const matrix<double> > top_left( m,range(0 ,midy ),range(0 ,midx ));
const matrix_range<const matrix<double> > bottom_left( m,range(midy,m.size1()),range(0 ,midx ));
const matrix_range<const matrix<double> > top_right( m,range(0 ,midy ),range(midx,m.size2()));
const matrix_range<const matrix<double> > bottom_right(m,range(midy,m.size1()),range(midx,m.size2()));
// Order matters: A, B, C, D (row-major quadrants), as documented above.
v.push_back(matrix<double>(top_left));
v.push_back(matrix<double>(top_right));
v.push_back(matrix<double>(bottom_left));
v.push_back(matrix<double>(bottom_right));
return v;
}
///Unchop merges the 4 std::vector of sub-matrices produced by Chop
const DenseMatrix<double> Unchop(
const std::vector<DenseMatrix<double> >& v)
{
//Chop returns a std::vector of sub-matrices
//[ A at [0] B at [1] ]
//[ C at [2] D at [4] ]
using boost::numeric::ublas::range;
using boost::numeric::ublas::matrix_range;
assert(v.size() == 4);
assert(v[0].size1() == v[1].size1());
assert(v[2].size1() == v[3].size1());
assert(v[0].size2() == v[2].size2());
assert(v[1].size2() == v[3].size2());
DenseMatrix<double> m(v[0].size1() + v[2].size1(),v[0].size2() + v[1].size2());
for (int quadrant=0; quadrant!=4; ++quadrant)
{
const DenseMatrix<double>& w = v[quadrant];
const std::size_t n_rows = v[quadrant].size1();
const std::size_t n_cols = v[quadrant].size2();
const int offset_x = quadrant % 2 ? v[0].size2() : 0;
const int offset_y = quadrant / 2 ? v[0].size1() : 0;
for (std::size_t row=0; row!=n_rows; ++row)
{
for (std::size_t col=0; col!=n_cols; ++col)
{
m(offset_y + row, offset_x + col) = w(row,col);
}
}
}
assert(v[0].size1() + v[2].size1() == m.size1());
assert(v[1].size1() + v[3].size1() == m.size1());
assert(v[0].size2() + v[1].size2() == m.size2());
assert(v[2].size2() + v[3].size2() == m.size2());
return m;
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
///@}
///@name Member r_variables
///@{
// Offsets into the local-mesh element container; entry k is the first element
// of partition k (presumably one partition per OpenMP thread -- confirm at fill site).
DenseVector<unsigned int> mElementsPartition;
///@}
///@name Un accessible methods
///@{
// Returns max - min of the entries of `vector` (0.0 for an empty vector).
// BUGFIX: the loop previously read the member mPressures[i] instead of the
// parameter, which was only accidentally correct for the single caller that
// passes mPressures, and indexed out of bounds for any longer argument.
double GetRangeWithinVector(const std::vector<double>& vector)
{
    if (vector.empty()){
        return 0.0; // guard: the original dereferenced vector[0] unconditionally
    }

    double min = vector[0];
    double max = vector[0];

    for (unsigned int i = 1; i < vector.size(); ++i){
        min = std::min(min, vector[i]);
        max = std::max(max, vector[i]);
    }
    return (max - min);
}
// Mutable access to the element partition offsets (see mElementsPartition).
DenseVector<unsigned int>& GetElementPartition()
{
return mElementsPartition;
}
// Iterator to the first local-mesh element of partition k.
ElementIterator GetElementPartitionBegin(ModelPart& r_model_part, unsigned int k)
{
return r_model_part.GetCommunicator().LocalMesh().Elements().ptr_begin() + mElementsPartition[k];
}
// Past-the-end iterator of partition k (mElementsPartition[k + 1] is the next offset).
ElementIterator GetElementPartitionEnd(ModelPart& r_model_part, unsigned int k)
{
return r_model_part.GetCommunicator().LocalMesh().Elements().ptr_begin() + mElementsPartition[k + 1];
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
}; // Class CustomFunctionsCalculator
} // namespace Kratos.
#endif // KRATOS_CREATE_AND_DESTROY defined
|
energy.h | #pragma once
#include "core.h"
#include "geometry.h"
#include "space.h"
#include "potentials.h"
#include "multipole.h"
#include "penalty.h"
#include "mpi.h"
#include <Eigen/Dense>
#include <set>
#ifdef FAU_POWERSASA
#include <power_sasa.h>
#endif
namespace Faunus {
namespace Energy {
// Abstract base class for all energy terms; derived classes implement energy(Change&).
class Energybase {
public:
enum keys {OLD, NEW, NONE}; // which system copy this term belongs to during a trial move
keys key=NONE;
std::string name; // json key under which the term reports itself (must be set by derived class)
std::string cite; // optional literature reference, emitted as "reference" in json output
virtual double energy(Change&)=0; //!< energy due to change
inline virtual void to_json(json &j) const {}; //!< json output
inline virtual void sync(Energybase*, Change&) {} //!< update internal state from a sibling term (see derived classes)
};
// Serializes any energy term under j[base.name]; requires `name` to be set.
// A non-empty citation is recorded first, then the term's own virtual output.
void to_json(json &j, const Energybase &base) {
assert(!base.name.empty());
if (!base.cite.empty())
j[base.name]["reference"] = base.cite;
base.to_json( j[base.name] );
} //!< Converts any energy class to json object
/**
 * This holds Ewald setup and must *not* depend on particle type, nor depend on Space
 */
struct EwaldData {
typedef std::complex<double> Tcomplex;
Eigen::Matrix3Xd kVectors; // k-vectors, 3xK
Eigen::VectorXd Aks; // 1xK, to minimize computational effort (Eq.24,DOI:10.1063/1.481216)
Eigen::VectorXcd Qion, Qdip; // 1xK
double alpha, rc, kc, check_k2_zero, lB; // splitting parameter, real-space cutoff, k-space cutoff, |k|^2 ~ 0 threshold, Bjerrum length
double const_inf, eps_surf; // surface-term switch (0 or 1) and surrounding dielectric constant
bool spherical_sum=true; // restrict k-vectors to a sphere instead of the full cube
bool ipbc=false; // isotropic periodic boundary condition variant
int kVectorsInUse=0; // number of valid columns in kVectors
Point L; //!< Box dimensions
// (Re)generates the k-vector table and the prefactors Aks for box size `box`.
void update(const Point &box) {
L = box;
int kcc = std::ceil(kc);
check_k2_zero = 0.1*std::pow(2*pc::pi/L.maxCoeff(), 2); // |k|^2 below this is treated as the zero vector
int kVectorsLength = (2*kcc+1) * (2*kcc+1) * (2*kcc+1) - 1; // full integer cube minus the origin
if (kVectorsLength == 0) {
// Degenerate cutoff (kc < 1): keep one dummy vector with zero weight so
// later loops and resizes stay well-defined.
kVectors.resize(3,1);
Aks.resize(1);
kVectors.col(0) = Point(1,0,0); // Just so it is not the zero-vector
Aks[0] = 0;
kVectorsInUse = 1;
Qion.resize(1);
Qdip.resize(1);
} else {
double kc2 = kc*kc;
kVectors.resize(3, kVectorsLength);
Aks.resize(kVectorsLength);
kVectorsInUse = 0;
kVectors.setZero();
Aks.setZero();
// In IPBC mode (startValue == 0) only non-negative ky/kz are generated;
// otherwise kx >= 0 with full ky/kz ranges, and the omitted half-space is
// accounted for by the symmetry factor below.
int startValue = 1 - int(ipbc);
for (int kx = 0; kx <= kcc; kx++) {
double dkx2 = double(kx*kx);
for (int ky = -kcc*startValue; ky <= kcc; ky++) {
double dky2 = double(ky*ky);
for (int kz = -kcc*startValue; kz <= kcc; kz++) {
// Symmetry multiplicity for the components whose mirror images were skipped.
double factor = 1.0;
if(kx > 0)
factor *= 2;
if(ky > 0 && ipbc)
factor *= 2;
if(kz > 0 && ipbc)
factor *= 2;
double dkz2 = double(kz*kz);
Point kv = 2*pc::pi*Point(kx/L.x(),ky/L.y(),kz/L.z());
double k2 = kv.dot(kv);
if (k2 < check_k2_zero) // Check if k2 != 0
continue;
if (spherical_sum)
if( (dkx2/kc2) + (dky2/kc2) + (dkz2/kc2) > 1) // outside the cutoff ellipsoid/sphere
continue;
kVectors.col(kVectorsInUse) = kv;
Aks[kVectorsInUse] = factor*std::exp(-k2/(4*alpha*alpha))/k2; // Gaussian-screened 1/k^2 weight
kVectorsInUse++;
}
}
}
// Shrink all containers to the number of vectors actually accepted.
Qion.resize(kVectorsInUse);
Qdip.resize(kVectorsInUse);
Aks.conservativeResize(kVectorsInUse);
kVectors.conservativeResize(3,kVectorsInUse);
}
}
};
// Reads Ewald parameters from json: "alpha", "cutoff", "kcutoff" and "epsr"
// are mandatory (throw if missing); the remaining keys have defaults.
void from_json(const json &j, EwaldData &d) {
d.alpha = j.at("alpha");
d.rc = j.at("cutoff");
d.kc = j.at("kcutoff");
d.ipbc = j.value("ipbc", false);
d.spherical_sum = j.value("spherical_sum", true);
d.lB = pc::lB( j.at("epsr") );
d.eps_surf = j.value("epss", 0.0);
d.const_inf = (d.eps_surf < 1) ? 0 : 1; // if unphysical (<1) use epsr infinity for surrounding medium
}
// Serializes the Ewald setup, including the number of k-vectors currently stored.
void to_json(json &j, const EwaldData &d) {
j = {{"lB", d.lB}, {"ipbc", d.ipbc}, {"epss", d.eps_surf},
{"alpha", d.alpha}, {"cutoff", d.rc}, {"kcutoff", d.kc},
{"wavefunctions", d.kVectors.cols()}, {"spherical_sum", d.spherical_sum}};
}
#ifdef DOCTEST_LIBRARY_INCLUDED
// Unit test: k-vector generation counts for plain PBC vs. IPBC Ewald in a 10^3 box.
TEST_CASE("[Faunus] Ewald - EwaldData")
{
using doctest::Approx;
EwaldData data = R"({
"ipbc": false, "epsr": 1.0, "alpha": 0.894427190999916, "epss": 1.0,
"kcutoff": 11.0, "spherical_sum": true, "cutoff": 5.0})"_json;
data.update( Point(10,10,10) );
CHECK(data.ipbc == false);
CHECK(data.const_inf == 1);
CHECK(data.alpha == 0.894427190999916);
CHECK(data.kVectors.cols() == 2975);
CHECK(data.Qion.size() == data.kVectors.cols());
data.ipbc=true;
data.update( Point(10,10,10) );
CHECK(data.kVectors.cols() == 846);
CHECK(data.Qion.size() == data.kVectors.cols());
}
#endif
/** @brief recipe or policies for ion-ion ewald */
template<class Tspace, bool eigenopt=false /** use Eigen matrix ops where possible */>
struct PolicyIonIon {
    typedef typename Tspace::Tpvec::iterator iter;
    Tspace *spc;
    Tspace *old=nullptr; // set only if key==NEW at first call to `sync()`

    PolicyIonIon(Tspace &spc) : spc(&spc) {}

    // Recomputes the structure factor Q(k) for every k-vector from scratch.
    void updateComplex(EwaldData &data) const {
        if (eigenopt)
            if (data.ipbc==false) {
                // Vectorized path: build the NxK matrix of k.r dot products in one shot.
                auto pos = asEigenMatrix(spc->p.begin(), spc->p.end(), &Tspace::Tparticle::pos); // Nx3
                auto charge = asEigenVector(spc->p.begin(), spc->p.end(), &Tspace::Tparticle::charge); // Nx1
                Eigen::MatrixXd kr = pos.matrix() * data.kVectors; // Nx3 * 3xK = NxK
                data.Qion.real() = (kr.array().cos().colwise()*charge).colwise().sum();
                // BUGFIX: the imaginary part must be charge-weighted just like the
                // real part (compare the scalar loop below, Q += q*(cos + i sin));
                // the charge factor was previously missing here.
                data.Qion.imag() = (kr.array().sin().colwise()*charge).colwise().sum();
                return;
            }
        // Scalar fallback (also used for IPBC): accumulate per-particle contributions.
        for (int k=0; k<data.kVectors.cols(); k++) {
            const Point& kv = data.kVectors.col(k);
            EwaldData::Tcomplex Q(0,0);
            if (data.ipbc)
                // IPBC uses the product of cosines of the componentwise k.r terms.
                for (auto &i : spc->p)
                    Q += kv.cwiseProduct(i.pos).array().cos().prod() * i.charge;
            else
                for (auto &i : spc->p) {
                    double dot = kv.dot(i.pos);
                    Q += i.charge * EwaldData::Tcomplex( std::cos(dot), std::sin(dot) );
                }
            data.Qion[k] = Q;
        }
    } //!< Update all k vectors

    // Incremental update for particles in [begin, end]: subtracts the old
    // contribution (from `old` space) and adds the new one, per k-vector.
    void updateComplex(EwaldData &data, iter begin, iter end) const {
        assert(old!=nullptr);
        assert(spc->p.size() == old->p.size());
        size_t ibeg = std::distance(spc->p.begin(), begin); // it->index
        size_t iend = std::distance(spc->p.begin(), end);   // it->index
        // NOTE(review): `i<=iend` treats `end` as an *inclusive* last element
        // (matching the single-atom caller that passes begin==end). A caller
        // passing a past-the-end iterator would read one particle too far --
        // confirm the convention against Ewald::energy.
        for (int k=0; k<data.kVectors.cols(); k++) {
            auto& Q = data.Qion[k];
            Point q = data.kVectors.col(k);
            if (data.ipbc)
                for (size_t i=ibeg; i<=iend; i++) {
                    Q += q.cwiseProduct( spc->p[i].pos ).array().cos().prod() * spc->p[i].charge;
                    Q -= q.cwiseProduct( old->p[i].pos ).array().cos().prod() * old->p[i].charge;
                }
            else
                for (size_t i=ibeg; i<=iend; i++) {
                    double _new = q.dot(spc->p[i].pos);
                    double _old = q.dot(old->p[i].pos);
                    Q += spc->p[i].charge * EwaldData::Tcomplex( std::cos(_new), std::sin(_new) );
                    Q -= old->p[i].charge * EwaldData::Tcomplex( std::cos(_old), std::sin(_old) );
                }
        }
    } //!< Optimized update of k subset. Require access to old positions through `old` pointer

    // Self-interaction correction: -alpha/sqrt(pi) * lB * sum(q_i^2).
    double selfEnergy(const EwaldData &d) {
        double E = 0;
        for (auto& i : spc->p)
            E += i.charge * i.charge;
        return -d.alpha*E / std::sqrt(pc::pi) * d.lB;
    }

    // Surface (dipole) term; disabled when const_inf == 0 (conducting boundary).
    double surfaceEnergy(const EwaldData &d) {
        if (d.const_inf < 0.5)
            return 0;
        Point qr(0,0,0);
        for (auto &i : spc->p)
            qr += i.charge*i.pos;
        return d.const_inf * 2 * pc::pi / ( (2*d.eps_surf+1) * spc->geo.getVolume() ) * qr.dot(qr) * d.lB;
    }

    // Reciprocal-space sum: (2*pi/V) * lB * sum_k Aks[k] * |Q(k)|^2.
    double reciprocalEnergy(const EwaldData &d) {
        double E = 0;
        if (eigenopt) // known at compile time
            E = d.Aks.cwiseProduct( d.Qion.cwiseAbs2() ).sum();
        else
            for (int k=0; k<d.Qion.size(); k++)
                E += d.Aks[k] * std::norm( d.Qion[k] );
        return 2 * pc::pi / spc->geo.getVolume() * E * d.lB;
    }
};
#ifdef DOCTEST_LIBRARY_INCLUDED
// Unit test: self/surface/reciprocal Ewald terms for a two-ion dipole,
// for both plain PBC and IPBC variants.
TEST_CASE("[Faunus] Ewald - IonIonPolicy")
{
using doctest::Approx;
typedef Space<Geometry::Cuboid, Particle<Charge,Dipole>> Tspace;
Tspace spc;
spc.p.resize(2);
spc.geo = R"( {"length": 10} )"_json;
spc.p[0] = R"( {"pos": [0,0,0], "q": 1.0} )"_json;
spc.p[1] = R"( {"pos": [1,0,0], "q": -1.0} )"_json;
PolicyIonIon<Tspace> ionion(spc);
EwaldData data = R"({
"epsr": 1.0, "alpha": 0.894427190999916, "epss": 1.0,
"kcutoff": 11.0, "spherical_sum": true, "cutoff": 5.0})"_json;
data.ipbc = false; // PBC Ewald (http://dx.doi.org/10.1063/1.481216)
data.update( spc.geo.getLength() );
ionion.updateComplex( data );
CHECK( ionion.selfEnergy(data) == Approx(-1.0092530088080642*data.lB) );
CHECK( ionion.surfaceEnergy(data) == Approx(0.0020943951023931952*data.lB) );
CHECK( ionion.reciprocalEnergy(data) == Approx(0.21303063979675319*data.lB) );
data.ipbc = true; // IPBC Ewald
data.update( spc.geo.getLength() );
ionion.updateComplex( data );
CHECK( ionion.selfEnergy(data) == Approx(-1.0092530088080642*data.lB) );
CHECK( ionion.surfaceEnergy(data) == Approx(0.0020943951023931952*data.lB) );
CHECK( ionion.reciprocalEnergy(data) == Approx(0.0865107467*data.lB) );
}
#endif
/** @brief Ewald summation reciprocal energy */
template<class Tspace, class Policy=PolicyIonIon<Tspace>>
class Ewald : public Energybase {
private:
EwaldData data;
Policy policy;
public:
Tspace& spc;
// Reads Ewald parameters from json and builds the initial k-vector table.
Ewald(const json &j, Tspace &spc) : policy(spc), spc(spc) {
name = "ewald";
data = j;
data.update( spc.geo.getLength() );
policy.updateComplex(data); // brute force. todo: be selective
}
// Total Ewald energy (self + surface + reciprocal) for the current state.
// Only the NEW (trial) copy refreshes its structure factors; the OLD copy
// keeps the ones synced after the previous move.
double energy(Change &change) override {
double u=0;
if (!change.empty()) {
// If the state is NEW (trial state), then update all k-vectors
if (key==NEW) {
if (change.all || change.dV) { // everything changes
data.update( spc.geo.getLength() );
policy.updateComplex(data); // update all (expensive!)
}
else {
if (change.groups.size()==1) { // exactly one group is moved
auto& d = change.groups[0];
auto& g = spc.groups[d.index];
if (d.atoms.size()==1) // exactly one atom is moved
policy.updateComplex(data, g.begin()+d.atoms[0], g.begin()+d.atoms[0]);
else
// NOTE(review): updateComplex treats its second iterator as an
// inclusive last element (i<=iend); passing g.end() here may read
// one element past the group -- confirm against PolicyIonIon.
policy.updateComplex(data, g.begin(), g.end());
} else
policy.updateComplex(data);
}
}
u = policy.selfEnergy(data) + policy.surfaceEnergy(data) + policy.reciprocalEnergy(data);
}
return u;
}
// Copies the full EwaldData from the partner term; additionally, the NEW copy
// memorizes a pointer to the OLD space so incremental updates can subtract
// old-position contributions.
void sync(Energybase *basePtr, Change &change) override {
auto other = dynamic_cast<decltype(this)>(basePtr);
assert(other);
if (other->key==OLD)
policy.old = &(other->spc); // give NEW access to OLD space for optimized updates
data = other->data; // copy everything!
} //!< Called after a move is rejected/accepted as well as before simulation
void to_json(json &j) const override {
j = data;
}
};
template<typename Tspace>
class Isobaric : public Energybase {
    private:
        Tspace& spc;
        double P; // pressure expressed in units of kT (P/kT)
    public:
        // Isobaric-ensemble bias term, P*V - (N+1)*ln(V), where N counts each
        // atomic particle individually and each non-atomic group as one unit.
        // The pressure is read from "P/mM", "P/Pa" or "P/atm", in that order;
        // "P/atm" is mandatory if the first two are absent or ~zero.
        Isobaric(const json &j, Tspace &spc) : spc(spc) {
            name = "isobaric";
            cite = "Frenkel & Smith 2nd Ed (Eq. 5.4.13)";
            P = j.value("P/mM", 0.0) * 1.0_mM;
            if (P<1e-10) {
                P = j.value("P/Pa", 0.0) * 1.0_Pa;
                if (P<1e-10)
                    P = j.at("P/atm").get<double>() * 1.0_atm;
            }
        }
        // Non-zero only for volume (or full) changes.
        double energy(Change &change) override {
            if (!change.dV && !change.all)
                return 0;
            size_t N = 0;
            for (auto &g : spc.groups) {
                if (g.empty())
                    continue;
                if (g.atomic)
                    N += g.size(); // each atom counts
                else
                    ++N; // whole molecule counts once
            }
            const double V = spc.geo.getVolume();
            return P*V - (N+1)*std::log(V);
        }
        void to_json(json &j) const override {
            j["P/atm"] = P / 1.0_atm;
            j["P/mM"] = P / 1.0_mM;
            j["P/Pa"] = P / 1.0_Pa;
            _roundjson(j,5);
        }
};
// Base class for external potentials acting on selected molecules. Derived
// classes must assign `func` (per-particle energy) before energy() is called.
template<typename Tspace>
class ExternalPotential : public Energybase {
protected:
typedef typename Tspace::Tpvec Tpvec;
typedef typename Tspace::Tparticle Tparticle;
bool COM=false; // apply on center-of-mass
Tspace& spc;
std::set<int> molids; // molecules to act upon
std::function<double(const Tparticle&)> func=nullptr; // energy of single particle
std::vector<std::string> _names; // molecule names, kept for json output
// Energy of one group: either func() evaluated on a dummy particle placed at
// the mass center, or the sum over all particles. A NaN sum short-circuits
// the loop (presumably signalling "outside allowed region" -- confirm with
// the acceptance logic downstream).
template<class Tparticle>
double _energy(const Group<Tparticle> &g) const {
double u=0;
if (molids.find(g.id)!=molids.end()) {
if (COM) { // apply only to center of mass
Tparticle dummy;
dummy.pos = g.cm;
u = func(dummy);
} else {
for (auto &p : g) {
u += func(p);
if (std::isnan(u))
break;
}
}
}
return u;
} //!< External potential on a single particle
public:
// Reads "molecules" (names, mandatory) and "com" (default false) from json;
// throws if the list is empty or contains unknown molecule names.
ExternalPotential(const json &j, Tspace &spc) : spc(spc) {
name="external";
COM = j.value("com", false);
_names = j.at("molecules").get<decltype(_names)>(); // molecule names
auto _ids = names2ids(molecules<Tpvec>, _names); // names --> molids
molids = std::set<int>(_ids.begin(), _ids.end()); // vector --> set
if (molids.empty() || molids.size()!=_names.size() )
throw std::runtime_error(name + ": molecule list is empty");
}
// Sums the external energy over all groups (volume / full changes) or only
// over the groups/atoms listed in `change`.
double energy(Change &change) override {
assert(func!=nullptr);
double u=0;
if (change.dV || change.all) {
for (auto &g : spc.groups) { // check all groups
u += _energy(g);
if (std::isnan(u))
break;
}
} else
for (auto &d : change.groups) {
auto &g = spc.groups.at(d.index); // check specified groups
if (d.all || COM) // check all atoms in group
u += _energy(g);
else { // check only specified atoms in group
if (molids.find(g.id)!=molids.end())
for (auto i : d.atoms)
u += func( *(g.begin()+i) );
}
if (std::isnan(u))
break;
}
return u;
}
void to_json(json &j) const override {
j["molecules"] = _names;
j["com"] = COM;
}
}; //!< Base class for external potentials, acting on particles
// Harmonic confinement of selected molecules to a sphere, (z-aligned)
// cylinder, or axis-aligned cuboid: energy 0 inside, 0.5*k*(penetration
// measure) outside. Builds on ExternalPotential's molecule selection.
template<typename Tspace, typename base=ExternalPotential<Tspace>>
class Confine : public base {
public:
enum Variant {sphere, cylinder, cuboid, none};
Variant type=none;
private:
Point origo={0,0,0}, dir={1,1,1}; // dir masks coordinates (cylinder ignores z)
Point low, high; // cuboid corners
double radius, k; // confinement radius and spring constant (kT units internally)
bool scale=false; // if true, radius follows volume moves
std::map<std::string, Variant> m = {
{"sphere", sphere}, {"cylinder", cylinder}, {"cuboid", cuboid}
};
public:
Confine(const json &j, Tspace &spc) : base(j,spc) {
base::name = "confine";
k = value_inf(j, "k") * 1.0_kJmol; // get floating point; allow inf/-inf
type = m.at( j.at("type") );
if (type==sphere || type==cylinder) {
radius = j.at("radius");
origo = j.value("origo", origo);
scale = j.value("scale", scale);
if (type==cylinder)
dir = {1,1,0}; // mask out z: distance measured in the xy-plane only
// `radius` is captured *by reference* so the volume-scaling trigger
// below is seen by the potential; the other captures are by value.
base::func = [&radius=radius, origo=origo, k=k, dir=dir](const typename base::Tparticle &p) {
// d2 = (masked distance)^2 - radius^2; note the quadratic-in-d2 penalty.
double d2 = (origo-p.pos).cwiseProduct(dir).squaredNorm() - radius*radius;
if (d2>0)
return 0.5*k*d2;
return 0.0;
};
// If volume is scaled, also scale the confining radius by adding a trigger
// to `Space::scaleVolume()`
if (scale)
spc.scaleVolumeTriggers.push_back( [&radius=radius](Tspace &spc, double Vold, double Vnew) {
radius *= std::cbrt(Vnew/Vold); } );
}
if (type==cuboid) {
low = j.at("low").get<Point>();
high = j.at("high").get<Point>();
// Penalty: squared per-axis overshoot beyond [low, high], summed.
base::func = [low=low, high=high, k=k](const typename base::Tparticle &p) {
double u=0;
Point d = low-p.pos;
for (int i=0; i<3; ++i)
if (d[i]>0) u+=d[i]*d[i];
d = p.pos-high;
for (int i=0; i<3; ++i)
if (d[i]>0) u+=d[i]*d[i];
return 0.5*k*u;
};
}
}
void to_json(json &j) const override {
if (type==cuboid)
j = {{"low", low}, {"high", high}};
if (type==sphere || type==cylinder)
j = {{"radius", radius}};
if (type==sphere) {
j["origo"] = origo;
j["scale"] = scale;
}
// Reverse-lookup the type name for serialization.
for (auto &i : m)
if (i.second==type)
j["type"] = i.first;
j["k"] = k/1.0_kJmol;
base::to_json(j);
_roundjson(j,5);
}
}; //!< Confine particles to a sub-region of the simulation container
/*
 * The keys of the `intra` map are group indexes and the values
 * are vectors of `BondData`. For bonds between groups, fill
 * in `inter`, which is evaluated on every call to `energy`.
 *
 * @todo Optimize.
 */
template<typename Tspace>
class Bonded : public Energybase {
private:
    Tspace& spc;
    typedef typename Tspace::Tpvec Tpvec;
    typedef std::vector<Potential::BondData> BondVector;
    BondVector inter;               // inter-molecular bonds
    std::map<int,BondVector> intra; // intra-molecular bonds, keyed by group index

    /**
     * Finds and adds all intra-molecular bonds of active molecules.
     * Bond indices are shifted from molecule-internal numbering to absolute
     * particle indices in `spc.p`.
     */
    void update() {
        intra.clear();
        for (size_t i=0; i<spc.groups.size(); i++) {
            // fix: test the individual group, not the (trivially non-empty
            // inside this loop) container of groups -- empty/inactive groups
            // contribute no bonds
            if (!spc.groups[i].empty()) {
                auto &g = spc.groups[i];
                intra[i] = molecules<Tpvec>.at(g.id).bonds;
                for (auto &b : intra[i])
                    b.shift( std::distance(spc.p.begin(), g.begin()) );
            }
        }
    }

    // Sum the energy of all bonds in `v` using the current geometry
    double sum( const BondVector &v ) const {
        double u=0;
        for (auto &b : v)
            u += b.energy(spc.p, spc.geo.distanceFunc);
        return u;
    } // sum energy in vector of BondData

public:
    Bonded(const json &j, Tspace &spc) : spc(spc) {
        name = "bonded";
        update();
        if (j.is_object())
            if (j.count("bondlist")==1)
                inter = j["bondlist"].get<BondVector>();
    }
    void to_json(json &j) const override {
        if (!inter.empty())
            j["bondlist"] = inter;
        if (!intra.empty()) {
            json& _j = j["bondlist-intramolecular"];
            _j = json::array();
            for (auto &i : intra)
                for (auto &b : i.second)
                    _j.push_back(b);
        }
    }
    double energy(Change &c) override {
        double u=0;
        if ( !c.empty() ) {
            u = sum(inter); // energy of inter-molecular bonds
            if ( c.all || c.dV ) {
                for (auto& i : intra) // energy of intra-molecular bonds
                    if (!spc.groups[i.first].empty()) // add only if group is active
                        u += sum(i.second);
            } else
                for (auto &d : c.groups)
                    if (d.internal)
                        u += sum( intra[d.index] );
        }
        return u;
    } // brute force -- refine this!
};
/**
* @brief Nonbonded energy using a pair-potential
*/
template<typename Tspace, typename Tpairpot>
class Nonbonded : public Energybase {
private:
    double g2gcnt=0, g2gskip=0; // statistics: group-pair evaluations vs. cutoff skips
protected:
    typedef typename Tspace::Tgroup Tgroup;
    double Rc2_g2g=pc::infty; // squared group-to-group mass-center cutoff
    void to_json(json &j) const override {
        j["pairpot"] = pairpot;
        j["cutoff_g2g"] = std::sqrt(Rc2_g2g);
    }
    template<typename T>
    inline bool cut(const T &g1, const T &g2) {
        g2gcnt++;
        // atomic groups have no meaningful mass center -> never skip
        if (g1.atomic || g2.atomic)
            return false;
        if ( spc.geo.sqdist(g1.cm, g2.cm)<Rc2_g2g )
            return false;
        g2gskip++;
        return true;
    } //!< true if group<->group interaction can be skipped
    // Pair energy between two particles using the geometry's minimum-image distance
    template<typename T>
    inline double i2i(const T &a, const T &b) {
        assert(&a!=&b && "a and b cannot be the same particle");
        return pairpot(a, b, spc.geo.vdist(a.pos, b.pos));
    }
    /*
     * Internal energy in group, calculating all with all or, if `index`
     * is given, only a subset. Index specifies the internal index (starting
     * at zero) of changed particles within the group.
     * NOTE(review): `index` is assumed sorted (binary_search) -- confirm callers.
     */
    double g_internal(const Tgroup &g, const std::vector<int> &index=std::vector<int>()) {
        using namespace ranges;
        double u=0;
        if (index.empty()) // assume that all atoms have changed
            for ( auto i = g.begin(); i != g.end(); ++i )
                for ( auto j=i; ++j != g.end(); ) // strict upper triangle: each pair once
                    u += i2i(*i, *j);
        else { // only a subset have changed
            // lazily-generated list of particles NOT in `index`
            auto fixed = view::ints( 0, int(g.size()) )
                | view::remove_if(
                        [&index](int i){return std::binary_search(index.begin(), index.end(), i);});
            for (int i : index) // moved<->static
                for (int j : fixed)
                    u += i2i( *(g.begin()+i), *(g.begin()+j));
            for (int i : index) // moved<->moved
                for (int j : index)
                    if (j>i) // avoid double counting and self-pairs
                        u += i2i( *(g.begin()+i), *(g.begin()+j));
        }
        return u;
    }
    /*
     * Calculates the interaction energy of a particle, `i`,
     * and checks (1) if it is already part of Space, or (2)
     * external to space.
     */
    double i2all(const typename Tspace::Tparticle &i) {
        double u=0;
        auto it = spc.findGroupContaining(i); // iterator to group
        if (it!=spc.groups.end()) { // check if i belongs to group in space
            for (auto &g : spc.groups) // i with all other particles
                if (&g!=&(*it)) // avoid self-interaction
                    if (!cut(g, *it)) // check g2g cut-off
                        for (auto &j : g) // loop over particles in other group
                            u += i2i(i,j);
            for (auto &j : *it) // i with all particles in own group
                if (&j!=&i)
                    u += i2i(i,j);
        } else // particle does not belong to any group
            for (auto &g : spc.groups) // i with all other *active* particles
                for (auto &j : g) // (this will include only active particles)
                    u += i2i(i,j);
        return u;
    }
    /*
     * Group-to-group energy. A subset of `g1` can be given with `index` which refers
     * to the internal index (starting at zero) of the first group, `g1`.
     */
    virtual double g2g(const Tgroup &g1, const Tgroup &g2, const std::vector<int> &index=std::vector<int>()) {
        double u = 0;
        if (!cut(g1,g2)) {
            if (index.empty()) // if index is empty, assume all in g1 have changed
                for (auto &i : g1)
                    for (auto &j : g2)
                        u += i2i(i,j);
            else // only a subset of g1
                for (auto i : index)
                    for (auto &j : g2)
                        u += i2i( *(g1.begin()+i), j);
        }
        return u;
    }
public:
    Tspace& spc;      //!< Space to operate on
    Tpairpot pairpot; //!< Pair potential
    Nonbonded(const json &j, Tspace &spc) : spc(spc) {
        name="nonbonded";
        pairpot = j;
        Rc2_g2g = std::pow( j.value("cutoff_g2g", pc::infty), 2); // stored squared
    }
    double energy(Change &change) override {
        using namespace ranges;
        double u=0;
        if (!change.empty()) {
            // volume change: all group pairs, plus internal energy of atomic groups
            // (molecular groups scale rigidly; atomic groups do not)
            if (change.dV) {
#pragma omp parallel for reduction (+:u) schedule (dynamic)
                for ( auto i = spc.groups.begin(); i < spc.groups.end(); ++i ) {
                    for ( auto j=i; ++j != spc.groups.end(); )
                        u += g2g( *i, *j );
                    if (i->atomic)
                        u += g_internal(*i);
                }
                return u;
            }
            // did everything change?
            if (change.all) {
#pragma omp parallel for reduction (+:u) schedule (dynamic)
                for ( auto i = spc.groups.begin(); i < spc.groups.end(); ++i ) {
                    for ( auto j=i; ++j != spc.groups.end(); )
                        u += g2g( *i, *j );
                    u += g_internal(*i);
                }
                // more todo here...
                return u;
            }
            // if exactly ONE molecule is changed
            if (change.groups.size()==1) {
                auto& d = change.groups[0];
                // absolute particle index of the group's first particle
                auto gindex = spc.groups.at(d.index).to_index(spc.p.begin()).first;
                if (d.atoms.size()==1) // exactly one atom has moved
                    return i2all(spc.p.at(gindex+d.atoms[0]));
                auto& g1 = spc.groups.at(d.index);
                for (auto &g2 : spc.groups)
                    if (&g1 != &g2)
                        u += g2g(g1, g2, d.atoms);
                if (d.internal)
                    u += g_internal(g1, d.atoms);
                return u;
            }
            // general case: several groups changed -- split into moved/static sets
            auto moved = change.touchedGroupIndex(); // index of moved groups
            auto fixed = view::ints( 0, int(spc.groups.size()) )
                | view::remove_if(
                        [&moved](int i){return std::binary_search(moved.begin(), moved.end(), i);}
                  ); // index of static groups
            // moved<->moved
            for ( auto i = moved.begin(); i != moved.end(); ++i )
                for ( auto j=i; ++j != moved.end(); )
                    u += g2g( spc.groups[*i], spc.groups[*j] );
            // moved<->static
            for ( auto i : moved)
                for ( auto j : fixed)
                    u += g2g(spc.groups[i], spc.groups[j]);
            // more todo!
        }
        return u;
    }
}; //!< Nonbonded, pair-wise additive energy term
template<typename Tspace, typename Tpairpot>
class NonbondedCached : public Nonbonded<Tspace,Tpairpot> {
private:
    typedef Nonbonded<Tspace,Tpairpot> base;
    typedef typename Tspace::Tgroup Tgroup;
    Eigen::MatrixXf cache; // group<->group energies; strictly upper triangular (row<col)

    // Return the (cached) group pair energy. For the trial (NEW) system the
    // entry is recomputed and stored first; for the OLD system the cached
    // value is returned as-is. `index` is accepted for interface
    // compatibility with the base class but ignored -- the full pair energy
    // is always cached.
    double g2g(const Tgroup &g1, const Tgroup &g2, const std::vector<int> &index=std::vector<int>()) override {
        // derive matrix indices from the groups' positions in spc.groups
        int i = &g1 - &base::spc.groups.front();
        int j = &g2 - &base::spc.groups.front();
        if (j<i)
            std::swap(i,j);
        if (base::key==Energybase::NEW) { // if this is from the trial system,
            double u = 0;
            if (!base::cut(g1,g2)) {
                for (auto &i : g1) // note: intentionally shadows the index `i`/`j`
                    for (auto &j : g2)
                        u += base::i2i(i,j);
            }
            cache(i,j) = u; // stored as float -> minor precision loss vs. double
        }
        return cache(i,j); // return (cached) value
    }
public:
    NonbondedCached(const json &j, Tspace &spc) : base(j,spc) {
        base::name += "EM";
        // pre-compute the full energy matrix for the initial configuration
        cache.resize( spc.groups.size(), spc.groups.size() );
        cache.setZero();
        for ( auto i = base::spc.groups.begin(); i < base::spc.groups.end(); ++i ) {
            for ( auto j=i; ++j != base::spc.groups.end(); ) {
                int k = &(*i) - &base::spc.groups.front();
                int l = &(*j) - &base::spc.groups.front();
                if (l<k)
                    std::swap(k,l);
                double u = 0;
                if (!base::cut(*i,*j)) {
                    for (auto &k : *i) // shadows the index `k`/`l` above
                        for (auto &l : *j)
                            u += base::i2i(k,l);
                }
                cache(k,l) = u;
            }
        }
    }
    double energy(Change &change) override {
        using namespace ranges;
        double u=0;
        if (!change.empty()) {
            if (change.all || change.dV) {
                // every pair touches a distinct cache cell, so the parallel
                // writes in g2g() do not collide
#pragma omp parallel for reduction (+:u) schedule (dynamic)
                for ( auto i = base::spc.groups.begin(); i < base::spc.groups.end(); ++i ) {
                    for ( auto j=i; ++j != base::spc.groups.end(); )
                        u += g2g( *i, *j );
                }
                return u;
            }
            // if exactly ONE molecule is changed
            if (change.groups.size()==1) {
                auto& d = change.groups[0];
                auto& g1 = base::spc.groups.at(d.index);
                for (auto &g2 : base::spc.groups) {
                    if (&g1 != &g2)
                        u += g2g(g1, g2, d.atoms);
                }
                return u;
            }
            // several groups changed -- split into moved and static sets
            auto moved = change.touchedGroupIndex(); // index of moved groups
            auto fixed = view::ints( 0, int(base::spc.groups.size()) )
                | view::remove_if(
                        [&moved](int i){return std::binary_search(moved.begin(), moved.end(), i);}
                  ); // index of static groups
            // moved<->moved
            for ( auto i = moved.begin(); i != moved.end(); ++i )
                for ( auto j=i; ++j != moved.end(); ) {
                    u += g2g( base::spc.groups[*i], base::spc.groups[*j] );
                }
            // moved<->static
            for ( auto i : moved)
                for ( auto j : fixed)
                    u += g2g(base::spc.groups[i], base::spc.groups[j]);
            // more todo!
        }
        return u;
    }
    // Copy the energy matrix from the accepted/trial twin after a move.
    void sync(Energybase *basePtr, Change &change) override {
        auto other = dynamic_cast<decltype(this)>(basePtr);
        assert(other);
        if (change.all || change.dV)
            cache.triangularView<Eigen::StrictlyUpper>() = (other->cache).template triangularView<Eigen::StrictlyUpper>();
        else
            for (auto &d : change.groups) {
                // copy the row and column touching the changed group's index
                for (int i=0; i<d.index; i++)
                    cache(i,d.index) = other->cache(i,d.index);
                for (size_t i=d.index+1; i<base::spc.groups.size(); i++)
                    cache(d.index,i) = other->cache(d.index,i);
            }
    } //!< Copy energy matrix from other
}; //!< Nonbonded with cached energies (Energy Matrix)
/**
* `udelta` is the total change of updating the energy function. If
* not handled this will appear as an energy drift (which it is!). To
* avoid this, this term is added to the energy but since it's the
* same in both the trial and old state energies it will not affect
* MC move acceptance.
*/
template<typename Tspace>
class Penalty : public Energybase {
protected:
    typedef typename Tspace::Tparticle Tparticle;
    typedef typename Tspace::Tgroup Tgroup;
    typedef typename Tspace::Tpvec Tpvec;
    typedef typename std::shared_ptr<ReactionCoordinate::ReactionCoordinateBase> Tcoord;
    Tspace &spc;
    bool nodrift;    // subtract accumulated update energy (udelta)?
    bool quiet;      // suppress barrier printout?
    size_t dim=0;    // number of reaction coordinates (1 or 2)
    size_t cnt=0;    // number of calls to `sync()`
    size_t nupdate;  // update frequency [steps]; 0 = never rescale
    size_t samplings=1;
    double udelta=0; // total energy change of updating penalty function
    double scale;    // scaling factor for f0
    double f0;       // penalty increment
    std::string file, hisfile;
    std::vector<Tcoord> rcvec;  // vector of reaction coordinate functions
    std::vector<double> coord;  // latest reaction coordinate
    Table<int> histo;           // histogram of visited states
    Table<double> penalty;      // penalty function over coordinate space
public:
    Penalty(const json &j, Tspace &spc) : spc(spc) {
        using namespace ReactionCoordinate;
        name = "penalty";
        f0 = j.value("f0", 0.5);
        scale = j.value("scale", 0.8);
        quiet = j.value("quiet", true);
        nupdate = j.value("update", 0);
        nodrift = j.value("nodrift", true);
        file = j.at("file").get<std::string>();
        hisfile = j.value("histogram", "penalty-histogram.dat");
        std::vector<double> binwidth, min, max;
        if (scale<0 || scale>1)
            throw std::runtime_error("`scale` must be in the interval [0:1]");
        for (auto &i : j.at("coords"))
            if (i.is_object())
                if (i.size()==1) {
                    std::shared_ptr<ReactionCoordinate::ReactionCoordinateBase> rc=nullptr;
                    for (auto it=i.begin(); it!=i.end(); ++it) {
                        if (it.key()=="atom")
                            rc = std::make_shared<AtomProperty>(it.value(), spc);
                        if (it.key()=="charge")
                            rc = std::make_shared<AtomProperty>(it.value(), spc);
                        if (it.key()=="system")
                            rc = std::make_shared<SystemProperty>(it.value(), spc);
                        if (it.key()=="cmcm")
                            rc = std::make_shared<MassCenterSeparation>(it.value(), spc);
                        if (rc!=nullptr) {
                            if (rc->min>=rc->max || rc->binwidth<=0)
                                throw std::runtime_error("min<max and binwidth>0 required for '" + it.key() + "'");
                            rcvec.push_back(rc);
                            binwidth.push_back( rc->binwidth );
                            min.push_back( rc->min );
                            max.push_back( rc->max );
                        } else
                            throw std::runtime_error("unknown coordinate type '" + it.key() + "'");
                    }
                }
        dim = binwidth.size();
        if (dim<1 || dim>2)
            throw std::runtime_error("minimum one maximum two coordinates required");
        coord.resize(2,0);
        histo.reInitializer(binwidth, min, max);
        penalty.reInitializer(binwidth, min, max);
        // optionally restore a previously saved penalty function
        std::ifstream f(MPI::prefix+file);
        if (f) {
            cout << "Loading penalty function '" << MPI::prefix+file << "'" << endl;
            std::string hash;
            f >> hash >> f0 >> samplings;
            for (int row=0; row<penalty.rows(); row++)
                for (int col=0; col<penalty.cols(); col++)
                    if (!f.eof())
                        f >> penalty(row,col);
                    else
                        throw std::runtime_error("penalty file dimension mismatch");
        }
    }
    /**
     * On destruction the penalty function (shifted so its minimum is zero)
     * and the histogram are saved to disk.
     */
    virtual ~Penalty() {
        std::ofstream f1(MPI::prefix + file), f2(MPI::prefix + hisfile);
        if (f1) f1 << "# " << f0 << " " << samplings << "\n" << penalty.array() - penalty.minCoeff() << endl;
        if (f2) f2 << histo << endl;
        // add function to save to numpy-friendly file...
    }
    void to_json(json &j) const override {
        j["file"] = file;
        j["scale"] = scale;
        j["update"] = nupdate;
        j["nodrift"] = nodrift;
        j["histogram"] = hisfile;
        j["f0_final"] = f0;
        auto& _j = j["coords"] = json::array();
        for (auto rc : rcvec) {
            json t;
            t[rc->name] = *rc;
            _j.push_back(t);
        }
    }
    double energy(Change &change) override {
        assert(rcvec.size()<=coord.size());
        double u=0;
        coord.resize( rcvec.size() );
        if (!change.empty()) {
            for (size_t i=0; i<rcvec.size(); i++) {
                coord.at(i) = rcvec[i]->operator()(); // evaluate reaction coordinate
                if (!rcvec[i]->inRange(coord[i]))     // outside sampled range?
                    return pc::infty;
            }
            penalty.to_index(coord);
            u = penalty[coord];
        }
        // subtract accumulated update energy so updates cause no apparent drift
        return (nodrift) ? u - udelta : u;
    }
    /**
     * Deposit penalty (f0) at coordinate `c`; every `nupdate` calls, if all
     * bins reached `samplings` counts, rescale f0 and reset the histogram.
     */
    virtual void update(const std::vector<double> &c) {
        ++cnt;
        // fix: `nupdate` defaults to 0 and the previous `cnt % nupdate` was a
        // modulo by zero (undefined behavior). Zero now means "never rescale".
        if (nupdate>0 && cnt % nupdate == 0 && f0>0) {
            bool b = histo.minCoeff() >= (int)samplings;
            if (b && f0>0) {
                double min = penalty.minCoeff();
                penalty = penalty.array() - min; // shift minimum to zero
                if (!quiet)
                    cout << "Barriers/kT. Penalty=" << penalty.maxCoeff()
                        << " Histogram=" << std::log(double(histo.maxCoeff())/histo.minCoeff())
                        << endl;
                f0 = f0 * scale; // reduce penalty energy
                samplings = std::ceil( samplings / scale );
                histo.setZero();
                udelta += -min; // book-keep the shift so energies stay drift-free
            }
        }
        coord = c;
        histo[coord]++;
        penalty[coord] += f0;
        udelta += f0;
    }
    void sync(Energybase *basePtr, Change &change) override {
        auto other = dynamic_cast<decltype(this)>(basePtr);
        assert(other);
        update(other->coord);
        other->update(other->coord);
    } // @todo: this doubles the MPI communication
};
#ifdef ENABLE_MPI
template<typename Tspace, typename Base=Penalty<Tspace>>
struct PenaltyMPI : public Base {
    using Base::samplings;
    using Base::penalty;
    using Base::udelta;
    using Base::scale;
    using Base::histo;
    using Base::coord;
    using Base::cnt;
    using Base::f0;
    Eigen::VectorXi weights;// array w. minimum histogram counts, one per MPI rank
    Eigen::VectorXd buffer; // receive buffer for penalty functions
    PenaltyMPI(const json &j, Tspace &spc) : Base(j,spc) {
        weights.resize( MPI::mpi.nproc() );
        buffer.resize( penalty.size()*MPI::mpi.nproc() );
    }
    // Penalty update that averages histogram/penalty data across MPI ranks.
    void update(const std::vector<double> &c) override {
        using namespace Faunus::MPI;
        double uold = penalty[c];
        // NOTE(review): `this->nupdate` defaults to 0, which makes the modulo
        // below undefined behavior -- confirm a nonzero `update` input is required
        if (++cnt % this->nupdate == 0 && f0>0) {
            int min = histo.minCoeff(); // this rank's minimum bin count
            MPI_Barrier(mpi.comm);
            // gather every rank's minimum count ...
            MPI_Allgather(&min, 1, MPI_INT, weights.data(), 1, MPI_INT, mpi.comm);
            if ( weights.maxCoeff() >= samplings ) {
                // ... collect all penalty functions on the master ...
                MPI_Gather(penalty.data(), penalty.size(), MPI_DOUBLE,
                        buffer.data(), penalty.size(), MPI_DOUBLE, 0, mpi.comm);
                if (mpi.isMaster()) {
                    // ... average them, weighted by histogram counts ...
                    penalty.setZero();
                    for (int i=0; i<mpi.nproc(); i++)
                        penalty += double(weights[i]) * Eigen::Map<Eigen::MatrixXd>(
                                buffer.data()+i*penalty.size(), penalty.rows(), penalty.cols() );
                    penalty = ( penalty.array() - penalty.minCoeff() ) / double(weights.sum());
                }
                // ... and broadcast the averaged function back to every rank
                MPI_Bcast(penalty.data(), penalty.size(), MPI_DOUBLE, 0, mpi.comm);
                if (min>0 && !this->quiet)
                    cout << "Barriers/kT. Penalty=" << penalty.maxCoeff()
                        << " Histogram=" << std::log(double(histo.maxCoeff())/histo.minCoeff()) << endl;
                histo.setZero();
                f0 = f0 * scale; // reduce penalty energy
                samplings = std::ceil( samplings / scale );
            }
        }
        coord = c;
        histo[coord]++;
        penalty[coord] += f0;
        udelta += penalty[coord] - uold;
    } //!< Average penalty function across all nodes
}; //!< Penalty function with MPI exchange
#endif
#ifdef FAU_POWERSASA
template<class Tspace>
class SASAEnergy : public Energybase {
    typedef typename Tspace::Tparticle Tparticle;
    typedef typename Tspace::Tpvec Tpvec;
    Tspace& spc;
    std::vector<float> sasa, radii; // per-particle SASA and probe-inflated radii
    std::vector<Point> coords;      // particle positions passed to POWERSASA
    double probe; // sasa probe radius (angstrom)
    double conc=0;// co-solute concentration (mol/l)
    Average<double> avgArea; // average surface area
    std::shared_ptr<POWERSASA::PowerSasa<float,Point>> ps;

    // Recompute per-particle SASA for the given particle vector
    void updateSASA(const Tpvec &p) {
        radii.resize(p.size());
        coords.resize(p.size());
        std::transform(p.begin(), p.end(), coords.begin(), [](auto &a){ return a.pos;});
        std::transform(p.begin(), p.end(), radii.begin(),
                [this](auto &a){ return atoms<Tparticle>[a.id].sigma*0.5 + this->probe;});
        ps->update_coords(coords, radii); // slowest step!
        for (size_t i=0; i<p.size(); i++) {
            auto &a = atoms<Tparticle>[p[i].id];
            // only particles with nonzero transfer free energy or surface
            // tension contribute, so skip the expensive per-particle call otherwise
            if (std::fabs(a.tfe)>1e-9 || std::fabs(a.tension)>1e-9)
                ps->calc_sasa_single(i);
        }
        sasa = ps->getSasa();
        assert(sasa.size()==p.size());
    }
    void to_json(json &j) const override {
        using namespace u8;
        j["molarity"] = conc / 1.0_molar;
        j["radius"] = probe / 1.0_angstrom;
        // NOTE(review): the key advertises angstrom^2 but the value is divided
        // by 1.0_angstrom (not squared) -- verify the intended unit conversion
        j[bracket("SASA")+"/"+angstrom+squared] = avgArea.avg() / 1.0_angstrom;
        _roundjson(j,5);
    }
public:
    SASAEnergy(const json &j, Tspace &spc) : spc(spc) {
        name = "sasa";
        cite = "doi:10.1002/jcc.21844";
        probe = j.value("radius", 1.4) * 1.0_angstrom;
        conc = j.value("molarity", conc) * 1.0_molar;
        // prime POWERSASA with the initial configuration
        radii.resize(spc.p.size());
        coords.resize(spc.p.size());
        std::transform(spc.p.begin(), spc.p.end(), coords.begin(), [](auto &a){ return a.pos;});
        std::transform(spc.p.begin(), spc.p.end(), radii.begin(),
                [this](auto &a){ return atoms<Tparticle>[a.id].sigma*0.5 + this->probe;});
        ps = std::make_shared<POWERSASA::PowerSasa<float,Point>>(coords,radii);
    }
    double energy(Change &change) override {
        double u=0, A=0;
        updateSASA(spc.p);
        for (size_t i=0; i<sasa.size(); ++i) {
            auto &a = atoms<Tparticle>[ spc.p[i].id ];
            // energy = area * (surface tension + concentration * transfer free energy)
            u += sasa[i] * (a.tension + conc * a.tfe);
            A += sasa[i];
        }
        avgArea+=A; // sample average area for accepted confs. only
        return u;
    }
}; //!< SASA energy from transfer free energies
#endif
/**
 * @brief Two-dimensional test energy surface acting on the first particle
 *
 * The x-axis is divided into five slabs on [-2,2], each scaling a common
 * sinusoidal landscape; outside that interval a large penalty is returned.
 */
struct Example2D : public Energybase {
    Point& i; // reference to 1st particle in the system
    template<typename Tspace>
    Example2D(const json &j, Tspace &spc): i(spc.p.at(0).pos) { name = "Example2D"; }
    double energy(Change &change) override {
        const double x = i.x();
        const double s = 1 + std::sin(2*pc::pi*x) + std::cos(2*pc::pi*i.y());
        if (x < -2.00) return 1e10; // left of the sampled region
        if (x <= -1.25) return 1*s;
        if (x <= -0.25) return 2*s;
        if (x <=  0.75) return 3*s;
        if (x <=  1.75) return 4*s;
        if (x <=  2.00) return 5*s;
        return 1e10; // right of the sampled region
    }
};
/**
 * @brief Aggregates and sums a collection of energy terms
 *
 * Each object in the input's "energy" list is mapped to a concrete Energy
 * class and appended to the internal pointer vector. Unknown keys produce
 * a warning; construction failures are rethrown with the offending key.
 */
template<typename Tspace>
class Hamiltonian : public Energybase, public BasePointerVector<Energybase> {
protected:
    typedef typename Tspace::Tparticle Tparticle;
    void to_json(json &j) const override {
        for (auto i : this->vec)
            j.push_back(*i);
    }
    void addEwald(const json &j, Tspace &spc) {
        if (j.count("coulomb")==1)
            if (j["coulomb"].at("type")=="ewald")
                push_back<Energy::Ewald<Tspace>>(j["coulomb"], spc);
    } //!< Adds an instance of reciprocal space Ewald energies (if appropriate)
public:
    Hamiltonian(Tspace &spc, const json &j) {
        using namespace Potential;
        typedef CombinedPairPotential<CoulombGalore,LennardJones<Tparticle>> CoulombLJ;
        typedef CombinedPairPotential<CoulombGalore,HardSphere<Tparticle>> CoulombHS;
        typedef CombinedPairPotential<CoulombGalore,WeeksChandlerAndersen<Tparticle>> CoulombWCA;
        typedef CombinedPairPotential<Coulomb,WeeksChandlerAndersen<Tparticle>> PrimitiveModelWCA;
        Energybase::name="hamiltonian";
        for (auto &m : j.at("energy")) { // loop over energy list
            size_t oldsize = vec.size(); // detect whether any term was added
            for (auto it=m.begin(); it!=m.end(); ++it) {
                try {
                    if (it.key()=="nonbonded_coulomblj")
                        push_back<Energy::Nonbonded<Tspace,CoulombLJ>>(it.value(), spc);
                    if (it.key()=="nonbonded")
                        push_back<Energy::Nonbonded<Tspace,FunctorPotential<typename Tspace::Tparticle>>>(it.value(), spc);
                    if (it.key()=="nonbonded_coulombhs")
                        push_back<Energy::Nonbonded<Tspace,CoulombHS>>(it.value(), spc);
                    if (it.key()=="nonbonded_coulombwca")
                        push_back<Energy::Nonbonded<Tspace,CoulombWCA>>(it.value(), spc);
                    if (it.key()=="nonbonded_pmwca")
                        push_back<Energy::Nonbonded<Tspace,PrimitiveModelWCA>>(it.value(), spc);
                    if (it.key()=="nonbonded_deserno")
                        push_back<Energy::NonbondedCached<Tspace,DesernoMembrane<typename Tspace::Tparticle>>>(it.value(), spc);
                    if (it.key()=="nonbonded_desernoAA")
                        push_back<Energy::NonbondedCached<Tspace,DesernoMembraneAA<typename Tspace::Tparticle>>>(it.value(), spc);
                    if (it.key()=="bonded")
                        push_back<Energy::Bonded<Tspace>>(it.value(), spc);
                    if (it.key()=="confine")
                        push_back<Energy::Confine<Tspace>>(it.value(), spc);
                    if (it.key()=="example2d")
                        push_back<Energy::Example2D>(it.value(), spc);
                    if (it.key()=="isobaric")
                        push_back<Energy::Isobaric<Tspace>>(it.value(), spc);
                    if (it.key()=="penalty")
#ifdef ENABLE_MPI
                        push_back<Energy::PenaltyMPI<Tspace>>(it.value(), spc);
#else
                        push_back<Energy::Penalty<Tspace>>(it.value(), spc);
#endif
// fix: SASAEnergy is declared under FAU_POWERSASA (see its definition); the
// previous ENABLE_POWERSASA guard referenced a potentially undeclared class
#ifdef FAU_POWERSASA
                    if (it.key()=="sasa")
                        push_back<Energy::SASAEnergy<Tspace>>(it.value(), spc);
#endif
                    // additional energies go here...
                    addEwald(it.value(), spc); // add reciprocal Ewald terms if appropriate
                    if (vec.size()==oldsize)
                        std::cerr << "warning: ignoring unknown energy '" << it.key() << "'" << endl;
                } catch (std::exception &e) {
                    throw std::runtime_error("Error adding energy '" + it.key() + "': " + e.what());
                }
            }
        }
    }
    double energy(Change &change) override {
        double du=0;
        for (auto i : this->vec) {
            i->key=key; // propagate OLD/NEW state to each term
            du += i->energy(change);
        }
        return du;
    } //!< Energy due to changes
    void sync(Energybase* basePtr, Change &change) override {
        auto other = dynamic_cast<decltype(this)>(basePtr);
        if (other!=nullptr) {
            if (other->size()==size())
                for (size_t i=0; i<size(); i++)
                    this->vec[i]->sync( other->vec[i].get(), change);
        } else
            throw std::runtime_error("hamiltonian mismatch");
    }
}; //!< Aggregates and sum energy terms
}//namespace
}//namespace
#include <mpi.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <unistd.h>
#include "AllFunctions.h"
#define TOP_LEFT_CORNER_PIXEL_TAG 0
#define TOP_RIGHT_CORNER_PIXEL_TAG 1
#define BOTTOM_LEFT_CORNER_PIXEL_TAG 2
#define BOTTOM_RIGHT_CORNER_PIXEL_TAG 3
#define TOP_ROW_TAG 4
#define BOTTOM_ROW_TAG 5
#define LEFT_COLUMN_TAG 6
#define RIGHT_COLUMN_TAG 7
#define LEFT_COLUMN_GREEN_TAG 8
#define LEFT_COLUMN_BLUE_TAG 9
#define RIGHT_COLUMN_GREEN_TAG 10
#define RIGHT_COLUMN_BLUE_TAG 11
#define TOP_LEFT_CORNER_GREEN_PIXEL_TAG 12
#define TOP_RIGHT_CORNER_GREEN_PIXEL_TAG 13
#define BOTTOM_LEFT_CORNER_GREEN_PIXEL_TAG 14
#define BOTTOM_RIGHT_CORNER_GREEN_PIXEL_TAG 15
#define TOP_LEFT_CORNER_BLUE_PIXEL_TAG 16
#define TOP_RIGHT_CORNER_BLUE_PIXEL_TAG 17
#define BOTTOM_LEFT_CORNER_BLUE_PIXEL_TAG 18
#define BOTTOM_RIGHT_CORNER_BLUE_PIXEL_TAG 19
int main(int argc, char *argv[])
{
int myRank, comSize;
unsigned char **image_array;
int topRow,bottomRow,leftColumn,rightColumn;
unsigned char **sharpenedImageArray;
int rows, columns;
//INPUT FOR GREY OR RGB !!
MPI_Init (&argc, &argv); /* starts MPI */
MPI_Comm_rank (MPI_COMM_WORLD, &myRank); /* get current process id */
MPI_Comm_size (MPI_COMM_WORLD, &comSize); /* get number of processes */
double** H=(double**) malloc (3*sizeof(double*));
for(int i = 0; i < 3; i++){
H[i]=(double*) malloc(3*sizeof(double));
}
int opt;
int RGB;
while ((opt = getopt(argc, argv, "r:c:i:")) != -1) {
switch (opt) {
case 'r':
rows = atoi(optarg);
break;
case 'c':
columns = atoi(optarg);
break;
case 'i':
RGB = atoi(optarg);
break;
default:
fprintf(stderr, "ERROR BAD ARGUMENTS\n");
exit(EXIT_FAILURE);
}
}
int multiplier=1;
if (RGB==1){
multiplier=3;
}
MPI_File fh, dest;
MPI_Status status;
double mean;
double global_time;
double elapsedComm,elapsedR, elapsedW;
double startComm, startR, finishComm,finishR, startW, finishW;
if(RGB==1){
MPI_File_open( MPI_COMM_WORLD, "../images/waterfall_1920_2520.raw", MPI_MODE_RDONLY , MPI_INFO_NULL, &fh );
}
else{
MPI_File_open( MPI_COMM_WORLD, "../images/waterfall_grey_1920_2520.raw", MPI_MODE_RDONLY , MPI_INFO_NULL, &fh );
}
double startH[9];
MPI_Request sendHArray =MPI_REQUEST_NULL;
MPI_Request recvHArray =MPI_REQUEST_NULL;
if(myRank == 0){
getInput(&H);
for (int i = 0; i < 9; i++){
startH[i] = H[i/3][i%3];
}
for (int i = 0; i < 9; i++){
}
//MPI_Ibcast(startH, 9, MPI_DOUBLE, 0, MPI_COMM_WORLD, &request);
for(int i = 1; i<comSize; i++){
MPI_Isend(startH, 9, MPI_DOUBLE, i, 10, MPI_COMM_WORLD, &sendHArray);
}
}
else{
MPI_Irecv(startH, 9, MPI_DOUBLE, 0, 10, MPI_COMM_WORLD, &recvHArray);
}
int procSize = (int) sqrt(comSize);
int localRows = (int) rows/procSize;
int localColumns = (int) (multiplier*columns)/procSize;
unsigned char* data = (unsigned char*) malloc(localRows*localColumns*sizeof(unsigned char));
image_array = (unsigned char **) malloc ((localRows)*sizeof(unsigned char*));
MPI_Offset offset=(myRank/procSize)*localRows*(multiplier*columns) + (myRank%procSize)*localColumns;
MPI_File_seek( fh, offset, MPI_SEEK_SET );
if(RGB==1){
MPI_File_open( MPI_COMM_WORLD, "../images/sharpened_RGB.raw", MPI_MODE_WRONLY | MPI_MODE_CREATE , MPI_INFO_NULL, &dest);
}
else{
MPI_File_open( MPI_COMM_WORLD, "../images/sharpened.raw", MPI_MODE_WRONLY | MPI_MODE_CREATE , MPI_INFO_NULL, &dest);
}
startR = MPI_Wtime();
for(int i=0;i<localRows;i++){
image_array[i] = &(data[i*localColumns]);
MPI_File_read(fh, image_array[i], localColumns, MPI_UNSIGNED_CHAR,&status);
MPI_File_seek( fh,(procSize-1)*localColumns, MPI_SEEK_CUR );
}
finishR = MPI_Wtime();
elapsedR = finishR - startR;
MPI_Allreduce(&elapsedR, &global_time, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
if (myRank == 0){
mean = global_time/comSize;
printf("Mean time to read is %lf\n",mean);
}
unsigned char receivedTopLeftCorner;
unsigned char receivedTopRightCorner;
unsigned char receivedBottomRightCorner;
unsigned char receivedBottomLeftCorner;
unsigned char *receivedTopRow = (unsigned char *) malloc(localColumns*sizeof(unsigned char));
unsigned char *receivedRightColumn = (unsigned char *) malloc((localRows)*sizeof(unsigned char));
unsigned char *receivedBottomRow = (unsigned char *) malloc(localColumns*sizeof(unsigned char));
unsigned char *receivedLeftColumn = (unsigned char *) malloc((localRows)*sizeof(unsigned char));
unsigned char *receivedLeftColumnGreen = (unsigned char *) malloc((localRows)*sizeof(unsigned char));
unsigned char *receivedLeftColumnBlue = (unsigned char *) malloc((localRows)*sizeof(unsigned char));
unsigned char *receivedRightColumnGreen = (unsigned char *) malloc((localRows)*sizeof(unsigned char));
unsigned char *receivedRightColumnBlue = (unsigned char *) malloc((localRows)*sizeof(unsigned char));
unsigned char receivedTopLeftCornerGreen;
unsigned char receivedTopLeftCornerBlue;
unsigned char receivedTopRightCornerGreen;
unsigned char receivedTopRightCornerBlue;
unsigned char receivedBottomRightCornerGreen;
unsigned char receivedBottomRightCornerBlue;
unsigned char receivedBottomLeftCornerGreen;
unsigned char receivedBottomLeftCornerBlue;
//TOP OR BOTTOM PROCCESS
if(myRank/procSize==0){
topRow=1;
bottomRow=0;
}
else if(myRank/procSize==procSize-1){
topRow=0;
bottomRow=1;
}
else{
topRow=0;
bottomRow=0;
}
//LEFT OR RIGHT PROCCESS
if(myRank%procSize==0){
leftColumn=1;
rightColumn=0;
}
else if(myRank%procSize==procSize-1){
leftColumn=0;
rightColumn=1;
}
else{
leftColumn=0;
rightColumn=0;
}
//received buffer creation
//fill buffer with black
receivedTopLeftCorner = 0;
receivedTopRightCorner = 0;
receivedBottomRightCorner = 0;
receivedBottomLeftCorner = 0;
if (RGB==1){
receivedTopLeftCornerGreen = 0;
receivedTopLeftCornerBlue = 0;
receivedTopRightCornerGreen = 0;
receivedTopRightCornerBlue = 0;
receivedBottomRightCornerGreen = 0;
receivedBottomRightCornerBlue = 0;
receivedBottomLeftCornerGreen = 0;
receivedBottomLeftCornerBlue = 0;
}
for (int i = 0; i < localColumns; i++){
receivedTopRow[i] = 0;
receivedBottomRow[i] = 0;
}
for (int i = 0; i < localRows; i++){
receivedRightColumn[i] = 0;
receivedLeftColumn[i] = 0;
if (RGB ==1){
receivedRightColumnGreen[i] = 0;
receivedLeftColumnBlue[i] = 0;
receivedRightColumnBlue[i] = 0;
receivedLeftColumnGreen[i] = 0;
}
}
int numberOfRequests;
MPI_Request* sendRequestArray;
MPI_Request* receiveRequestArray;
MPI_Datatype column;
MPI_Type_vector(localRows,1,localColumns,MPI_UNSIGNED_CHAR,&column);
MPI_Type_commit(&column);
startComm =MPI_Wtime();
//flag to see if the imaged changed
int imageSame = 0;
int repeats = 0 ;
while(imageSame == 0 && repeats < 15){
if(topRow && leftColumn){
numberOfRequests = 3;
if (RGB ==1)
numberOfRequests=7;
sendRequestArray = (MPI_Request*) malloc(numberOfRequests*sizeof(MPI_Request));
receiveRequestArray = (MPI_Request*) malloc(numberOfRequests*sizeof(MPI_Request));
//last row
MPI_Isend(image_array[localRows-1],localColumns,MPI_UNSIGNED_CHAR,myRank+procSize, BOTTOM_ROW_TAG,MPI_COMM_WORLD, &sendRequestArray[0]);
//last column
MPI_Isend(&image_array[0][localColumns-(1*multiplier)],1,column,myRank+1, RIGHT_COLUMN_TAG, MPI_COMM_WORLD, &sendRequestArray[1]);
//bottom right pixel
MPI_Isend(&image_array[localRows-1][localColumns-(1*multiplier)],1,MPI_UNSIGNED_CHAR,myRank+1+procSize, BOTTOM_RIGHT_CORNER_PIXEL_TAG, MPI_COMM_WORLD, &sendRequestArray[2]);
if (RGB==1){
//last column RGB
MPI_Isend(&image_array[0][localColumns-2],1,column,myRank+1, RIGHT_COLUMN_GREEN_TAG, MPI_COMM_WORLD, &sendRequestArray[3]);
MPI_Isend(&image_array[0][localColumns-1],1,column,myRank+1, RIGHT_COLUMN_BLUE_TAG, MPI_COMM_WORLD, &sendRequestArray[4]);
//bottom right pixel RGB
MPI_Isend(&image_array[localRows-1][localColumns-2],1,MPI_UNSIGNED_CHAR,myRank+1+procSize, BOTTOM_RIGHT_CORNER_GREEN_PIXEL_TAG, MPI_COMM_WORLD, &sendRequestArray[5]);
MPI_Isend(&image_array[localRows-1][localColumns-1],1,MPI_UNSIGNED_CHAR,myRank+1+procSize, BOTTOM_RIGHT_CORNER_BLUE_PIXEL_TAG, MPI_COMM_WORLD, &sendRequestArray[6]);
}
//Receive
//first row of other
MPI_Irecv(receivedTopRow, localColumns, MPI_UNSIGNED_CHAR, myRank+procSize, TOP_ROW_TAG, MPI_COMM_WORLD, &receiveRequestArray[0]);
//top left pixel of other
MPI_Irecv(&receivedTopLeftCorner, 1, MPI_UNSIGNED_CHAR, myRank+1+procSize, TOP_LEFT_CORNER_PIXEL_TAG,MPI_COMM_WORLD, &receiveRequestArray[1]);
//first column of other
MPI_Irecv(receivedLeftColumn, localRows, MPI_UNSIGNED_CHAR, myRank+1, LEFT_COLUMN_TAG, MPI_COMM_WORLD, &receiveRequestArray[2]);
if (RGB == 1){
//top left pixel of other RGB
MPI_Irecv(&receivedTopLeftCornerGreen, 1, MPI_UNSIGNED_CHAR, myRank+1+procSize, TOP_LEFT_CORNER_GREEN_PIXEL_TAG,MPI_COMM_WORLD, &receiveRequestArray[3]);
MPI_Irecv(&receivedTopLeftCornerBlue, 1, MPI_UNSIGNED_CHAR, myRank+1+procSize, TOP_LEFT_CORNER_BLUE_PIXEL_TAG,MPI_COMM_WORLD, &receiveRequestArray[4]);
//first column of other RGB
MPI_Irecv(receivedLeftColumnGreen, localRows, MPI_UNSIGNED_CHAR, myRank+1, LEFT_COLUMN_GREEN_TAG, MPI_COMM_WORLD, &receiveRequestArray[5]);
MPI_Irecv(receivedLeftColumnBlue, localRows, MPI_UNSIGNED_CHAR, myRank+1, LEFT_COLUMN_BLUE_TAG, MPI_COMM_WORLD, &receiveRequestArray[6]);
}
}
else if(topRow && !leftColumn && !rightColumn){
numberOfRequests=5;
if (RGB == 1)
numberOfRequests=13;
sendRequestArray = (MPI_Request*) malloc(numberOfRequests*sizeof(MPI_Request));
receiveRequestArray = (MPI_Request*) malloc(numberOfRequests*sizeof(MPI_Request));
//first column
MPI_Isend(&image_array[0][0],1,column,myRank-1, LEFT_COLUMN_TAG, MPI_COMM_WORLD, &sendRequestArray[0]);
//bottom left pixel
MPI_Isend(&image_array[localRows-1][0],1,MPI_UNSIGNED_CHAR,myRank-1+procSize, BOTTOM_LEFT_CORNER_PIXEL_TAG, MPI_COMM_WORLD, &sendRequestArray[1]);
//last row
MPI_Isend(image_array[localRows-1],localColumns,MPI_UNSIGNED_CHAR,myRank+procSize, BOTTOM_ROW_TAG, MPI_COMM_WORLD, &sendRequestArray[2]);
//bottom right pixel
MPI_Isend(&image_array[localRows-1][localColumns-(1*multiplier)],1,MPI_UNSIGNED_CHAR,myRank+1+procSize, BOTTOM_RIGHT_CORNER_PIXEL_TAG, MPI_COMM_WORLD, &sendRequestArray[3]);
//last column
MPI_Isend(&image_array[0][localColumns-(1*multiplier)],1,column,myRank+1, RIGHT_COLUMN_TAG, MPI_COMM_WORLD, &sendRequestArray[4]);
if (RGB == 1){
//first column RGB
// NOTE(review): BUG — green/blue offsets below are swapped relative to every
// other branch: elsewhere LEFT_COLUMN_GREEN_TAG sends &image_array[0][1] and
// LEFT_COLUMN_BLUE_TAG sends &image_array[0][2], but here green sends [0][2]
// and blue sends [0][1]; the receiver will get the channels crossed.
MPI_Isend(&image_array[0][2],1,column,myRank-1, LEFT_COLUMN_GREEN_TAG, MPI_COMM_WORLD, &sendRequestArray[5]);
MPI_Isend(&image_array[0][1],1,column,myRank-1, LEFT_COLUMN_BLUE_TAG, MPI_COMM_WORLD, &sendRequestArray[6]);
//bottom left pixel RGB
MPI_Isend(&image_array[localRows-1][1],1,MPI_UNSIGNED_CHAR,myRank-1+procSize, BOTTOM_LEFT_CORNER_GREEN_PIXEL_TAG, MPI_COMM_WORLD, &sendRequestArray[7]);
MPI_Isend(&image_array[localRows-1][2],1,MPI_UNSIGNED_CHAR,myRank-1+procSize, BOTTOM_LEFT_CORNER_BLUE_PIXEL_TAG, MPI_COMM_WORLD, &sendRequestArray[8]);
//bottom right pixel RGB
MPI_Isend(&image_array[localRows-1][localColumns-2],1,MPI_UNSIGNED_CHAR,myRank+1+procSize, BOTTOM_RIGHT_CORNER_GREEN_PIXEL_TAG, MPI_COMM_WORLD, &sendRequestArray[9]);
MPI_Isend(&image_array[localRows-1][localColumns-1],1,MPI_UNSIGNED_CHAR,myRank+1+procSize, BOTTOM_RIGHT_CORNER_BLUE_PIXEL_TAG, MPI_COMM_WORLD, &sendRequestArray[10]);
//last column RGB
MPI_Isend(&image_array[0][localColumns-2],1,column,myRank+1, RIGHT_COLUMN_GREEN_TAG, MPI_COMM_WORLD, &sendRequestArray[11]);
MPI_Isend(&image_array[0][localColumns-1],1,column,myRank+1, RIGHT_COLUMN_BLUE_TAG, MPI_COMM_WORLD, &sendRequestArray[12]);
}
//Receive
//last column of other
MPI_Irecv(receivedRightColumn, localRows, MPI_UNSIGNED_CHAR, myRank-1, RIGHT_COLUMN_TAG, MPI_COMM_WORLD, &receiveRequestArray[0]);
//top right pixel of other
MPI_Irecv(&receivedTopRightCorner, 1, MPI_UNSIGNED_CHAR, myRank+procSize-1,TOP_RIGHT_CORNER_PIXEL_TAG, MPI_COMM_WORLD, &receiveRequestArray[1]);
//first row of other
MPI_Irecv(receivedTopRow, localColumns, MPI_UNSIGNED_CHAR, myRank+procSize, TOP_ROW_TAG, MPI_COMM_WORLD, &receiveRequestArray[2]);
//top left pixel of other
MPI_Irecv(&receivedTopLeftCorner, 1, MPI_UNSIGNED_CHAR, myRank+1+procSize,TOP_LEFT_CORNER_PIXEL_TAG, MPI_COMM_WORLD, &receiveRequestArray[3]);
//first column
MPI_Irecv(receivedLeftColumn, localRows, MPI_UNSIGNED_CHAR, myRank+1, LEFT_COLUMN_TAG, MPI_COMM_WORLD, &receiveRequestArray[4]);
if (RGB==1){
//top right pixel of other RGB
MPI_Irecv(&receivedTopRightCornerGreen, 1, MPI_UNSIGNED_CHAR, myRank+procSize-1,TOP_RIGHT_CORNER_GREEN_PIXEL_TAG, MPI_COMM_WORLD, &receiveRequestArray[5]);
MPI_Irecv(&receivedTopRightCornerBlue, 1, MPI_UNSIGNED_CHAR, myRank+procSize-1,TOP_RIGHT_CORNER_BLUE_PIXEL_TAG, MPI_COMM_WORLD, &receiveRequestArray[6]);
//last column of other RGB
MPI_Irecv(receivedRightColumnGreen, localRows, MPI_UNSIGNED_CHAR, myRank-1, RIGHT_COLUMN_GREEN_TAG, MPI_COMM_WORLD, &receiveRequestArray[7]);
MPI_Irecv(receivedRightColumnBlue, localRows, MPI_UNSIGNED_CHAR, myRank-1, RIGHT_COLUMN_BLUE_TAG, MPI_COMM_WORLD, &receiveRequestArray[8]);
//top left pixel of other RGB
MPI_Irecv(&receivedTopLeftCornerGreen, 1, MPI_UNSIGNED_CHAR, myRank+1+procSize,TOP_LEFT_CORNER_GREEN_PIXEL_TAG, MPI_COMM_WORLD, &receiveRequestArray[9]);
MPI_Irecv(&receivedTopLeftCornerBlue, 1, MPI_UNSIGNED_CHAR, myRank+1+procSize,TOP_LEFT_CORNER_BLUE_PIXEL_TAG, MPI_COMM_WORLD, &receiveRequestArray[10]);
//first column of other RGB
MPI_Irecv(receivedLeftColumnGreen, localRows, MPI_UNSIGNED_CHAR, myRank+1, LEFT_COLUMN_GREEN_TAG, MPI_COMM_WORLD, &receiveRequestArray[11]);
MPI_Irecv(receivedLeftColumnBlue, localRows, MPI_UNSIGNED_CHAR, myRank+1, LEFT_COLUMN_BLUE_TAG, MPI_COMM_WORLD, &receiveRequestArray[12]);
}
}
else if(topRow && rightColumn){
numberOfRequests=3;
if (RGB==1)
numberOfRequests=7;
sendRequestArray = (MPI_Request*) malloc(numberOfRequests*sizeof(MPI_Request));
receiveRequestArray = (MPI_Request*) malloc(numberOfRequests*sizeof(MPI_Request));
//first column
MPI_Isend(&image_array[0][0],1,column,myRank-1, LEFT_COLUMN_TAG, MPI_COMM_WORLD, &sendRequestArray[0]);
//last row
MPI_Isend(image_array[localRows-1],localColumns,MPI_UNSIGNED_CHAR,myRank+procSize, BOTTOM_ROW_TAG,MPI_COMM_WORLD, &sendRequestArray[1]);
//bottom left pixel
MPI_Isend(&image_array[localRows-1][0],1,MPI_UNSIGNED_CHAR,myRank-1+procSize, BOTTOM_LEFT_CORNER_PIXEL_TAG, MPI_COMM_WORLD, &sendRequestArray[2]);
if (RGB == 1){
//first column RGB
MPI_Isend(&image_array[0][1],1,column,myRank-1, LEFT_COLUMN_GREEN_TAG, MPI_COMM_WORLD, &sendRequestArray[3]);
MPI_Isend(&image_array[0][2],1,column,myRank-1, LEFT_COLUMN_BLUE_TAG, MPI_COMM_WORLD, &sendRequestArray[4]);
//bottom left pixel RGB
MPI_Isend(&image_array[localRows-1][1],1,MPI_UNSIGNED_CHAR,myRank-1+procSize, BOTTOM_LEFT_CORNER_GREEN_PIXEL_TAG, MPI_COMM_WORLD, &sendRequestArray[5]);
MPI_Isend(&image_array[localRows-1][2],1,MPI_UNSIGNED_CHAR,myRank-1+procSize, BOTTOM_LEFT_CORNER_BLUE_PIXEL_TAG, MPI_COMM_WORLD, &sendRequestArray[6]);
}
//Receive
//last column of other
MPI_Irecv(receivedRightColumn, localRows, MPI_UNSIGNED_CHAR, myRank-1, RIGHT_COLUMN_TAG, MPI_COMM_WORLD, &receiveRequestArray[0]);
//top right pixel of other
MPI_Irecv(&receivedTopRightCorner, 1, MPI_UNSIGNED_CHAR, myRank+procSize-1,TOP_RIGHT_CORNER_PIXEL_TAG, MPI_COMM_WORLD, &receiveRequestArray[1]);
//first row of other
MPI_Irecv(receivedTopRow, localColumns, MPI_UNSIGNED_CHAR, myRank+procSize, TOP_ROW_TAG, MPI_COMM_WORLD, &receiveRequestArray[2]);
if (RGB==1){
//last column of other RGB (the two receives below fill receivedRightColumn*)
MPI_Irecv(receivedRightColumnGreen, localRows, MPI_UNSIGNED_CHAR, myRank-1, RIGHT_COLUMN_GREEN_TAG, MPI_COMM_WORLD, &receiveRequestArray[3]);
MPI_Irecv(receivedRightColumnBlue, localRows, MPI_UNSIGNED_CHAR, myRank-1, RIGHT_COLUMN_BLUE_TAG, MPI_COMM_WORLD, &receiveRequestArray[4]);
//top right of other RGB
MPI_Irecv(&receivedTopRightCornerGreen, 1, MPI_UNSIGNED_CHAR, myRank+procSize-1,TOP_RIGHT_CORNER_GREEN_PIXEL_TAG, MPI_COMM_WORLD, &receiveRequestArray[5]);
MPI_Irecv(&receivedTopRightCornerBlue, 1, MPI_UNSIGNED_CHAR, myRank+procSize-1,TOP_RIGHT_CORNER_BLUE_PIXEL_TAG, MPI_COMM_WORLD, &receiveRequestArray[6]);
}
}
else if(!topRow && !bottomRow && rightColumn){
numberOfRequests=5;
if (RGB==1)
numberOfRequests=11;
sendRequestArray = (MPI_Request*) malloc(numberOfRequests*sizeof(MPI_Request));
receiveRequestArray = (MPI_Request*) malloc(numberOfRequests*sizeof(MPI_Request));
//first row
MPI_Isend(image_array[0],localColumns,MPI_UNSIGNED_CHAR,myRank-procSize, TOP_ROW_TAG, MPI_COMM_WORLD, &sendRequestArray[0]);
//top left pixel
MPI_Isend(&image_array[0][0],1,MPI_UNSIGNED_CHAR,myRank-1-procSize,TOP_LEFT_CORNER_PIXEL_TAG, MPI_COMM_WORLD, &sendRequestArray[1]);
//first column
MPI_Isend(&image_array[0][0],1,column,myRank-1, LEFT_COLUMN_TAG, MPI_COMM_WORLD, &sendRequestArray[2]);
//bottom left pixel
MPI_Isend(&image_array[localRows-1][0],1,MPI_UNSIGNED_CHAR,myRank-1+procSize, BOTTOM_LEFT_CORNER_PIXEL_TAG, MPI_COMM_WORLD, &sendRequestArray[3]);
//last row
MPI_Isend(image_array[localRows-1],localColumns,MPI_UNSIGNED_CHAR,myRank+procSize, BOTTOM_ROW_TAG, MPI_COMM_WORLD, &sendRequestArray[4]);
if (RGB==1){
//top left pixel RGB
MPI_Isend(&image_array[0][1],1,MPI_UNSIGNED_CHAR,myRank-1-procSize,TOP_LEFT_CORNER_GREEN_PIXEL_TAG, MPI_COMM_WORLD, &sendRequestArray[5]);
MPI_Isend(&image_array[0][2],1,MPI_UNSIGNED_CHAR,myRank-1-procSize,TOP_LEFT_CORNER_BLUE_PIXEL_TAG, MPI_COMM_WORLD, &sendRequestArray[6]);
//first column RGB
MPI_Isend(&image_array[0][1],1,column,myRank-1, LEFT_COLUMN_GREEN_TAG, MPI_COMM_WORLD, &sendRequestArray[7]);
MPI_Isend(&image_array[0][2],1,column,myRank-1, LEFT_COLUMN_BLUE_TAG, MPI_COMM_WORLD, &sendRequestArray[8]);
//bottom left pixel RGB
MPI_Isend(&image_array[localRows-1][1],1,MPI_UNSIGNED_CHAR,myRank-1+procSize, BOTTOM_LEFT_CORNER_GREEN_PIXEL_TAG, MPI_COMM_WORLD, &sendRequestArray[9]);
MPI_Isend(&image_array[localRows-1][2],1,MPI_UNSIGNED_CHAR,myRank-1+procSize, BOTTOM_LEFT_CORNER_BLUE_PIXEL_TAG, MPI_COMM_WORLD, &sendRequestArray[10]);
}
//Receive
//last row of other
MPI_Irecv(receivedBottomRow, localColumns, MPI_UNSIGNED_CHAR, myRank-procSize, BOTTOM_ROW_TAG, MPI_COMM_WORLD, &receiveRequestArray[0]);
//bottom right pixel of other
MPI_Irecv(&receivedBottomRightCorner, 1, MPI_UNSIGNED_CHAR, myRank-1-procSize, BOTTOM_RIGHT_CORNER_PIXEL_TAG, MPI_COMM_WORLD, &receiveRequestArray[1]);
//last column of other
MPI_Irecv(receivedRightColumn, localRows, MPI_UNSIGNED_CHAR, myRank-1, RIGHT_COLUMN_TAG, MPI_COMM_WORLD, &receiveRequestArray[2]);
//top right pixel of other
MPI_Irecv(&receivedTopRightCorner, 1, MPI_UNSIGNED_CHAR, myRank+procSize-1, TOP_RIGHT_CORNER_PIXEL_TAG, MPI_COMM_WORLD, &receiveRequestArray[3]);
//first row of other
MPI_Irecv(receivedTopRow, localColumns, MPI_UNSIGNED_CHAR, myRank+procSize, TOP_ROW_TAG, MPI_COMM_WORLD, &receiveRequestArray[4]);
if (RGB ==1){
//bottom right pixel of other RGB
MPI_Irecv(&receivedBottomRightCornerGreen, 1, MPI_UNSIGNED_CHAR, myRank-1-procSize, BOTTOM_RIGHT_CORNER_GREEN_PIXEL_TAG, MPI_COMM_WORLD, &receiveRequestArray[5]);
MPI_Irecv(&receivedBottomRightCornerBlue, 1, MPI_UNSIGNED_CHAR, myRank-1-procSize, BOTTOM_RIGHT_CORNER_BLUE_PIXEL_TAG, MPI_COMM_WORLD, &receiveRequestArray[6]);
//last column of other RGB
MPI_Irecv(receivedRightColumnGreen, localRows, MPI_UNSIGNED_CHAR, myRank-1, RIGHT_COLUMN_GREEN_TAG, MPI_COMM_WORLD, &receiveRequestArray[7]);
MPI_Irecv(receivedRightColumnBlue, localRows, MPI_UNSIGNED_CHAR, myRank-1, RIGHT_COLUMN_BLUE_TAG, MPI_COMM_WORLD, &receiveRequestArray[8]);
//top right pixel of other RGB
MPI_Irecv(&receivedTopRightCornerGreen, 1, MPI_UNSIGNED_CHAR, myRank+procSize-1, TOP_RIGHT_CORNER_GREEN_PIXEL_TAG, MPI_COMM_WORLD, &receiveRequestArray[9]);
MPI_Irecv(&receivedTopRightCornerBlue, 1, MPI_UNSIGNED_CHAR, myRank+procSize-1, TOP_RIGHT_CORNER_BLUE_PIXEL_TAG, MPI_COMM_WORLD, &receiveRequestArray[10]);
}
}
else if(bottomRow && rightColumn){
numberOfRequests=3;
if (RGB == 1)
numberOfRequests=7;
sendRequestArray = (MPI_Request*) malloc(numberOfRequests*sizeof(MPI_Request));
receiveRequestArray = (MPI_Request*) malloc(numberOfRequests*sizeof(MPI_Request));
//first row
MPI_Isend(image_array[0],localColumns, MPI_UNSIGNED_CHAR, myRank-procSize, TOP_ROW_TAG,MPI_COMM_WORLD, &sendRequestArray[0]);
//top left pixel
MPI_Isend(&image_array[0][0],1, MPI_UNSIGNED_CHAR, myRank-1-procSize, TOP_LEFT_CORNER_PIXEL_TAG, MPI_COMM_WORLD, &sendRequestArray[1]);
//first column
MPI_Isend(&image_array[0][0],1,column,myRank-1, LEFT_COLUMN_TAG, MPI_COMM_WORLD, &sendRequestArray[2]);
if (RGB == 1){
//top left pixel RGB
MPI_Isend(&image_array[0][1],1, MPI_UNSIGNED_CHAR, myRank-1-procSize, TOP_LEFT_CORNER_GREEN_PIXEL_TAG, MPI_COMM_WORLD, &sendRequestArray[3]);
MPI_Isend(&image_array[0][2],1, MPI_UNSIGNED_CHAR, myRank-1-procSize, TOP_LEFT_CORNER_BLUE_PIXEL_TAG, MPI_COMM_WORLD, &sendRequestArray[4]);
//first column RGB
MPI_Isend(&image_array[0][1],1,column,myRank-1, LEFT_COLUMN_GREEN_TAG, MPI_COMM_WORLD, &sendRequestArray[5]);
MPI_Isend(&image_array[0][2],1,column,myRank-1, LEFT_COLUMN_BLUE_TAG, MPI_COMM_WORLD, &sendRequestArray[6]);
}
//Receive
//last row of other
MPI_Irecv(receivedBottomRow, localColumns, MPI_UNSIGNED_CHAR, myRank-procSize, BOTTOM_ROW_TAG, MPI_COMM_WORLD, &receiveRequestArray[0]);
//bottom right pixel of other
MPI_Irecv(&receivedBottomRightCorner, 1, MPI_UNSIGNED_CHAR, myRank-1-procSize, BOTTOM_RIGHT_CORNER_PIXEL_TAG, MPI_COMM_WORLD, &receiveRequestArray[1]);
//last column of other
MPI_Irecv(receivedRightColumn, localRows, MPI_UNSIGNED_CHAR, myRank-1, RIGHT_COLUMN_TAG, MPI_COMM_WORLD, &receiveRequestArray[2]);
if (RGB == 1){
//bottom right pixel of other RGB
MPI_Irecv(&receivedBottomRightCornerGreen, 1, MPI_UNSIGNED_CHAR, myRank-1-procSize, BOTTOM_RIGHT_CORNER_GREEN_PIXEL_TAG, MPI_COMM_WORLD, &receiveRequestArray[3]);
MPI_Irecv(&receivedBottomRightCornerBlue, 1, MPI_UNSIGNED_CHAR, myRank-1-procSize, BOTTOM_RIGHT_CORNER_BLUE_PIXEL_TAG, MPI_COMM_WORLD, &receiveRequestArray[4]);
//last column of other RGB
MPI_Irecv(receivedRightColumnGreen, localRows, MPI_UNSIGNED_CHAR, myRank-1, RIGHT_COLUMN_GREEN_TAG, MPI_COMM_WORLD, &receiveRequestArray[5]);
MPI_Irecv(receivedRightColumnBlue, localRows, MPI_UNSIGNED_CHAR, myRank-1, RIGHT_COLUMN_BLUE_TAG, MPI_COMM_WORLD, &receiveRequestArray[6]);
}
}
else if(bottomRow && !leftColumn && !rightColumn){
numberOfRequests=5;
if (RGB == 1)
numberOfRequests=13;
sendRequestArray = (MPI_Request*) malloc(numberOfRequests*sizeof(MPI_Request));
receiveRequestArray = (MPI_Request*) malloc(numberOfRequests*sizeof(MPI_Request));
//first column
MPI_Isend(&image_array[0][0],1,column, myRank-1, LEFT_COLUMN_TAG, MPI_COMM_WORLD, &sendRequestArray[0]);
//top left pixel
MPI_Isend(&image_array[0][0],1,MPI_UNSIGNED_CHAR, myRank-1-procSize,TOP_LEFT_CORNER_PIXEL_TAG, MPI_COMM_WORLD, &sendRequestArray[1]);
//top row
MPI_Isend(image_array[0],localColumns, MPI_UNSIGNED_CHAR, myRank-procSize, TOP_ROW_TAG,MPI_COMM_WORLD, &sendRequestArray[2]);
//top right pixel
MPI_Isend(&image_array[0][localColumns-(1*multiplier)],1, MPI_UNSIGNED_CHAR, myRank+1-procSize, TOP_RIGHT_CORNER_PIXEL_TAG, MPI_COMM_WORLD, &sendRequestArray[3]);
//last column
MPI_Isend(&image_array[0][localColumns-(1*multiplier)],1,column, myRank+1, RIGHT_COLUMN_TAG, MPI_COMM_WORLD, &sendRequestArray[4]);
if (RGB ==1){
//first column RGB
MPI_Isend(&image_array[0][1],1,column,myRank-1, LEFT_COLUMN_GREEN_TAG, MPI_COMM_WORLD, &sendRequestArray[5]);
MPI_Isend(&image_array[0][2],1,column,myRank-1, LEFT_COLUMN_BLUE_TAG, MPI_COMM_WORLD, &sendRequestArray[6]);
//top left pixel RGB
MPI_Isend(&image_array[0][1],1, MPI_UNSIGNED_CHAR, myRank-1-procSize, TOP_LEFT_CORNER_GREEN_PIXEL_TAG, MPI_COMM_WORLD, &sendRequestArray[7]);
MPI_Isend(&image_array[0][2],1, MPI_UNSIGNED_CHAR, myRank-1-procSize, TOP_LEFT_CORNER_BLUE_PIXEL_TAG, MPI_COMM_WORLD, &sendRequestArray[8]);
//top right RGB
MPI_Isend(&image_array[0][localColumns-2],1, MPI_UNSIGNED_CHAR, myRank+1-procSize, TOP_RIGHT_CORNER_GREEN_PIXEL_TAG, MPI_COMM_WORLD, &sendRequestArray[9]);
MPI_Isend(&image_array[0][localColumns-1],1, MPI_UNSIGNED_CHAR, myRank+1-procSize, TOP_RIGHT_CORNER_BLUE_PIXEL_TAG, MPI_COMM_WORLD, &sendRequestArray[10]);
//last column RGB
// NOTE(review): BUG — the two sends below use &image_array[0][1] and
// &image_array[0][2] (left-edge offsets) although the destination (myRank+1)
// and RIGHT_COLUMN_*_TAG tags are for the last column; they should send
// &image_array[0][localColumns-2] and &image_array[0][localColumns-1],
// as done in every other branch that sends the last column RGB.
MPI_Isend(&image_array[0][1],1,column,myRank+1, RIGHT_COLUMN_GREEN_TAG, MPI_COMM_WORLD, &sendRequestArray[11]);
MPI_Isend(&image_array[0][2],1,column,myRank+1, RIGHT_COLUMN_BLUE_TAG, MPI_COMM_WORLD, &sendRequestArray[12]);
}
//Receive
//last column of other
MPI_Irecv(receivedRightColumn, localRows, MPI_UNSIGNED_CHAR, myRank-1, RIGHT_COLUMN_TAG, MPI_COMM_WORLD, &receiveRequestArray[0]);
//bottom right pixel of other
MPI_Irecv(&receivedBottomRightCorner, 1, MPI_UNSIGNED_CHAR, myRank-1-procSize, BOTTOM_RIGHT_CORNER_PIXEL_TAG, MPI_COMM_WORLD, &receiveRequestArray[1]);
//last row of other
MPI_Irecv(receivedBottomRow, localColumns, MPI_UNSIGNED_CHAR, myRank-procSize, BOTTOM_ROW_TAG, MPI_COMM_WORLD, &receiveRequestArray[2]);
//bottom left pixel of other
MPI_Irecv(&receivedBottomLeftCorner, 1, MPI_UNSIGNED_CHAR, myRank+1-procSize, BOTTOM_LEFT_CORNER_PIXEL_TAG, MPI_COMM_WORLD, &receiveRequestArray[3]);
//first column of other
MPI_Irecv(receivedLeftColumn, localRows, MPI_UNSIGNED_CHAR, myRank+1, LEFT_COLUMN_TAG, MPI_COMM_WORLD, &receiveRequestArray[4]);
if (RGB==1){
//last column of other RGB
MPI_Irecv(receivedRightColumnGreen, localRows, MPI_UNSIGNED_CHAR, myRank-1, RIGHT_COLUMN_GREEN_TAG, MPI_COMM_WORLD, &receiveRequestArray[5]);
MPI_Irecv(receivedRightColumnBlue, localRows, MPI_UNSIGNED_CHAR, myRank-1, RIGHT_COLUMN_BLUE_TAG, MPI_COMM_WORLD, &receiveRequestArray[6]);
//bottom right pixel of other RGB
MPI_Irecv(&receivedBottomRightCornerGreen, 1, MPI_UNSIGNED_CHAR, myRank-1-procSize, BOTTOM_RIGHT_CORNER_GREEN_PIXEL_TAG, MPI_COMM_WORLD, &receiveRequestArray[7]);
MPI_Irecv(&receivedBottomRightCornerBlue, 1, MPI_UNSIGNED_CHAR, myRank-1-procSize, BOTTOM_RIGHT_CORNER_BLUE_PIXEL_TAG, MPI_COMM_WORLD, &receiveRequestArray[8]);
//bottom left pixel of other RGB
MPI_Irecv(&receivedBottomLeftCornerGreen, 1, MPI_UNSIGNED_CHAR, myRank+1-procSize, BOTTOM_LEFT_CORNER_GREEN_PIXEL_TAG, MPI_COMM_WORLD, &receiveRequestArray[9]);
MPI_Irecv(&receivedBottomLeftCornerBlue, 1, MPI_UNSIGNED_CHAR, myRank+1-procSize, BOTTOM_LEFT_CORNER_BLUE_PIXEL_TAG, MPI_COMM_WORLD, &receiveRequestArray[10]);
//first column of other RGB
MPI_Irecv(receivedLeftColumnGreen, localRows, MPI_UNSIGNED_CHAR, myRank+1, LEFT_COLUMN_GREEN_TAG, MPI_COMM_WORLD, &receiveRequestArray[11]);
MPI_Irecv(receivedLeftColumnBlue, localRows, MPI_UNSIGNED_CHAR, myRank+1, LEFT_COLUMN_BLUE_TAG, MPI_COMM_WORLD, &receiveRequestArray[12]);
}
}
else if(bottomRow && leftColumn){
numberOfRequests=3;
if (RGB == 1)
numberOfRequests=7;
sendRequestArray = (MPI_Request*) malloc(numberOfRequests*sizeof(MPI_Request));
receiveRequestArray = (MPI_Request*) malloc(numberOfRequests*sizeof(MPI_Request));
//first row
MPI_Isend(image_array[0],localColumns,MPI_UNSIGNED_CHAR,myRank-procSize, TOP_ROW_TAG, MPI_COMM_WORLD, &sendRequestArray[0]);
//top right pixel
MPI_Isend(&image_array[0][localColumns-(1*multiplier)],1,MPI_UNSIGNED_CHAR,myRank+1-procSize, TOP_RIGHT_CORNER_PIXEL_TAG, MPI_COMM_WORLD, &sendRequestArray[1]);
//last column
MPI_Isend(&image_array[0][localColumns-(1*multiplier)],1,column,myRank+1, RIGHT_COLUMN_TAG, MPI_COMM_WORLD, &sendRequestArray[2]);
if (RGB == 1){
//top right pixel RGB
MPI_Isend(&image_array[0][localColumns-2],1, MPI_UNSIGNED_CHAR, myRank+1-procSize, TOP_RIGHT_CORNER_GREEN_PIXEL_TAG, MPI_COMM_WORLD, &sendRequestArray[3]);
MPI_Isend(&image_array[0][localColumns-1],1, MPI_UNSIGNED_CHAR, myRank+1-procSize, TOP_RIGHT_CORNER_BLUE_PIXEL_TAG, MPI_COMM_WORLD, &sendRequestArray[4]);
//last column RGB
MPI_Isend(&image_array[0][localColumns-2],1,column,myRank+1, RIGHT_COLUMN_GREEN_TAG, MPI_COMM_WORLD, &sendRequestArray[5]);
MPI_Isend(&image_array[0][localColumns-1],1,column,myRank+1, RIGHT_COLUMN_BLUE_TAG, MPI_COMM_WORLD, &sendRequestArray[6]);
}
//Receive
//last row of other
MPI_Irecv(receivedBottomRow, localColumns, MPI_UNSIGNED_CHAR, myRank-procSize, BOTTOM_ROW_TAG, MPI_COMM_WORLD, &receiveRequestArray[0]);
//bottom left pixel of other
MPI_Irecv(&receivedBottomLeftCorner, 1, MPI_UNSIGNED_CHAR, myRank+1-procSize, BOTTOM_LEFT_CORNER_PIXEL_TAG, MPI_COMM_WORLD, &receiveRequestArray[1]);
//first column of other
MPI_Irecv(receivedLeftColumn, localRows, MPI_UNSIGNED_CHAR, myRank+1, LEFT_COLUMN_TAG, MPI_COMM_WORLD, &receiveRequestArray[2]);
if (RGB==1){
//bottom left pixel of other RGB
MPI_Irecv(&receivedBottomLeftCornerGreen, 1, MPI_UNSIGNED_CHAR, myRank+1-procSize, BOTTOM_LEFT_CORNER_GREEN_PIXEL_TAG, MPI_COMM_WORLD, &receiveRequestArray[3]);
MPI_Irecv(&receivedBottomLeftCornerBlue, 1, MPI_UNSIGNED_CHAR, myRank+1-procSize, BOTTOM_LEFT_CORNER_BLUE_PIXEL_TAG, MPI_COMM_WORLD, &receiveRequestArray[4]);
//first column of other RGB
MPI_Irecv(receivedLeftColumnGreen, localRows, MPI_UNSIGNED_CHAR, myRank+1, LEFT_COLUMN_GREEN_TAG, MPI_COMM_WORLD, &receiveRequestArray[5]);
MPI_Irecv(receivedLeftColumnBlue, localRows, MPI_UNSIGNED_CHAR, myRank+1, LEFT_COLUMN_BLUE_TAG, MPI_COMM_WORLD, &receiveRequestArray[6]);
}
}
else if(!bottomRow && !topRow && leftColumn){
numberOfRequests=5;
if (RGB == 1)
numberOfRequests=11;
sendRequestArray = (MPI_Request*) malloc(numberOfRequests*sizeof(MPI_Request));
receiveRequestArray = (MPI_Request*) malloc(numberOfRequests*sizeof(MPI_Request));
//first row
MPI_Isend(image_array[0],localColumns,MPI_UNSIGNED_CHAR,myRank-procSize, TOP_ROW_TAG, MPI_COMM_WORLD, &sendRequestArray[0]);
//top right pixel
MPI_Isend(&image_array[0][localColumns-(1*multiplier)],1,MPI_UNSIGNED_CHAR, myRank+1-procSize, TOP_RIGHT_CORNER_PIXEL_TAG, MPI_COMM_WORLD, &sendRequestArray[1]);
//last column
MPI_Isend(&image_array[0][localColumns-(1*multiplier)],1,column,myRank+1, RIGHT_COLUMN_TAG, MPI_COMM_WORLD, &sendRequestArray[2]);
//bottom right pixel
MPI_Isend(&image_array[localRows-1][localColumns-(1*multiplier)],1,MPI_UNSIGNED_CHAR,myRank+1+procSize, BOTTOM_RIGHT_CORNER_PIXEL_TAG, MPI_COMM_WORLD, &sendRequestArray[3]);
//last row
MPI_Isend(image_array[localRows-1],localColumns, MPI_UNSIGNED_CHAR, myRank+procSize, BOTTOM_ROW_TAG, MPI_COMM_WORLD, &sendRequestArray[4]);
if (RGB==1){
//top right pixel RGB
MPI_Isend(&image_array[0][localColumns-2],1, MPI_UNSIGNED_CHAR, myRank+1-procSize, TOP_RIGHT_CORNER_GREEN_PIXEL_TAG, MPI_COMM_WORLD, &sendRequestArray[5]);
MPI_Isend(&image_array[0][localColumns-1],1, MPI_UNSIGNED_CHAR, myRank+1-procSize, TOP_RIGHT_CORNER_BLUE_PIXEL_TAG, MPI_COMM_WORLD, &sendRequestArray[6]);
//last column RGB
MPI_Isend(&image_array[0][localColumns-2],1,column,myRank+1, RIGHT_COLUMN_GREEN_TAG, MPI_COMM_WORLD, &sendRequestArray[7]);
MPI_Isend(&image_array[0][localColumns-1],1,column,myRank+1, RIGHT_COLUMN_BLUE_TAG, MPI_COMM_WORLD, &sendRequestArray[8]);
//bottom right pixel RGB
MPI_Isend(&image_array[localRows-1][localColumns-2],1,MPI_UNSIGNED_CHAR,myRank+1+procSize, BOTTOM_RIGHT_CORNER_GREEN_PIXEL_TAG, MPI_COMM_WORLD, &sendRequestArray[9]);
MPI_Isend(&image_array[localRows-1][localColumns-1],1,MPI_UNSIGNED_CHAR,myRank+1+procSize, BOTTOM_RIGHT_CORNER_BLUE_PIXEL_TAG, MPI_COMM_WORLD, &sendRequestArray[10]);
}
//Receive
//last row of other
MPI_Irecv(receivedBottomRow, localColumns, MPI_UNSIGNED_CHAR, myRank-procSize, BOTTOM_ROW_TAG, MPI_COMM_WORLD, &receiveRequestArray[0]);
//bottom left pixel of other
MPI_Irecv(&receivedBottomLeftCorner, 1, MPI_UNSIGNED_CHAR, myRank+1-procSize, BOTTOM_LEFT_CORNER_PIXEL_TAG, MPI_COMM_WORLD, &receiveRequestArray[1]);
//first column of other
MPI_Irecv(receivedLeftColumn, localRows, MPI_UNSIGNED_CHAR, myRank+1, LEFT_COLUMN_TAG, MPI_COMM_WORLD, &receiveRequestArray[2]);
//top left pixel of other
MPI_Irecv(&receivedTopLeftCorner, 1, MPI_UNSIGNED_CHAR, myRank+1+procSize, TOP_LEFT_CORNER_PIXEL_TAG, MPI_COMM_WORLD, &receiveRequestArray[3]);
//first row of other
MPI_Irecv(receivedTopRow, localColumns, MPI_UNSIGNED_CHAR, myRank+procSize, TOP_ROW_TAG, MPI_COMM_WORLD, &receiveRequestArray[4]);
if (RGB == 1){
//bottom left pixel of other RGB
MPI_Irecv(&receivedBottomLeftCornerGreen, 1, MPI_UNSIGNED_CHAR, myRank+1-procSize, BOTTOM_LEFT_CORNER_GREEN_PIXEL_TAG, MPI_COMM_WORLD, &receiveRequestArray[5]);
MPI_Irecv(&receivedBottomLeftCornerBlue, 1, MPI_UNSIGNED_CHAR, myRank+1-procSize, BOTTOM_LEFT_CORNER_BLUE_PIXEL_TAG, MPI_COMM_WORLD, &receiveRequestArray[6]);
//first column of other RGB
MPI_Irecv(receivedLeftColumnGreen, localRows, MPI_UNSIGNED_CHAR, myRank+1, LEFT_COLUMN_GREEN_TAG, MPI_COMM_WORLD, &receiveRequestArray[7]);
MPI_Irecv(receivedLeftColumnBlue, localRows, MPI_UNSIGNED_CHAR, myRank+1, LEFT_COLUMN_BLUE_TAG, MPI_COMM_WORLD, &receiveRequestArray[8]);
//top left pixel of other RGB
MPI_Irecv(&receivedTopLeftCornerGreen, 1, MPI_UNSIGNED_CHAR, myRank+1+procSize, TOP_LEFT_CORNER_GREEN_PIXEL_TAG, MPI_COMM_WORLD, &receiveRequestArray[9]);
MPI_Irecv(&receivedTopLeftCornerBlue, 1, MPI_UNSIGNED_CHAR, myRank+1+procSize, TOP_LEFT_CORNER_BLUE_PIXEL_TAG, MPI_COMM_WORLD, &receiveRequestArray[10]);
}
}
else{
numberOfRequests=8;
if (RGB == 1)
numberOfRequests=20;
sendRequestArray = (MPI_Request*) malloc(numberOfRequests*sizeof(MPI_Request));
receiveRequestArray = (MPI_Request*) malloc(numberOfRequests*sizeof(MPI_Request));
//first column
MPI_Isend(&image_array[0][0],1,column,myRank-1, LEFT_COLUMN_TAG, MPI_COMM_WORLD, &sendRequestArray[0]);
//top left pixel
MPI_Isend(&image_array[0][0],1,MPI_UNSIGNED_CHAR,myRank-1-procSize, TOP_LEFT_CORNER_PIXEL_TAG, MPI_COMM_WORLD, &sendRequestArray[1]);
//top row
MPI_Isend(image_array[0],localColumns,MPI_UNSIGNED_CHAR,myRank-procSize, TOP_ROW_TAG,MPI_COMM_WORLD, &sendRequestArray[2]);
//top right pixel
MPI_Isend(&image_array[0][localColumns-(1*multiplier)],1, MPI_UNSIGNED_CHAR, myRank+1-procSize, TOP_RIGHT_CORNER_PIXEL_TAG, MPI_COMM_WORLD, &sendRequestArray[3]);
//last column
MPI_Isend(&image_array[0][localColumns-(1*multiplier)],1,column,myRank+1, RIGHT_COLUMN_TAG,MPI_COMM_WORLD, &sendRequestArray[4]);
//bottom right pixel
MPI_Isend(&image_array[localRows-1][localColumns-(1*multiplier)],1,MPI_UNSIGNED_CHAR,myRank+1+procSize, BOTTOM_RIGHT_CORNER_PIXEL_TAG,MPI_COMM_WORLD, &sendRequestArray[5]);
//last row
MPI_Isend(image_array[localRows-1],localColumns,MPI_UNSIGNED_CHAR,myRank+procSize, BOTTOM_ROW_TAG,MPI_COMM_WORLD, &sendRequestArray[6]);
//bottom left pixel
MPI_Isend(&image_array[localRows-1][0],1,MPI_UNSIGNED_CHAR, myRank-1+procSize, BOTTOM_LEFT_CORNER_PIXEL_TAG, MPI_COMM_WORLD, &sendRequestArray[7]);
if (RGB ==1){
//first column RGB
MPI_Isend(&image_array[0][1],1,column,myRank-1, LEFT_COLUMN_GREEN_TAG, MPI_COMM_WORLD, &sendRequestArray[8]);
MPI_Isend(&image_array[0][2],1,column,myRank-1, LEFT_COLUMN_BLUE_TAG, MPI_COMM_WORLD, &sendRequestArray[9]);
//top left pixel RGB
MPI_Isend(&image_array[0][1],1, MPI_UNSIGNED_CHAR, myRank-1-procSize, TOP_LEFT_CORNER_GREEN_PIXEL_TAG, MPI_COMM_WORLD, &sendRequestArray[10]);
MPI_Isend(&image_array[0][2],1, MPI_UNSIGNED_CHAR, myRank-1-procSize, TOP_LEFT_CORNER_BLUE_PIXEL_TAG, MPI_COMM_WORLD, &sendRequestArray[11]);
//top right pixel RGB
MPI_Isend(&image_array[0][localColumns-2],1, MPI_UNSIGNED_CHAR, myRank+1-procSize, TOP_RIGHT_CORNER_GREEN_PIXEL_TAG, MPI_COMM_WORLD, &sendRequestArray[12]);
MPI_Isend(&image_array[0][localColumns-1],1, MPI_UNSIGNED_CHAR, myRank+1-procSize, TOP_RIGHT_CORNER_BLUE_PIXEL_TAG, MPI_COMM_WORLD, &sendRequestArray[13]);
//last column RGB
// NOTE(review): BUG — the RIGHT_COLUMN_BLUE_TAG send below reuses
// &image_array[0][localColumns-2]; it should send
// &image_array[0][localColumns-1] (compare the analogous pair of sends in
// the other branches, e.g. sendRequestArray[11]/[12] in the topRow case).
MPI_Isend(&image_array[0][localColumns-2],1,column,myRank+1, RIGHT_COLUMN_GREEN_TAG, MPI_COMM_WORLD, &sendRequestArray[14]);
MPI_Isend(&image_array[0][localColumns-2],1,column,myRank+1, RIGHT_COLUMN_BLUE_TAG, MPI_COMM_WORLD, &sendRequestArray[15]);
//bottom right pixel RGB
MPI_Isend(&image_array[localRows-1][localColumns-2],1,MPI_UNSIGNED_CHAR,myRank+1+procSize, BOTTOM_RIGHT_CORNER_GREEN_PIXEL_TAG, MPI_COMM_WORLD, &sendRequestArray[16]);
MPI_Isend(&image_array[localRows-1][localColumns-1],1,MPI_UNSIGNED_CHAR,myRank+1+procSize, BOTTOM_RIGHT_CORNER_BLUE_PIXEL_TAG, MPI_COMM_WORLD, &sendRequestArray[17]);
//bottom left pixel RGB
MPI_Isend(&image_array[localRows-1][1],1,MPI_UNSIGNED_CHAR,myRank-1+procSize, BOTTOM_LEFT_CORNER_GREEN_PIXEL_TAG, MPI_COMM_WORLD, &sendRequestArray[18]);
MPI_Isend(&image_array[localRows-1][2],1,MPI_UNSIGNED_CHAR,myRank-1+procSize, BOTTOM_LEFT_CORNER_BLUE_PIXEL_TAG, MPI_COMM_WORLD, &sendRequestArray[19]);
}
//Receive
//bottom right pixel of other
MPI_Irecv(&receivedBottomRightCorner, 1, MPI_UNSIGNED_CHAR, myRank-1-procSize, BOTTOM_RIGHT_CORNER_PIXEL_TAG, MPI_COMM_WORLD, &receiveRequestArray[0]);
//last row of other
MPI_Irecv(receivedBottomRow, localColumns, MPI_UNSIGNED_CHAR, myRank-procSize, BOTTOM_ROW_TAG, MPI_COMM_WORLD, &receiveRequestArray[1]);
//bottom left pixel of other
// NOTE(review): BUG — the receive below stores into
// &receivedBottomRightCorner although the tag is
// BOTTOM_LEFT_CORNER_PIXEL_TAG; it should be &receivedBottomLeftCorner.
// As written it also overwrites (and races with) the bottom-right value
// already being received into the same variable by receiveRequestArray[0].
MPI_Irecv(&receivedBottomRightCorner, 1, MPI_UNSIGNED_CHAR, myRank+1-procSize, BOTTOM_LEFT_CORNER_PIXEL_TAG, MPI_COMM_WORLD, &receiveRequestArray[2]);
//first column of other
MPI_Irecv(receivedLeftColumn, localRows, MPI_UNSIGNED_CHAR, myRank+1, LEFT_COLUMN_TAG, MPI_COMM_WORLD, &receiveRequestArray[3]);
//top left pixel of other
MPI_Irecv(&receivedTopLeftCorner, 1, MPI_UNSIGNED_CHAR, myRank+1+procSize, TOP_LEFT_CORNER_PIXEL_TAG,MPI_COMM_WORLD, &receiveRequestArray[4]);
//first row of other
MPI_Irecv(receivedTopRow, localColumns, MPI_UNSIGNED_CHAR, myRank+procSize, TOP_ROW_TAG, MPI_COMM_WORLD, &receiveRequestArray[5]);
//top right pixel of other
MPI_Irecv(&receivedTopRightCorner, 1, MPI_UNSIGNED_CHAR, myRank+procSize-1, TOP_RIGHT_CORNER_PIXEL_TAG, MPI_COMM_WORLD, &receiveRequestArray[6]);
//last column of other
MPI_Irecv(receivedRightColumn, localRows, MPI_UNSIGNED_CHAR, myRank-1, RIGHT_COLUMN_TAG, MPI_COMM_WORLD, &receiveRequestArray[7]);
if (RGB == 1){
//bottom right pixel of other RGB
MPI_Irecv(&receivedBottomRightCornerGreen, 1, MPI_UNSIGNED_CHAR, myRank-1-procSize, BOTTOM_RIGHT_CORNER_GREEN_PIXEL_TAG, MPI_COMM_WORLD, &receiveRequestArray[8]);
MPI_Irecv(&receivedBottomRightCornerBlue, 1, MPI_UNSIGNED_CHAR, myRank-1-procSize, BOTTOM_RIGHT_CORNER_BLUE_PIXEL_TAG, MPI_COMM_WORLD, &receiveRequestArray[9]);
//bottom left pixel of other RGB
MPI_Irecv(&receivedBottomLeftCornerGreen, 1, MPI_UNSIGNED_CHAR, myRank+1-procSize, BOTTOM_LEFT_CORNER_GREEN_PIXEL_TAG, MPI_COMM_WORLD, &receiveRequestArray[10]);
MPI_Irecv(&receivedBottomLeftCornerBlue, 1, MPI_UNSIGNED_CHAR, myRank+1-procSize, BOTTOM_LEFT_CORNER_BLUE_PIXEL_TAG, MPI_COMM_WORLD, &receiveRequestArray[11]);
//first column of other RGB
MPI_Irecv(receivedLeftColumnGreen, localRows, MPI_UNSIGNED_CHAR, myRank+1, LEFT_COLUMN_GREEN_TAG, MPI_COMM_WORLD, &receiveRequestArray[12]);
MPI_Irecv(receivedLeftColumnBlue, localRows, MPI_UNSIGNED_CHAR, myRank+1, LEFT_COLUMN_BLUE_TAG, MPI_COMM_WORLD, &receiveRequestArray[13]);
//top left pixel of other RGB
MPI_Irecv(&receivedTopLeftCornerGreen, 1, MPI_UNSIGNED_CHAR, myRank+1+procSize, TOP_LEFT_CORNER_GREEN_PIXEL_TAG, MPI_COMM_WORLD, &receiveRequestArray[14]);
MPI_Irecv(&receivedTopLeftCornerBlue, 1, MPI_UNSIGNED_CHAR, myRank+1+procSize, TOP_LEFT_CORNER_BLUE_PIXEL_TAG, MPI_COMM_WORLD, &receiveRequestArray[15]);
//top right pixel of other RGB
MPI_Irecv(&receivedTopRightCornerGreen, 1, MPI_UNSIGNED_CHAR, myRank+procSize-1, TOP_RIGHT_CORNER_GREEN_PIXEL_TAG, MPI_COMM_WORLD, &receiveRequestArray[16]);
MPI_Irecv(&receivedTopRightCornerBlue, 1, MPI_UNSIGNED_CHAR, myRank+procSize-1, TOP_RIGHT_CORNER_BLUE_PIXEL_TAG, MPI_COMM_WORLD, &receiveRequestArray[17]);
//last column of other RGB
MPI_Irecv(receivedRightColumnGreen, localRows, MPI_UNSIGNED_CHAR, myRank-1, RIGHT_COLUMN_GREEN_TAG, MPI_COMM_WORLD, &receiveRequestArray[18]);
MPI_Irecv(receivedRightColumnBlue, localRows, MPI_UNSIGNED_CHAR, myRank-1, RIGHT_COLUMN_BLUE_TAG, MPI_COMM_WORLD, &receiveRequestArray[19]);
}
}
if(myRank == 0){
MPI_Waitall(1, &sendHArray, MPI_STATUSES_IGNORE);
}
else{
MPI_Waitall(1, &recvHArray, MPI_STATUSES_IGNORE);
}
int k = 0;
for (int i=0; i<3; i++){
for(int j=0; j<3; j++){
H[i][j] = startH[k];
k++;
}
}
sharpenedImageArray = (unsigned char**) malloc(localRows*sizeof(unsigned char*));
for(int i = 0; i <localRows; i++){
sharpenedImageArray[i] = (unsigned char*) malloc(localColumns*sizeof(unsigned char));
}
int startpoint=1;
if(RGB==1){
startpoint=3;
}
unsigned char centerPixel;
//#pragma omp parallel for schedule(static) collapse(2)
for(int i=1;i<localRows-1;i++){
for(int j = startpoint; j < localColumns-startpoint; j++){
centerPixel=0;
centerPixel += (unsigned char) ((double) image_array[i-1][j-(1*multiplier)])* H[0][0];
centerPixel += (unsigned char) ((double) image_array[i-1][j])* H[0][1];
centerPixel += (unsigned char) ((double) image_array[i-1][j+(1*multiplier)])*H[0][2];
centerPixel += (unsigned char) ((double) image_array[i][j-(1*multiplier)])*H[1][0];
centerPixel += (unsigned char) ((double) image_array[i][j])*H[1][1];
centerPixel += (unsigned char) ((double) image_array[i][j+(1*multiplier)])*H[1][2];
centerPixel += (unsigned char) ((double) image_array[i+1][j-(1*multiplier)])*H[2][0];
centerPixel += (unsigned char) ((double) image_array[i+1][j])*H[2][1];
centerPixel += (unsigned char) ((double) image_array[i+1][j+(1*multiplier)])*H[2][2];
sharpenedImageArray[i][j] = centerPixel;
}
}
MPI_Waitall(numberOfRequests,receiveRequestArray, MPI_STATUSES_IGNORE);
if(RGB==0){
SharpenEdges(image_array,receivedTopLeftCorner,receivedTopRightCorner,
receivedBottomRightCorner,receivedBottomLeftCorner,receivedTopRow,
receivedBottomRow,receivedRightColumn,receivedLeftColumn,
H,localRows,localColumns,&sharpenedImageArray,myRank);
}
else{
SharpenEdgesRGB(image_array,receivedTopLeftCorner,
receivedTopRightCorner,receivedBottomRightCorner,
receivedBottomLeftCorner,receivedTopRow,
receivedBottomRow,receivedRightColumn,
receivedLeftColumn,receivedTopLeftCornerGreen,receivedTopLeftCornerBlue,
receivedTopRightCornerGreen,receivedTopRightCornerBlue,
receivedBottomRightCornerGreen,receivedBottomRightCornerBlue,receivedBottomLeftCornerGreen,
receivedBottomLeftCornerBlue,receivedRightColumnGreen,receivedRightColumnBlue,
receivedLeftColumnGreen,receivedLeftColumnBlue,H,
localRows,localColumns,&sharpenedImageArray,myRank);
}
MPI_Waitall(numberOfRequests,sendRequestArray, MPI_STATUSES_IGNORE);
free(sendRequestArray);
free(receiveRequestArray);
//compare
int flag = 0;//flag to Send
int flagCounter = 0; //counter the flags that has receive /
for(int i=0; i< localRows; i++){
for(int j=0; j<localColumns; j++){
if(((image_array[i][j] - 0.9) != (double)sharpenedImageArray[i][j]) || ((double)image_array[i][j] != (sharpenedImageArray[i][j] - 0.9))){
flag=1;
break;
}
}
if(flag==1) break;
}
if(flag != 0){ //not the same image
for(int i=0; i<localRows; i++){
for(int j=0; j<localColumns; j++){
image_array[i][j] = sharpenedImageArray[i][j];
}
}
}
MPI_Allreduce(&flag, &flagCounter, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
if (myRank == 0){
if (flagCounter == 0){
imageSame = 1;
}
flagCounter=0;
}
flag=0;
repeats ++;
}
finishComm=MPI_Wtime();
elapsedComm = finishComm - startComm;
MPI_Allreduce(&elapsedComm, &global_time, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
if(myRank == 0){
mean = global_time/comSize;
printf("Mean time of process is %lf\n",mean);
}
MPI_File_seek( dest, offset, MPI_SEEK_SET );
startW = MPI_Wtime();
for(int i=0;i<localRows;i++){
MPI_File_write(dest, sharpenedImageArray[i],localColumns, MPI_UNSIGNED_CHAR,&status);
MPI_File_seek( dest, (procSize-1)*localColumns, MPI_SEEK_CUR );
}
finishW = MPI_Wtime();
elapsedW = finishW - startW;
MPI_Allreduce(&elapsedW, &global_time, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
if (myRank == 0){
mean = global_time/comSize;
printf("Mean time to write is %lf\n",mean);
}
MPI_File_close(&dest);
MPI_File_close(&fh);
for(int i=0;i<3;i++){
free(H[i]);
}
free(H);
free(receivedTopRow);
free(receivedRightColumn);
free(receivedBottomRow);
free(receivedLeftColumn);
free(receivedLeftColumnGreen);
free(receivedLeftColumnBlue);
free(receivedRightColumnGreen);
free(receivedRightColumnBlue);
for(int i=0; i<localRows; i++){
free(sharpenedImageArray[i]);
}
free(data);
free(image_array);
free(sharpenedImageArray);
MPI_Finalize();
exit(EXIT_SUCCESS);
}
|
nl_matrix.c | /*
* Copyright (c) 2004-2010, Bruno Levy
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* * Neither the name of the ALICE Project-Team nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* If you modify this software, you should include a notice giving the
* name of the person performing the modification, the date of modification,
* and the reason for such modification.
*
* Contact: Bruno Levy
*
* levy@loria.fr
*
* ALICE Project
* LORIA, INRIA Lorraine,
* Campus Scientifique, BP 239
* 54506 VANDOEUVRE LES NANCY CEDEX
* FRANCE
*
*/
#include "nl_matrix.h"
#include "nl_superlu.h"
#include "nl_cholmod.h"
#include "nl_mkl.h"
#include "nl_context.h"
#include "nl_blas.h"
/*
Some warnings about const cast in callback for
qsort() function.
*/
#ifdef __clang__
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif
/************************************************************************/
/* Invokes the virtual destructor of M, then releases M itself. NULL is OK. */
void nlDeleteMatrix(NLMatrix M) {
    if(M != NULL) {
        M->destroy_func(M);
        NL_DELETE(M);
    }
}
/*
 * Computes y = M x by dispatching to M's virtual mult function.
 * x has size M->n, y has size M->m.
 * NOTE(review): implementations appear to assume x and y do not alias —
 * confirm at call sites.
 */
void nlMultMatrixVector(
    NLMatrix M, const double* x, double* y
) {
    M->mult_func(M,x,y);
}
/************************************************************************/
/* Initializes c as an empty row/column with no storage attached. */
void nlRowColumnConstruct(NLRowColumn* c) {
    c->coeff = NULL;
    c->capacity = 0;
    c->size = 0;
}
/* Releases the coefficient array of c and resets it to the empty state. */
void nlRowColumnDestroy(NLRowColumn* c) {
    c->capacity = 0;
    c->size = 0;
    NL_DELETE_ARRAY(c->coeff);
}
/* Doubles the capacity of c (starts at 4 when empty). Size is unchanged. */
void nlRowColumnGrow(NLRowColumn* c) {
    if(c->capacity == 0) {
        c->capacity = 4;
        c->coeff = NL_NEW_ARRAY(NLCoeff, c->capacity);
    } else {
        c->capacity *= 2;
        c->coeff = NL_RENEW_ARRAY(NLCoeff, c->coeff, c->capacity);
    }
}
/*
 * Accumulates value at position index: if a coefficient with this index
 * already exists its value is incremented, otherwise a new coefficient is
 * appended (growing the storage when needed). Linear search — O(size).
 */
void nlRowColumnAdd(NLRowColumn* c, NLuint index, NLdouble value) {
    NLuint k = 0;
    while(k < c->size) {
        if(c->coeff[k].index == index) {
            c->coeff[k].value += value;
            return;
        }
        ++k;
    }
    if(c->capacity == c->size) {
        nlRowColumnGrow(c);
    }
    c->coeff[c->size].index = index;
    c->coeff[c->size].value = value;
    ++c->size;
}
/*
 * Appends (index,value) unconditionally; unlike nlRowColumnAdd(), no
 * search for an existing entry with the same index is performed.
 */
void nlRowColumnAppend(NLRowColumn* c, NLuint index, NLdouble value) {
    NLCoeff* entry;
    if(c->capacity == c->size) {
        nlRowColumnGrow(c);
    }
    entry = &(c->coeff[c->size]);
    entry->index = index;
    entry->value = value;
    ++c->size;
}
/* Logically empties c; the allocated storage is kept for reuse
   (contrast with nlRowColumnClear(), which frees it). */
void nlRowColumnZero(NLRowColumn* c) {
    c->size = 0;
}
/* Empties c and releases its storage (contrast with nlRowColumnZero()). */
void nlRowColumnClear(NLRowColumn* c) {
    NL_DELETE_ARRAY(c->coeff);
    c->capacity = 0;
    c->size = 0;
}
static int nlCoeffCompare(const void* p1, const void* p2) {
return (((NLCoeff*)(p2))->index < ((NLCoeff*)(p1))->index);
}
/* Sorts the coefficients of c by increasing index
   (used when converting to CRS format). */
void nlRowColumnSort(NLRowColumn* c) {
    qsort(c->coeff, c->size, sizeof(NLCoeff), nlCoeffCompare);
}
/******************************************************************************/
/* CRSMatrix data structure */
/**
 * \brief Destroys a NLCRSMatrix
 * \details Frees the arrays owned by the NLCRSMatrix; the structure
 *  itself is not freed (that is the caller's responsibility).
 * \param[in,out] M pointer to an NLCRSMatrix
 * \relates NLCRSMatrix
 */
static void nlCRSMatrixDestroy(NLCRSMatrix* M) {
    M->nslices = 0;
    M->n = 0;
    M->m = 0;
    NL_DELETE_ARRAY(M->sliceptr);
    NL_DELETE_ARRAY(M->colind);
    NL_DELETE_ARRAY(M->rowptr);
    NL_DELETE_ARRAY(M->val);
}
/*
 * Writes M to filename as a binary dump readable by nlCRSMatrixLoad().
 * Returns NL_TRUE on success, NL_FALSE on I/O error.
 * Fixes: the file was opened with "rb" (read mode) although it is
 * written to; the FILE* was never closed; fwrite results were unchecked.
 */
NLboolean nlCRSMatrixSave(NLCRSMatrix* M, const char* filename) {
    NLuint nnz = M->rowptr[M->m];
    NLboolean ok = NL_TRUE;
    FILE* f = fopen(filename, "wb");
    if(f == NULL) {
        nlError("nlCRSMatrixSave", "Could not open file");
        return NL_FALSE;
    }
    ok = ok && (fwrite(&M->m, sizeof(NLuint), 1, f) == 1);
    ok = ok && (fwrite(&M->n, sizeof(NLuint), 1, f) == 1);
    ok = ok && (fwrite(&nnz, sizeof(NLuint), 1, f) == 1);
    ok = ok && (fwrite(M->rowptr, sizeof(NLuint), M->m+1, f) == M->m+1);
    ok = ok && (fwrite(M->colind, sizeof(NLuint), nnz, f) == nnz);
    ok = ok && (fwrite(M->val, sizeof(double), nnz, f) == nnz);
    /* fclose() flushes buffered data: its result matters for writes. */
    if(fclose(f) != 0) {
        ok = NL_FALSE;
    }
    if(!ok) {
        nlError("nlCRSMatrixSave", "Error while writing file");
    }
    return ok;
}
/*
 * Loads M from a binary file produced by nlCRSMatrixSave(); allocates
 * rowptr/colind/val and creates a single trivial slice.
 * Returns NL_TRUE on success, NL_FALSE on I/O error or truncated file.
 * Fixes: the FILE* was leaked on the truncated-file error path, and the
 * error was reported under the wrong function name ("nlCRSMatrixSave").
 */
NLboolean nlCRSMatrixLoad(NLCRSMatrix* M, const char* filename) {
    NLuint nnz = 0;
    FILE* f = fopen(filename, "rb");
    NLboolean truncated = NL_FALSE;
    if(f == NULL) {
        nlError("nlCRSMatrixLoad", "Could not open file");
        return NL_FALSE;
    }
    truncated = truncated || (
        fread(&M->m, sizeof(NLuint), 1, f) != 1 ||
        fread(&M->n, sizeof(NLuint), 1, f) != 1 ||
        fread(&nnz, sizeof(NLuint), 1, f) != 1
    );
    if(truncated) {
        M->rowptr = NULL;
        M->colind = NULL;
        M->val = NULL;
    } else {
        M->rowptr = NL_NEW_ARRAY(NLuint, M->m+1);
        M->colind = NL_NEW_ARRAY(NLuint, nnz);
        M->val = NL_NEW_ARRAY(double, nnz);
        truncated = truncated || (
            fread(M->rowptr, sizeof(NLuint), M->m+1, f) != M->m+1 ||
            fread(M->colind, sizeof(NLuint), nnz, f) != nnz ||
            fread(M->val, sizeof(double), nnz, f) != nnz
        );
    }
    if(truncated) {
        nlError("nlCRSMatrixLoad", "File appears to be truncated");
        NL_DELETE_ARRAY(M->rowptr);
        NL_DELETE_ARRAY(M->colind);
        NL_DELETE_ARRAY(M->val);
        fclose(f); /* was leaked on this path */
        return NL_FALSE;
    }
    /* One slice covering all rows: the parallel product degenerates
       to a single sequential slice. */
    M->nslices = 1;
    M->sliceptr = NL_NEW_ARRAY(NLuint, M->nslices+1);
    M->sliceptr[0] = 0;
    M->sliceptr[1] = M->m;
    fclose(f);
    return NL_TRUE;
}
/* Returns the number of nonzero coefficients of M
   (the last entry of rowptr, by the CRS invariant). */
NLuint nlCRSMatrixNNZ(NLCRSMatrix* M) {
    return M->rowptr[M->m];
}
/*
 * Computes y[i] = sum_j M(i,j)*x[j] for rows Ibegin..Iend-1 of a
 * CRS matrix (one slice of the parallel product).
 */
static void nlCRSMatrixMultSlice(
    NLCRSMatrix* M, const double* x, double* y, NLuint Ibegin, NLuint Iend
) {
    NLuint row;
    for(row=Ibegin; row<Iend; ++row) {
        NLuint b = M->rowptr[row];
        NLuint e = M->rowptr[row+1];
        double acc = 0.0;
        NLuint k;
        for(k=b; k<e; ++k) {
            acc += M->val[k] * x[M->colind[k]];
        }
        y[row] = acc;
    }
}
/**
 * \brief Computes a matrix-vector product y = M x
 * \details With symmetric storage only one triangle is stored, so each
 *  stored off-diagonal coefficient contributes to two entries of y
 *  (sequential loop). Otherwise the product runs slice by slice, in
 *  parallel when OpenMP is enabled.
 * \param[in] M a pointer to the matrix
 * \param[in] x the vector to be multiplied, size = M->n
 * \param[out] y where to store the result, size = M->m
 * \relates NLCRSMatrix
 */
static void nlCRSMatrixMult(
    NLCRSMatrix* M, const double* x, double* y
) {
    int slice;
    int nslices = (int)(M->nslices);
    NLuint i,j,jj;
    NLdouble a;
    if(M->symmetric_storage) {
        /* Clear y first: the loop below accumulates into it. */
        for(i=0; i<M->m; ++i) {
            y[i] = 0.0;
        }
        for(i=0; i<M->m; ++i) {
            for(jj=M->rowptr[i]; jj<M->rowptr[i+1]; ++jj) {
                a = M->val[jj];
                j = M->colind[jj];
                y[i] += a * x[j];
                if(j != i) {
                    /* mirror the (unstored) transposed coefficient */
                    y[j] += a * x[i];
                }
            }
        }
    } else {
#if defined(_OPENMP)
#pragma omp parallel for private(slice)
#endif
        for(slice=0; slice<nslices; ++slice) {
            nlCRSMatrixMultSlice(
                M,x,y,M->sliceptr[slice],M->sliceptr[slice+1]
            );
        }
    }
    /* 2 flops (multiply + add) per stored nonzero */
    nlHostBlas()->flops += (NLulong)(2*nlCRSMatrixNNZ(M));
}
/*
 * Initializes M as an m x n CRS matrix with room for nnz coefficients,
 * partitioned into nslices row slices for the parallel product.
 * Uses the MKL product kernel when the MKL extension provides one.
 */
void nlCRSMatrixConstruct(
    NLCRSMatrix* M, NLuint m, NLuint n, NLuint nnz, NLuint nslices
) {
    M->type = NL_MATRIX_CRS;
    M->m = m;
    M->n = n;
    M->destroy_func = (NLDestroyMatrixFunc)nlCRSMatrixDestroy;
    M->mult_func = (NLMultMatrixVector_MKL != NULL)
        ? (NLMultMatrixVectorFunc)NLMultMatrixVector_MKL
        : (NLMultMatrixVectorFunc)nlCRSMatrixMult;
    M->nslices = nslices;
    M->symmetric_storage = NL_FALSE;
    M->rowptr = NL_NEW_ARRAY(NLuint, m+1);
    M->colind = NL_NEW_ARRAY(NLuint, nnz);
    M->val = NL_NEW_ARRAY(double, nnz);
    M->sliceptr = NL_NEW_ARRAY(NLuint, nslices+1);
}
/*
 * Initializes M as an n x n CRS matrix with symmetric storage (only one
 * triangle stored) and room for nnz coefficients. No slices: the
 * symmetric product is sequential.
 */
void nlCRSMatrixConstructSymmetric(
    NLCRSMatrix* M, NLuint n, NLuint nnz
) {
    M->type = NL_MATRIX_CRS;
    M->m = n;
    M->n = n;
    M->destroy_func = (NLDestroyMatrixFunc)nlCRSMatrixDestroy;
    M->mult_func = (NLMultMatrixVectorFunc)nlCRSMatrixMult;
    M->nslices = 0;
    M->symmetric_storage = NL_TRUE;
    M->rowptr = NL_NEW_ARRAY(NLuint, n+1);
    M->colind = NL_NEW_ARRAY(NLuint, nnz);
    M->val = NL_NEW_ARRAY(double, nnz);
    M->sliceptr = NULL;
}
/*
 * Initializes M as an m x n CRS matrix in 'pattern' state: only rowptr
 * is allocated; val/colind stay NULL until nlCRSMatrixPatternCompile().
 */
void nlCRSMatrixConstructPattern(
    NLCRSMatrix* M, NLuint m, NLuint n
) {
    M->type = NL_MATRIX_CRS;
    M->m = m;
    M->n = n;
    M->destroy_func = (NLDestroyMatrixFunc)nlCRSMatrixDestroy;
    M->mult_func = (NLMultMatrixVector_MKL != NULL)
        ? (NLMultMatrixVectorFunc)NLMultMatrixVector_MKL
        : (NLMultMatrixVectorFunc)nlCRSMatrixMult;
    M->nslices = 0;
    M->symmetric_storage = NL_FALSE;
    M->rowptr = NL_NEW_ARRAY(NLuint, m+1);
    M->colind = NULL;
    M->val = NULL;
    M->sliceptr = NULL;
}
/*
 * Initializes M as an n x n symmetric-storage CRS matrix in 'pattern'
 * state (see nlCRSMatrixConstructPattern()).
 */
void nlCRSMatrixConstructPatternSymmetric(
    NLCRSMatrix* M, NLuint n
) {
    M->type = NL_MATRIX_CRS;
    M->m = n;
    M->n = n;
    M->destroy_func = (NLDestroyMatrixFunc)nlCRSMatrixDestroy;
    M->mult_func = (NLMultMatrixVectorFunc)nlCRSMatrixMult;
    M->nslices = 0;
    M->symmetric_storage = NL_TRUE;
    M->rowptr = NL_NEW_ARRAY(NLuint, n+1);
    M->colind = NULL;
    M->val = NULL;
    M->sliceptr = NULL;
}
/*
 * Declares that row i of a matrix in 'pattern' state will hold n
 * coefficients. The length is stashed in rowptr[i+1]; the prefix sum in
 * nlCRSMatrixPatternCompile() later turns rowptr into real row pointers.
 */
void nlCRSMatrixPatternSetRowLength(
    NLCRSMatrix* M, NLuint i, NLuint n
) {
    nl_assert(i < M->m);
    nl_assert(n <= M->n);
    /* Test that matrix is in 'pattern' state */
    nl_assert(M->colind == NULL);
    nl_assert(M->val == NULL);
    /* Store row length in rowptr */
    M->rowptr[i+1] = n;
}
void nlCRSMatrixComputeSlices(NLCRSMatrix* CRS);

/*
 * Partitions the rows of CRS into nslices contiguous slices of roughly
 * equal nonzero count, stored in sliceptr (used by the parallel product).
 * Fix: the previous version incremented cur_row *before* accumulating
 * rowptr[cur_row+1]-rowptr[cur_row]; with cur_row reaching m this read
 * rowptr[m+1], one past the end of the (m+1)-entry array, and the NNZ
 * tally was shifted by one row. Accumulating the current row before
 * advancing keeps cur_NNZ == rowptr[cur_row] and stays in bounds.
 * NOTE(review): callers set nslices >= 1 before calling; nslices == 0
 * would divide by zero — confirmed for the call sites in this file.
 */
void nlCRSMatrixComputeSlices(NLCRSMatrix* CRS) {
    NLuint slice_size = CRS->rowptr[CRS->m] / CRS->nslices;
    NLuint slice, cur_bound, cur_NNZ, cur_row;
    /* Create "slices" to be used by parallel sparse matrix vector product */
    if(CRS->sliceptr != NULL) {
        cur_bound = slice_size;
        cur_NNZ = 0;
        cur_row = 0;
        CRS->sliceptr[0]=0;
        for(slice=1; slice<CRS->nslices; ++slice) {
            while(cur_NNZ < cur_bound && cur_row < CRS->m) {
                cur_NNZ += CRS->rowptr[cur_row+1] - CRS->rowptr[cur_row];
                ++cur_row;
            }
            CRS->sliceptr[slice] = cur_row;
            cur_bound += slice_size;
        }
        CRS->sliceptr[CRS->nslices]=CRS->m;
    }
}
/*
 * Converts a CRS matrix from 'pattern' state (per-row lengths in
 * rowptr[1..m]) to 'compiled' state: rowptr becomes a prefix sum,
 * val/colind are allocated, and every colind entry is marked free with
 * (NLuint)(-1) so that nlCRSMatrixAdd() can fill slots incrementally.
 */
void nlCRSMatrixPatternCompile(NLCRSMatrix* M) {
    NLuint nslices = 8; /* TODO get number of cores */
    NLuint i;
    NLuint nnz,k;
    /* Test that matrix is in 'pattern' state */
    nl_assert(M->colind == NULL);
    nl_assert(M->val == NULL);
    /* Prefix sum: per-row lengths -> row start offsets. */
    for(i=0; i<M->m; ++i) {
        M->rowptr[i+1] += M->rowptr[i];
    }
    nnz = M->rowptr[M->m];
    M->val = NL_NEW_ARRAY(double, nnz);
    M->colind = NL_NEW_ARRAY(NLuint, nnz);
    /* (NLuint)(-1) marks a not-yet-assigned column slot. */
    for(k=0; k<nnz; ++k) {
        M->colind[k] = (NLuint)(-1);
    }
    M->sliceptr = NL_NEW_ARRAY(NLuint, nslices+1);
    M->nslices = nslices;
    nlCRSMatrixComputeSlices(M);
}
/*
 * Accumulates value into M(i,j) for a matrix in 'compiled' state.
 * Scans row i for an existing coefficient with column j, else claims the
 * first free slot (colind == (NLuint)(-1), set by PatternCompile).
 * With symmetric storage, upper-triangle entries (j > i) are ignored.
 */
void nlCRSMatrixAdd(
    NLCRSMatrix* M, NLuint i, NLuint j, NLdouble value
) {
    NLuint jj;
    /* Test that matrix is in 'compiled' state */
    nl_assert(M->colind != NULL);
    nl_assert(M->val != NULL);
    nl_assert(i < M->m);
    nl_assert(j < M->n);
    if(M->symmetric_storage && j > i) {
        return;
    }
    for(jj=M->rowptr[i]; jj<M->rowptr[i+1]; ++jj) {
        if(M->colind[jj] == j) {
            M->val[jj] += value;
            return;
        } else if(M->colind[jj] == (NLuint)(-1)) {
            /* first free slot in row i: claim it for column j
               (val was zero-initialized, so += is correct) */
            M->colind[jj] = j;
            M->val[jj] += value;
            return;
        }
    }
    /* If this line is reached, it means that too many coefficients
     * were added to row j, i.e. a number of coefficients larger than
     * the row length previously declared with nlCRSMatrixPatternSetRowLength()
     */
    nl_assert_not_reached;
}
/******************************************************************************/
/* SparseMatrix data structure */
/* Frees the dynamic rows and/or columns of M and clears the
   corresponding storage flags. */
static void nlSparseMatrixDestroyRowColumns(NLSparseMatrix* M) {
    NLuint k;
    if(M->storage & NL_MATRIX_STORE_ROWS) {
        for(k=0; k<M->m; ++k) {
            nlRowColumnDestroy(&(M->row[k]));
        }
        NL_DELETE_ARRAY(M->row);
    }
    M->storage = (NLenum)((int)(M->storage) & ~NL_MATRIX_STORE_ROWS);
    if(M->storage & NL_MATRIX_STORE_COLUMNS) {
        for(k=0; k<M->n; ++k) {
            nlRowColumnDestroy(&(M->column[k]));
        }
        NL_DELETE_ARRAY(M->column);
    }
    M->storage = (NLenum)((int)(M->storage) & ~NL_MATRIX_STORE_COLUMNS);
}
/*
 * Destroys a dynamic sparse matrix: frees its rows/columns and the
 * cached diagonal. The NLSparseMatrix structure itself is not freed.
 */
void nlSparseMatrixDestroy(NLSparseMatrix* M) {
    nl_assert(M->type == NL_MATRIX_SPARSE_DYNAMIC);
    nlSparseMatrixDestroyRowColumns(M);
    NL_DELETE_ARRAY(M->diag);
#ifdef NL_PARANOID
    NL_CLEAR(NLSparseMatrix,M);
#endif
}
/*
 * Accumulates value into M(i,j) in each active storage (rows and/or
 * columns). With symmetric storage, strict upper-triangle entries
 * (j > i) are dropped. Diagonal entries are mirrored in M->diag.
 */
void nlSparseMatrixAdd(NLSparseMatrix* M, NLuint i, NLuint j, NLdouble value) {
    nl_parano_range_assert(i, 0, M->m - 1);
    nl_parano_range_assert(j, 0, M->n - 1);
    if((M->storage & NL_MATRIX_STORE_SYMMETRIC) && (j > i)) {
        return;
    }
    if(i == j) {
        M->diag[i] += value;
    }
    if(M->storage & NL_MATRIX_STORE_ROWS) {
        nlRowColumnAdd(&(M->row[i]), j, value);
    }
    if(M->storage & NL_MATRIX_STORE_COLUMNS) {
        nlRowColumnAdd(&(M->column[j]), i, value);
    }
}
/*
 * M += mul * N, both dynamic sparse matrices of the same size.
 * Iterates N through whichever storage it has (rows preferred);
 * symmetric N requires symmetric M so dropped upper-triangle entries
 * stay consistent.
 */
static void nlSparseMatrixAddSparseMatrix(
    NLSparseMatrix* M, double mul, const NLSparseMatrix* N
) {
    NLuint i,j,ii,jj;
    nl_assert(M->m == N->m);
    nl_assert(M->n == N->n);
    if(N->storage & NL_MATRIX_STORE_SYMMETRIC) {
        nl_assert(M->storage & NL_MATRIX_STORE_SYMMETRIC);
    }
    if(N->storage & NL_MATRIX_STORE_ROWS) {
        for(i=0; i<N->m; ++i) {
            for(jj=0; jj<N->row[i].size; ++jj) {
                nlSparseMatrixAdd(
                    M,
                    i, N->row[i].coeff[jj].index,
                    mul*N->row[i].coeff[jj].value
                );
            }
        }
    } else {
        nl_assert(N->storage & NL_MATRIX_STORE_COLUMNS);
        for(j=0; j<N->n; ++j) {
            for(ii=0; ii<N->column[j].size; ++ii) {
                nlSparseMatrixAdd(
                    M,
                    N->column[j].coeff[ii].index, j,
                    mul*N->column[j].coeff[ii].value
                );
            }
        }
    }
}
/* M += mul * N where N is in CRS format (M remains dynamic). */
static void nlSparseMatrixAddCRSMatrix(
    NLSparseMatrix* M, double mul, const NLCRSMatrix* N
) {
    NLuint row, k;
    nl_assert(M->m == N->m);
    nl_assert(M->n == N->n);
    for(row=0; row<M->m; ++row) {
        for(k=N->rowptr[row]; k<N->rowptr[row+1]; ++k) {
            nlSparseMatrixAdd(M, row, N->colind[k], mul*N->val[k]);
        }
    }
}
/* M += mul * N, dispatching on N's representation (dynamic or CRS). */
void nlSparseMatrixAddMatrix(
    NLSparseMatrix* M, double mul, const NLMatrix N
) {
    nl_assert(M->m == N->m);
    nl_assert(M->n == N->n);
    switch(N->type) {
    case NL_MATRIX_SPARSE_DYNAMIC:
        nlSparseMatrixAddSparseMatrix(M, mul, (const NLSparseMatrix*)N);
        break;
    case NL_MATRIX_CRS:
        nlSparseMatrixAddCRSMatrix(M, mul, (const NLCRSMatrix*)N);
        break;
    default:
        nl_assert_not_reached;
    }
}
/* Sets all coefficients of M to zero while keeping allocated storage
   (see nlSparseMatrixClear() to also release it). */
void nlSparseMatrixZero( NLSparseMatrix* M) {
    NLuint k;
    if(M->storage & NL_MATRIX_STORE_ROWS) {
        for(k=0; k<M->m; ++k) {
            nlRowColumnZero(&(M->row[k]));
        }
    }
    if(M->storage & NL_MATRIX_STORE_COLUMNS) {
        for(k=0; k<M->n; ++k) {
            nlRowColumnZero(&(M->column[k]));
        }
    }
    NL_CLEAR_ARRAY(NLdouble, M->diag, M->diag_size);
}
/* Empties M and releases per-row/column coefficient storage
   (see nlSparseMatrixZero() to keep the storage). */
void nlSparseMatrixClear( NLSparseMatrix* M) {
    NLuint k;
    if(M->storage & NL_MATRIX_STORE_ROWS) {
        for(k=0; k<M->m; ++k) {
            nlRowColumnClear(&(M->row[k]));
        }
    }
    if(M->storage & NL_MATRIX_STORE_COLUMNS) {
        for(k=0; k<M->n; ++k) {
            nlRowColumnClear(&(M->column[k]));
        }
    }
    NL_CLEAR_ARRAY(NLdouble, M->diag, M->diag_size);
}
/* Returns the number of stored coefficients, counted from whichever
   storage (rows preferred, else columns) is present. */
NLuint nlSparseMatrixNNZ( NLSparseMatrix* M) {
    NLuint result = 0;
    NLuint k;
    if(M->storage & NL_MATRIX_STORE_ROWS) {
        for(k = 0; k < M->m; ++k) {
            result += M->row[k].size;
        }
    } else if(M->storage & NL_MATRIX_STORE_COLUMNS) {
        for(k = 0; k < M->n; ++k) {
            result += M->column[k].size;
        }
    } else {
        nl_assert_not_reached;
    }
    return result;
}
/* Sorts every stored row and column of M by increasing index. */
void nlSparseMatrixSort( NLSparseMatrix* M) {
    NLuint k;
    if(M->storage & NL_MATRIX_STORE_ROWS) {
        for(k = 0; k < M->m; ++k) {
            nlRowColumnSort(&(M->row[k]));
        }
    }
    if(M->storage & NL_MATRIX_STORE_COLUMNS) {
        for(k = 0; k < M->n; ++k) {
            nlRowColumnSort(&(M->column[k]));
        }
    }
}
/*
 * Adds s times row i2 to row i1: M(i1,j) += s * M(i2,j) for all j.
 * NOTE(review): coeff is re-read through Ri2 on every iteration, so a
 * realloc of row i1 cannot leave a dangling pointer here; but if
 * i1 == i2 and the add path ever appends to the row being traversed,
 * newly appended entries would be visited — confirm callers never pass
 * i1 == i2.
 */
void nlSparseMatrixMAddRow(
    NLSparseMatrix* M, NLuint i1, double s, NLuint i2
) {
    NLuint jj;
    NLRowColumn* Ri2 = &(M->row[i2]);
    NLCoeff* c = NULL;
    nl_debug_assert(i1 < M->m);
    nl_debug_assert(i2 < M->m);
    for(jj=0; jj<Ri2->size; ++jj) {
        c = &(Ri2->coeff[jj]);
        nlSparseMatrixAdd(M, i1, c->index, s*c->value);
    }
}
/* Multiplies row i by s, including the cached diagonal entry.
   Requires rows-only storage (columns would go out of sync). */
void nlSparseMatrixScaleRow(
    NLSparseMatrix* M, NLuint i, double s
) {
    NLRowColumn* Ri = &(M->row[i]);
    NLuint k;
    nl_assert(M->storage & NL_MATRIX_STORE_ROWS);
    nl_assert(!(M->storage & NL_MATRIX_STORE_COLUMNS));
    nl_debug_assert(i < M->m);
    for(k=0; k<Ri->size; ++k) {
        Ri->coeff[k].value *= s;
    }
    if(i < M->diag_size) {
        M->diag[i] *= s;
    }
}
/* Empties row i (its storage is kept) and zeroes its diagonal mirror. */
void nlSparseMatrixZeroRow(
    NLSparseMatrix* M, NLuint i
) {
    nl_debug_assert(i < M->m);
    M->row[i].size = 0;
    if(i < M->diag_size) {
        M->diag[i] = 0.0;
    }
}
/*****************************************************************************/
/* SparseMatrix x Vector routines, internal helper routines */
/*
 * y = A x when A is stored by rows with symmetric storage: each stored
 * off-diagonal entry contributes to both y[row] and y[index].
 * Zeroing y[row] lazily is safe here: row storage keeps indices <= row,
 * so no earlier row writes into y[row] before it is cleared.
 */
static void nlSparseMatrix_mult_rows_symmetric(
    NLSparseMatrix* A,
    const NLdouble* x,
    NLdouble* y
) {
    NLuint row, k;
    NLuint nrows = A->m;
    for(row=0; row<nrows; ++row) {
        const NLRowColumn* R = &(A->row[row]);
        y[row] = 0;
        for(k=0; k<R->size; ++k) {
            const NLCoeff* e = &(R->coeff[k]);
            y[row] += e->value * x[e->index];
            if(e->index != row) {
                y[e->index] += e->value * x[row];
            }
        }
    }
}
/*
 * y = A x when A is stored by rows (general storage).
 * Parallelized with OpenMP over rows; each iteration writes a distinct
 * y[i], so no synchronization is needed.
 */
static void nlSparseMatrix_mult_rows(
    NLSparseMatrix* A,
    const NLdouble* x,
    NLdouble* y
) {
    /*
     * Note: OpenMP does not like unsigned ints
     * (causes some floating point exceptions),
     * therefore I use here signed ints for all
     * indices.
     */
    int m = (int)(A->m);
    int i,ij;
    NLCoeff* c = NULL;
    NLRowColumn* Ri = NULL;
#if defined(_OPENMP)
#pragma omp parallel for private(i,ij,c,Ri)
#endif
    for(i=0; i<m; i++) {
        Ri = &(A->row[i]);
        y[i] = 0;
        for(ij=0; ij<(int)(Ri->size); ij++) {
            c = &(Ri->coeff[ij]);
            y[i] += c->value * x[c->index];
        }
    }
}
/*
 * y = A x when A is stored by columns with symmetric storage.
 * Fix: the previous version zeroed y[j] lazily inside the column loop,
 * but symmetric assembly (nlSparseMatrixAdd drops j > i) leaves column j
 * holding row indices i >= j, so the direct update y[c->index] writes
 * AHEAD of j and the later y[j] = 0 erased those contributions. y is
 * now cleared up-front (as nlCRSMatrixMult does for its symmetric path),
 * which computes the symmetric product correctly regardless of which
 * triangle is stored.
 */
static void nlSparseMatrix_mult_cols_symmetric(
    NLSparseMatrix* A,
    const NLdouble* x,
    NLdouble* y
) {
    NLuint n = A->n;
    NLuint j,ii;
    NLCoeff* c = NULL;
    NL_CLEAR_ARRAY(NLdouble, y, A->m);
    for(j=0; j<n; j++) {
        NLRowColumn* Cj = &(A->column[j]);
        for(ii=0; ii<Cj->size; ii++) {
            c = &(Cj->coeff[ii]);
            y[c->index] += c->value * x[j];
            if(j != c->index) {
                /* mirror the (unstored) transposed coefficient */
                y[j] += c->value * x[c->index];
            }
        }
    }
}
/* y = A x when A is stored by columns (general storage): y is cleared,
   then each column j scatters value * x[j] into y. */
static void nlSparseMatrix_mult_cols(
    NLSparseMatrix* A,
    const NLdouble* x,
    NLdouble* y
) {
    NLuint col, k;
    NLuint ncols = A->n;
    NL_CLEAR_ARRAY(NLdouble, y, A->m);
    for(col=0; col<ncols; ++col) {
        const NLRowColumn* C = &(A->column[col]);
        for(k=0; k<C->size; ++k) {
            y[C->coeff[k].index] += C->coeff[k].value * x[col];
        }
    }
}
/*
 * Computes y = A x for a dynamic sparse matrix, choosing the kernel
 * that matches the storage mode (rows/columns x symmetric/general),
 * and accounts 2 flops per stored nonzero.
 */
void nlSparseMatrixMult(
    NLSparseMatrix* A, const NLdouble* x, NLdouble* y
) {
    NLboolean sym = (A->storage & NL_MATRIX_STORE_SYMMETRIC) != 0;
    nl_assert(A->type == NL_MATRIX_SPARSE_DYNAMIC);
    if(A->storage & NL_MATRIX_STORE_ROWS) {
        if(sym) {
            nlSparseMatrix_mult_rows_symmetric(A, x, y);
        } else {
            nlSparseMatrix_mult_rows(A, x, y);
        }
    } else if(sym) {
        nlSparseMatrix_mult_cols_symmetric(A, x, y);
    } else {
        nlSparseMatrix_mult_cols(A, x, y);
    }
    nlHostBlas()->flops += (NLulong)(2*nlSparseMatrixNNZ(A));
}
/*
 * Allocates and constructs a dynamic sparse matrix of size m x n.
 * storage is a bitwise-or of NL_MATRIX_STORE_ROWS / _COLUMNS / _SYMMETRIC.
 * Ownership: the caller releases the result with nlDeleteMatrix().
 */
NLMatrix nlSparseMatrixNew(
    NLuint m, NLuint n, NLenum storage
) {
    NLSparseMatrix* result = NL_NEW(NLSparseMatrix);
    nlSparseMatrixConstruct(result, m, n, storage);
    return (NLMatrix)result;
}
/*
 * Initializes a dynamic sparse matrix of size m x n.
 * storage: bitwise-or of NL_MATRIX_STORE_ROWS / _COLUMNS / _SYMMETRIC.
 * Allocates the requested row/column containers plus the diagonal
 * mirror of size min(m,n).
 * Fix: the row-construction loop iterated i < n over the m-entry row
 * array — out-of-bounds writes when n > m, unconstructed rows when
 * n < m. It now iterates i < m.
 */
void nlSparseMatrixConstruct(
    NLSparseMatrix* M, NLuint m, NLuint n, NLenum storage
) {
    NLuint i;
    M->m = m;
    M->n = n;
    M->type = NL_MATRIX_SPARSE_DYNAMIC;
    M->destroy_func = (NLDestroyMatrixFunc)nlSparseMatrixDestroy;
    M->mult_func = (NLMultMatrixVectorFunc)nlSparseMatrixMult;
    M->storage = storage;
    if(storage & NL_MATRIX_STORE_ROWS) {
        M->row = NL_NEW_ARRAY(NLRowColumn, m);
        M->row_capacity = m;
        for(i=0; i<m; i++) {
            nlRowColumnConstruct(&(M->row[i]));
        }
    } else {
        M->row = NULL;
        M->row_capacity = 0;
    }
    if(storage & NL_MATRIX_STORE_COLUMNS) {
        M->column = NL_NEW_ARRAY(NLRowColumn, n);
        M->column_capacity = n;
        for(i=0; i<n; i++) {
            nlRowColumnConstruct(&(M->column[i]));
        }
    } else {
        M->column = NULL;
        M->column_capacity = 0;
    }
    M->diag_size = MIN(m,n);
    M->diag_capacity = M->diag_size;
    M->diag = NL_NEW_ARRAY(NLdouble, M->diag_size);
}
/**
 * \brief Adjusts the size of the diagonal of
 *  an NLSparseMatrix after the number of rows or
 *  columns have changed.
 * \details Fixes two defects: the capacity was doubled at most once,
 *  which is insufficient when the diagonal grows by more than 2x in a
 *  single call; and entries between the old size and the (realloc'd)
 *  capacity were only zeroed inside the grow branch, so a later growth
 *  within the existing capacity exposed uninitialized values that
 *  nlSparseMatrixAdd() would then accumulate into. New entries are now
 *  always zeroed.
 * \param[in,out] M a pointer to the sparse matrix.
 */
static void adjust_diag(NLSparseMatrix* M) {
    NLuint new_diag_size = MIN(M->m, M->n);
    NLuint i;
    if(new_diag_size > M->diag_size) {
        if(new_diag_size > M->diag_capacity) {
            if(M->diag_capacity == 0) {
                M->diag_capacity = 16;
            }
            /* keep doubling until the new size fits */
            while(M->diag_capacity < new_diag_size) {
                M->diag_capacity *= 2;
            }
            M->diag = NL_RENEW_ARRAY(double, M->diag, M->diag_capacity);
        }
        /* zero the newly exposed entries in all cases */
        for(i=M->diag_size; i<new_diag_size; ++i) {
            M->diag[i] = 0.0;
        }
        M->diag_size = new_diag_size;
    }
}
/* Appends one (empty) row to M, growing the rows array geometrically
   when rows storage is enabled, and extends the diagonal if needed. */
void nlSparseMatrixAddRow( NLSparseMatrix* M) {
    ++M->m;
    if(M->storage & NL_MATRIX_STORE_ROWS) {
        if(M->row_capacity < M->m) {
            if(M->row_capacity == 0) {
                M->row_capacity = 16;
            } else {
                M->row_capacity *= 2;
            }
            M->row = NL_RENEW_ARRAY(
                NLRowColumn, M->row, M->row_capacity
            );
        }
        nlRowColumnConstruct(&(M->row[M->m-1]));
    }
    adjust_diag(M);
}
/* Appends one (empty) column to M, growing the columns array
   geometrically when columns storage is enabled, and extends the
   diagonal if needed. */
void nlSparseMatrixAddColumn( NLSparseMatrix* M) {
    ++M->n;
    if(M->storage & NL_MATRIX_STORE_COLUMNS) {
        if(M->column_capacity < M->n) {
            if(M->column_capacity == 0) {
                M->column_capacity = 16;
            } else {
                M->column_capacity *= 2;
            }
            M->column = NL_RENEW_ARRAY(
                NLRowColumn, M->column, M->column_capacity
            );
        }
        nlRowColumnConstruct(&(M->column[M->n-1]));
    }
    adjust_diag(M);
}
/*****************************************************************/
/*
 * Converts a dynamic sparse matrix (rows storage required) into a new
 * CRS matrix; symmetric storage is preserved. M's rows are sorted by
 * index as a side effect, then copied row by row.
 * Ownership: the caller frees the result with nlDeleteMatrix().
 */
NLMatrix nlCRSMatrixNewFromSparseMatrix(NLSparseMatrix* M) {
    NLuint nnz = nlSparseMatrixNNZ(M);
    NLuint nslices = 8; /* TODO: get number of cores */
    NLuint i,ij,k;
    NLCRSMatrix* CRS = NL_NEW(NLCRSMatrix);
    nl_assert(M->storage & NL_MATRIX_STORE_ROWS);
    if(M->storage & NL_MATRIX_STORE_SYMMETRIC) {
        nl_assert(M->m == M->n);
        nlCRSMatrixConstructSymmetric(CRS, M->n, nnz);
    } else {
        nlCRSMatrixConstruct(CRS, M->m, M->n, nnz, nslices);
    }
    nlSparseMatrixSort(M);
    /* Convert matrix to CRS format */
    k=0;
    for(i=0; i<M->m; ++i) {
        NLRowColumn* Ri = &(M->row[i]);
        CRS->rowptr[i] = k;
        for(ij=0; ij<Ri->size; ij++) {
            NLCoeff* c = &(Ri->coeff[ij]);
            CRS->val[k] = c->value;
            CRS->colind[k] = c->index;
            ++k;
        }
    }
    CRS->rowptr[M->m] = k;
    nlCRSMatrixComputeSlices(CRS);
    return (NLMatrix)CRS;
}
/*
 * Converts a square dynamic sparse matrix (rows storage required) into
 * a new symmetric-storage CRS matrix, keeping only the lower triangle
 * (j <= i). If M already has symmetric storage all its entries are in
 * the lower triangle and are copied as-is; otherwise upper-triangle
 * entries are dropped and nnz is counted first.
 * M's rows are sorted by index as a side effect.
 */
NLMatrix nlCRSMatrixNewFromSparseMatrixSymmetric(NLSparseMatrix* M) {
    NLuint nnz;
    NLuint i,j,jj,k;
    NLCRSMatrix* CRS = NL_NEW(NLCRSMatrix);
    nl_assert(M->storage & NL_MATRIX_STORE_ROWS);
    nl_assert(M->m == M->n);
    nlSparseMatrixSort(M);
    if(M->storage & NL_MATRIX_STORE_SYMMETRIC) {
        nnz = nlSparseMatrixNNZ(M);
    } else {
        /* count only lower-triangle entries */
        nnz = 0;
        for(i=0; i<M->n; ++i) {
            NLRowColumn* Ri = &M->row[i];
            for(jj=0; jj<Ri->size; ++jj) {
                j = Ri->coeff[jj].index;
                if(j <= i) {
                    ++nnz;
                }
            }
        }
    }
    nlCRSMatrixConstructSymmetric(CRS, M->n, nnz);
    k=0;
    for(i=0; i<M->m; ++i) {
        NLRowColumn* Ri = &(M->row[i]);
        CRS->rowptr[i] = k;
        for(jj=0; jj<Ri->size; ++jj) {
            j = Ri->coeff[jj].index;
            if((M->storage & NL_MATRIX_STORE_SYMMETRIC)) {
                nl_debug_assert(j <= i);
            }
            if(j <= i) {
                CRS->val[k] = Ri->coeff[jj].value;
                CRS->colind[k] = j;
                ++k;
            }
        }
    }
    CRS->rowptr[M->m] = k;
    return (NLMatrix)CRS;
}
/*
 * Replaces *M in place by a more efficient representation:
 * dynamic -> CRS (or MKL format when available), CRS -> MKL format when
 * available. Matrices of other types are left untouched.
 */
void nlMatrixCompress(NLMatrix* M) {
    NLMatrix compressed = NULL;
    NLenum t = (*M)->type;
    if(t == NL_MATRIX_CRS && nlExtensionIsInitialized_MKL()) {
        compressed = nlMKLMatrixNewFromCRSMatrix((NLCRSMatrix*)*M);
    } else if(t == NL_MATRIX_SPARSE_DYNAMIC) {
        compressed = nlExtensionIsInitialized_MKL()
            ? nlMKLMatrixNewFromSparseMatrix((NLSparseMatrix*)*M)
            : nlCRSMatrixNewFromSparseMatrix((NLSparseMatrix*)*M);
    }
    if(compressed != NULL) {
        nlDeleteMatrix(*M);
        *M = compressed;
    }
}
/* Returns the number of nonzero entries of M; matrix types without an
   explicit nonzero count are treated as dense (m*n). */
NLuint nlMatrixNNZ(NLMatrix M) {
    switch(M->type) {
    case NL_MATRIX_SPARSE_DYNAMIC:
        return nlSparseMatrixNNZ((NLSparseMatrix*)M);
    case NL_MATRIX_CRS:
        return nlCRSMatrixNNZ((NLCRSMatrix*)M);
    default:
        return M->m * M->n;
    }
}
/* Factorizes M with the requested direct solver extension (SuperLU
   variants or CHOLMOD). Returns NULL (and reports an error) for an
   unknown solver code. */
NLMatrix nlMatrixFactorize(NLMatrix M, NLenum solver) {
    NLMatrix result = NULL;
    if(
        solver == NL_SUPERLU_EXT ||
        solver == NL_PERM_SUPERLU_EXT ||
        solver == NL_SYMMETRIC_SUPERLU_EXT
    ) {
        result = nlMatrixFactorize_SUPERLU(M,solver);
    } else if(solver == NL_CHOLMOD_EXT) {
        result = nlMatrixFactorize_CHOLMOD(M,solver);
    } else {
        nlError("nlMatrixFactorize","unknown solver");
    }
    return result;
}
/*****************************************************************/
/**
* \brief A matrix class implemented by a function.
*/
typedef struct {
/**
* \brief number of rows
*/
NLuint m;
/**
* \brief number of columns
*/
NLuint n;
/**
* \brief Matrix type
* \details One of NL_MATRIX_SPARSE_DYNAMIC,
* NL_MATRIX_CRS, NL_MATRIX_SUPERLU_EXT,
* NL_MATRIX_CHOLDMOD_EXT, NL_MATRIX_FUNCTION,
* NL_MATRIX_OTHER
*/
NLenum type;
/**
* \brief Destructor
*/
NLDestroyMatrixFunc destroy_func;
/**
* \brief Matrix x vector product (abstract matrix API,
* takes matrix, rhs and lhs)
*/
NLMultMatrixVectorFunc mult_func;
/**
* \brief Matrix x vector product (user API, only takes
* rhs and lhs).
*/
NLMatrixFunc matrix_func;
} NLFunctionMatrix;
/* Destructor for function matrices: a no-op, since the struct owns no
   dynamically allocated memory. */
static void nlFunctionMatrixDestroy(NLFunctionMatrix* M) {
    (void)M; /* to avoid 'unused parameter' warning */
    /*
     * Nothing special to do,
     * there is no dynamic allocated mem.
     */
}
/* y = M x for a function matrix: delegates to the user callback,
   which only takes the rhs and lhs vectors. */
static void nlFunctionMatrixMult(
    NLFunctionMatrix* M, const NLdouble* x, NLdouble* y
) {
    M->matrix_func(x,y);
}
/* Wraps the user callback func as an m x n abstract matrix; only the
   matrix-vector product is available on the result. */
NLMatrix nlMatrixNewFromFunction(NLuint m, NLuint n, NLMatrixFunc func) {
    NLFunctionMatrix* fm = NL_NEW(NLFunctionMatrix);
    fm->type = NL_MATRIX_FUNCTION;
    fm->m = m;
    fm->n = n;
    fm->matrix_func = func;
    fm->destroy_func = (NLDestroyMatrixFunc)nlFunctionMatrixDestroy;
    fm->mult_func = (NLMultMatrixVectorFunc)nlFunctionMatrixMult;
    return (NLMatrix)fm;
}
/* Returns the user callback of a function matrix, or NULL if M is NULL
   or not of type NL_MATRIX_FUNCTION. */
NLMatrixFunc nlMatrixGetFunction(NLMatrix M) {
    if(M == NULL || M->type != NL_MATRIX_FUNCTION) {
        return NULL;
    }
    return ((NLFunctionMatrix*)M)->matrix_func;
}
/******************************************************************************/
/**
* \brief A matrix class that implements the product between two matrices.
*/
typedef struct {
/**
* \brief number of rows
*/
NLuint m;
/**
* \brief number of columns
*/
NLuint n;
/**
* \brief matrix type, NL_MATRIX_OTHER
*/
NLenum type;
/**
* \brief Destructor
*/
NLDestroyMatrixFunc destroy_func;
/**
* \brief Matrix x vector product (abstract matrix API,
* takes matrix, rhs and lhs)
*/
NLMultMatrixVectorFunc mult_func;
/**
* \brief Matrix x vector product (user API, only takes
* rhs and lhs).
*/
NLMatrixFunc matrix_func;
/**
* \brief First matrix of the product.
*/
NLMatrix M;
/**
* \brief NL_TRUE if memory ownership was transferred,
* NL_FALSE otherwise.
*/
NLboolean owns_M;
/**
* \brief Second matrix of the product.
*/
NLMatrix N;
/**
* \brief NL_TRUE if memory ownership was transferred,
* NL_FALSE otherwise.
*/
NLboolean owns_N;
/**
* \brief A temporary vector of dimension N->m (= M->n)
*/
NLdouble* work;
} NLMatrixProduct;
/* Destroys a matrix-product wrapper: frees the work vector and the
   operands whose ownership was transferred at construction. */
static void nlMatrixProductDestroy(NLMatrixProduct* P) {
    NL_DELETE_ARRAY(P->work);
    if(P->owns_M) {
        nlDeleteMatrix(P->M); P->M = NULL;
    }
    if(P->owns_N) {
        nlDeleteMatrix(P->N); P->N = NULL;
    }
}
/* y = (M N) x, computed as work = N x then y = M work;
   work has size N->m (== M->n). */
static void nlMatrixProductMult(
    NLMatrixProduct* P, const NLdouble* x, NLdouble* y
) {
    nlMultMatrixVector(P->N, x, P->work);
    nlMultMatrixVector(P->M, P->work, y);
}
/*
 * Creates the product M*N as an abstract matrix. Requires M->n == N->m.
 * A work vector of size N->m holds the intermediate N*x. When owns_M /
 * owns_N is set, the corresponding operand is deleted together with the
 * product.
 */
NLMatrix nlMatrixNewFromProduct(
    NLMatrix M, NLboolean owns_M, NLMatrix N, NLboolean owns_N
) {
    NLMatrixProduct* P = NL_NEW(NLMatrixProduct);
    nl_assert(M->n == N->m);
    P->type = NL_MATRIX_OTHER;
    P->m = M->m;
    P->n = N->n;
    P->M = M;
    P->owns_M = owns_M;
    P->N = N;
    P->owns_N = owns_N;
    P->work = NL_NEW_ARRAY(NLdouble,N->m);
    P->destroy_func = (NLDestroyMatrixFunc)nlMatrixProductDestroy;
    P->mult_func = (NLMultMatrixVectorFunc)nlMatrixProductMult;
    return (NLMatrix)P;
}
/******************************************************************************/
|
GB_binop__iseq_int64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__iseq_int64)
// A.*B function (eWiseMult): GB (_AemultB_08__iseq_int64)
// A.*B function (eWiseMult): GB (_AemultB_02__iseq_int64)
// A.*B function (eWiseMult): GB (_AemultB_04__iseq_int64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__iseq_int64)
// A*D function (colscale): GB (_AxD__iseq_int64)
// D*A function (rowscale): GB (_DxB__iseq_int64)
// C+=B function (dense accum): GB (_Cdense_accumB__iseq_int64)
// C+=b function (dense accum): GB (_Cdense_accumb__iseq_int64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__iseq_int64)
// C=scalar+B GB (_bind1st__iseq_int64)
// C=scalar+B' GB (_bind1st_tran__iseq_int64)
// C=A+scalar GB (_bind2nd__iseq_int64)
// C=A'+scalar GB (_bind2nd_tran__iseq_int64)
// C type: int64_t
// A type: int64_t
// B,b type: int64_t
// BinaryOp: cij = (aij == bij)
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int64_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int64_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x == y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISEQ || GxB_NO_INT64 || GxB_NO_ISEQ_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense; no accumulator.
// The actual loop lives in the included template; this wrapper only
// supplies the int64_t ISEQ typing via the GB_* macros defined above.
GrB_Info GB (_Cdense_ewise3_noaccum__iseq_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
// this kernel was compiled out (see GB_DISABLE above); caller falls
// back to the generic case
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C.
// B has been pre-sliced into B_ntasks tasks (B_ek_slicing).
GrB_Info GB (_Cdense_accumB__iseq_int64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// the template implements the accumulation loop for this type/op
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into a dense matrix C.
// p_bwork points to the scalar, passed as untyped GB_void.
// Fix: the original had a second, unreachable `return (GrB_SUCCESS) ;`
// after the inner block (the first return inside the block always fired).
// The dead statement is removed and the single return moved after the block.
GrB_Info GB (_Cdense_accumb__iseq_int64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int64_t
int64_t bwork = (*((int64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D.
GrB_Info GB (_AxD__iseq_int64)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// only the numeric values Cx are computed here; the template reads
// A's pattern and D's diagonal
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.
GrB_Info GB (_DxB__iseq_int64)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// only the numeric values Cx are computed here
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, or C<!M>=A+B, with the ISEQ int64 operator.
// The C_to_* maps and TaskList describe how C was pre-sliced into tasks.
GrB_Info GB (_AaddB__iseq_int64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// per-matrix slicing workspaces, allocated on demand by the template
// and released by GB_FREE_WORK below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult method 08: C=A.*B (optionally masked) where C is sparse/hyper.
GrB_Info GB (_AemultB_08__iseq_int64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full.  GB_FLIPPED selects which template variant is compiled.
GrB_Info GB (_AemultB_02__iseq_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
// (For ISEQ, GB_BINOP_FLIP is 0 above, so this branch is compiled.)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 04: C<M> = A.*B with M sparse/hyper, A and B bitmap/full.
GrB_Info GB (_AemultB_04__iseq_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B (optionally masked) where C is held in bitmap form.
GrB_Info GB (_AemultB_bitmap__iseq_int64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x == Bx [p]) for every entry present in B: the scalar x is
// bound as the first argument of the ISEQ operator.
GrB_Info GB (_bind1st__iseq_int64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the untyped inputs
int64_t *Cx = (int64_t *) Cx_output ;
int64_t *Bx = (int64_t *) Bx_input ;
int64_t x = (*((int64_t *) x_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < bnz ; k++)
{
// skip entries not present according to the bitmap Bb
if (GBB (Bb, k))
{
int64_t bval = GBX (Bx, k, false) ;
Cx [k] = (x == bval) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] == y) for every entry present in A: the scalar y is
// bound as the second argument of the ISEQ operator.
GrB_Info GB (_bind2nd__iseq_int64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the untyped inputs
int64_t *Cx = (int64_t *) Cx_output ;
int64_t *Ax = (int64_t *) Ax_input ;
int64_t y = (*((int64_t *) y_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
// skip entries not present according to the bitmap Ab
if (GBB (Ab, k))
{
int64_t aval = GBX (Ax, k, false) ;
Cx [k] = (aval == y) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x == aij) ; \
}
// C = op (x, A'): transpose A and apply the ISEQ operator with x bound
// as the first argument, via GB_CAST_OP defined just above.
GrB_Info GB (_bind1st_tran__iseq_int64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t x = (*((const int64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code following this function (preprocessor
// directives are not scoped to the function body)
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij == y) ; \
}
// C = op (A', y): transpose A and apply the ISEQ operator with y bound
// as the second argument, via GB_CAST_OP defined just above.
GrB_Info GB (_bind2nd_tran__iseq_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t y = (*((const int64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
omp_lock.c | #include <omp.h>
#include <stdio.h>
#define THREADS 512
#ifdef WAVE_SIZE
#define WARPSIZE WAVE_SIZE
#else
#define WARPSIZE 64
#endif
#pragma omp declare target
omp_lock_t lock;
#pragma omp end declare target
// Device-lock test: threads of each warp contend for a global lock and one
// lane per warp increments `count`; the host predicts the total.
// Fix: count and expected_count are `unsigned`, but were printed with %d —
// a mismatched conversion specifier is undefined behavior; use %u.
int main() {
// the lock scheme under test targets 64-wide waves; skip otherwise
if (WARPSIZE == 32)
return 0;
int error = 0;
unsigned count = 0; // incremented within target region
unsigned expected_count = 0; // incremented on host
#pragma omp target
omp_init_lock(&lock);
// The lock implementation picks a thread from the warp to avoid the
// deadlock that results if multiple threads try to CAS-loop at once
// The lower/upper construct checks various active warp patterns
const int edges[] = {0, 1, 32, 62, 63};
const int N = sizeof(edges) / sizeof(edges[0]);
for (int l = 0; l < N; l++) {
for (int u = 0; u < N; u++) {
int lower = edges[l];
int upper = edges[u];
if (lower > upper)
continue;
// one increment per warp is expected from each target region
expected_count += THREADS / WARPSIZE;
#pragma omp target parallel num_threads(THREADS) map(tofrom : error, count)
{
int lane_id = omp_ext_get_lane_id();
if (lane_id >= lower && lane_id <= upper) {
omp_set_lock(&lock); // mutex acts on a per warp basis
if (omp_ext_get_lane_id() == lower) {
// Increment once per warp
count++;
}
// a held lock must report as not acquirable
if (!omp_test_lock(&lock)) {
error = 1;
}
omp_unset_lock(&lock);
}
}
}
}
#pragma omp target
omp_destroy_lock(&lock);
if (count != expected_count) {
error = 1;
}
fprintf(stderr, "ec %u c %u\n", expected_count, count);
return error;
}
|
nested.c | // RUN: %libomp-compile && env OMP_DISPLAY_AFFINITY=true OMP_PLACES=threads OMP_PROC_BIND=spread,close %libomp-run | %python %S/check.py -c 'CHECK' %s
// REQUIRES: affinity
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
int main(int argc, char** argv) {
// Format consumed by check.py against the CHECK lines below; do not change.
omp_set_affinity_format("TESTER: tl:%L at:%a tn:%n nt:%N");
// enable nested parallelism so the inner parallel region forks real teams
omp_set_nested(1);
#pragma omp parallel num_threads(4)
{
#pragma omp parallel num_threads(3)
{ }
}
return 0;
}
// CHECK: num_threads=4 TESTER: tl:1 at:0 tn:[0-3] nt:4
// CHECK: num_threads=3 TESTER: tl:2 at:[0-3] tn:[0-2] nt:3
// CHECK: num_threads=3 TESTER: tl:2 at:[0-3] tn:[0-2] nt:3
// CHECK: num_threads=3 TESTER: tl:2 at:[0-3] tn:[0-2] nt:3
// CHECK: num_threads=3 TESTER: tl:2 at:[0-3] tn:[0-2] nt:3
|
GB_is_diagonal.c | //------------------------------------------------------------------------------
// GB_is_diagonal: check if A is a diagonal matrix
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// Returns true if A is a square diagonal matrix, with all diagonal entries
// present. All pending tuples are ignored. Zombies are treated as entries.
#include "GB_mxm.h"
#include "GB_atomics.h"
// Returns true if A is a square diagonal matrix with all diagonal entries
// present.  Fix: the original read Ai [p] unconditionally, even when
// ajnz != 1.  If A(:,j) is empty and j is the last vector, p == Ap [j+1]
// == anz, so Ai [p] is one past the end of Ai — an out-of-bounds read.
// The read is now guarded so Ai [p] is touched only when ajnz == 1.
bool GB_is_diagonal             // true if A is diagonal
(
    const GrB_Matrix A,         // input matrix to examine
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    ASSERT (A != NULL) ;
    ASSERT_MATRIX_OK (A, "A check diag", GB0) ;
    ASSERT (!GB_ZOMBIES (A)) ;
    ASSERT (GB_JUMBLED_OK (A)) ;
    ASSERT (!GB_PENDING (A)) ;

    //--------------------------------------------------------------------------
    // trivial cases
    //--------------------------------------------------------------------------

    int64_t n = GB_NROWS (A) ;
    int64_t ncols = GB_NCOLS (A) ;
    if (n != ncols)
    {
        // A is rectangular
        return (false) ;
    }

    if (GB_IS_BITMAP (A))
    {
        // never treat bitmaps as diagonal
        return (false) ;
    }

    if (GB_IS_FULL (A))
    {
        // A is full, and is diagonal only if 1-by-1, but always return
        // false so that GB_AxB_rowscale and GB_AxB_colscale are not used
        // by GB_reduce_to_vector.
        return (false) ;
    }

    int64_t anz = GB_nnz (A) ;
    int64_t nvec = A->nvec ;
    if (n != anz || n != nvec)
    {
        // A must have exactly n entries in n vectors. A can be sparse or
        // hypersparse. If hypersparse, all vectors must be present, so
        // Ap has size n+1 whether sparse or hypersparse.
        return (false) ;
    }

    //--------------------------------------------------------------------------
    // determine the number of threads to use
    //--------------------------------------------------------------------------

    // Break the work into lots of tasks so the early-exit can be exploited.
    GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
    int nthreads = GB_nthreads (n, chunk, nthreads_max) ;
    int ntasks = (nthreads == 1) ? 1 : (256 * nthreads) ;
    ntasks = GB_IMIN (ntasks, n) ;
    ntasks = GB_IMAX (ntasks, 1) ;

    //--------------------------------------------------------------------------
    // examine each vector of A
    //--------------------------------------------------------------------------

    const int64_t *restrict Ap = A->p ;
    const int64_t *restrict Ai = A->i ;

    // "diagonal" is an int, not bool, so it can be read and written
    // atomically via GB_ATOMIC_READ / GB_ATOMIC_WRITE below
    int diagonal = true ;

    int tid ;
    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
    for (tid = 0 ; tid < ntasks ; tid++)
    {

        //----------------------------------------------------------------------
        // check for early exit
        //----------------------------------------------------------------------

        int diag = true ;
        {
            GB_ATOMIC_READ
            diag = diagonal ;
        }
        if (!diag) continue ;

        //----------------------------------------------------------------------
        // check if vectors jstart:jend-1 are diagonal
        //----------------------------------------------------------------------

        int64_t jstart, jend ;
        GB_PARTITION (jstart, jend, n, tid, ntasks) ;
        for (int64_t j = jstart ; diag && j < jend ; j++)
        {
            int64_t p = Ap [j] ;
            int64_t ajnz = Ap [j+1] - p ;
            if (ajnz != 1)
            {
                // A(:,j) must have exactly one entry.  Do not read Ai [p]
                // in this case: if A(:,j) is empty and j == n-1, then
                // p == anz and Ai [p] would be out of bounds.
                diag = false ;
            }
            else if (Ai [p] != j)
            {
                // the single entry must be A(j,j)
                diag = false ;
            }
        }

        //----------------------------------------------------------------------
        // early exit: tell all other tasks to halt
        //----------------------------------------------------------------------

        if (!diag)
        {
            GB_ATOMIC_WRITE
            diagonal = false ;
        }
    }

    //--------------------------------------------------------------------------
    // return result
    //--------------------------------------------------------------------------

    return ((bool) diagonal) ;
}
|
3d7pt_var.c | /*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
// Benchmark driver for the order-1 3D 7-point variable-coefficient stencil.
// Fixes: (1) `min(...)` was called but only the MIN macro is defined above —
// an undeclared identifier; (2) Nx/Ny/Nz/Nt were read uninitialized when too
// few arguments were given (UB) — sane defaults added; (3) the stencil reads
// the halo of both time planes, so both planes are now fully initialized;
// (4) the top-level A, coef, and tile_size allocations are now freed.
int main(int argc, char *argv[])
{
int t, i, j, k, m, test;
// interior size from the command line plus a halo of 2 per axis;
// defaults used when arguments are missing
int Nx = 34, Ny = 34, Nz = 34, Nt = 10;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
// allocate the two time planes
double ****A = (double ****) malloc(sizeof(double***)*2);
for(m=0; m<2;m++){
A[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
// allocate the 7 coefficient arrays
double ****coef = (double ****) malloc(sizeof(double***)*7);
for(m=0; m<7;m++){
coef[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 24;
tile_size[1] = 24;
tile_size[2] = 4;
tile_size[3] = 32;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize the whole grid, including the halo the stencil reads,
// and both time planes (plane 1 is read from the second time step on)
srand(42);
for (i = 0; i < Nz; i++) {
for (j = 0; j < Ny; j++) {
for (k = 0; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
A[1][i][j][k] = A[0][i][j][k];
}
}
}
for (m=0; m<7; m++) {
for (i=1; i<Nz; i++) {
for (j=1; j<Ny; j++) {
for (k=1; k<Nx; k++) {
coef[m][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
// num_threads is reported by the PRINT_RESULTS macro
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
#pragma scop
for (t = 0; t < Nt-1; t++) {
for (i = 1; i < Nz-1; i++) {
for (j = 1; j < Ny-1; j++) {
for (k = 1; k < Nx-1; k++) {
A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i ][j ][k ] +
coef[1][i][j][k] * A[t%2][i-1][j ][k ] +
coef[2][i][j][k] * A[t%2][i ][j-1][k ] +
coef[3][i][j][k] * A[t%2][i ][j ][k-1] +
coef[4][i][j][k] * A[t%2][i+1][j ][k ] +
coef[5][i][j][k] * A[t%2][i ][j+1][k ] +
coef[6][i][j][k] * A[t%2][i ][j ][k+1];
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
// use the MIN macro defined above ("min" is not declared anywhere)
min_tdiff = MIN(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "variable no-symmetry")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
free(A);
for(m=0; m<7;m++){
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(coef[m][i][j]);
}
free(coef[m][i]);
}
free(coef[m]);
}
free(coef);
free(tile_size);
return 0;
}
|
GB_unaryop__ainv_int64_int32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_int64_int32
// op(A') function: GB_tran__ainv_int64_int32
// C type: int64_t
// A type: int32_t
// cast: int64_t cij = (int64_t) aij
// unaryop: cij = -aij
#define GB_ATYPE \
int32_t
#define GB_CTYPE \
int64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, aij) \
int64_t z = (int64_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_INT64 || GxB_NO_INT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply the unary AINV operator to every entry,
// i.e. Cx [p] = -((int64_t) Ax [p]) via the GB_CAST_OP macro above.
GrB_Info GB_unop__ainv_int64_int32
(
int64_t *Cx, // Cx and Ax may be aliased
int32_t *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// Cx [p] = -((int64_t) Ax [p])
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, typecast int32 -> int64, and apply AINV.
// The work is implemented by the included template using the macros above.
GrB_Info GB_tran__ainv_int64_int32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
finalize-1.c | /* Test valid usage and processing of the finalize clause. */
/* { dg-additional-options "-fdump-tree-original -fdump-tree-gimple" } */
extern int del_r;
extern float del_f[3];
extern char *del_f_p;
extern double cpo_r[8];
extern long cpo_f;
extern char *cpo_f_p;
// Exercises every finalize/no-finalize combination of the OpenACC exit data
// directive; the dg-final patterns below check the original and gimple dumps.
void f ()
{
#pragma acc exit data delete (del_r)
/* { dg-final { scan-tree-dump-times "(?n)#pragma acc exit data map\\(release:del_r\\);$" 1 "original" } }
{ dg-final { scan-tree-dump-times "(?n)#pragma omp target oacc_enter_exit_data map\\(release:del_r \\\[len: \[0-9\]+\\\]\\)$" 1 "gimple" } } */
#pragma acc exit data finalize delete (del_f)
/* { dg-final { scan-tree-dump-times "(?n)#pragma acc exit data map\\(release:del_f\\) finalize;$" 1 "original" } }
{ dg-final { scan-tree-dump-times "(?n)#pragma omp target oacc_enter_exit_data map\\(delete:del_f \\\[len: \[0-9\]+\\\]\\) finalize$" 1 "gimple" } } */
#pragma acc exit data finalize delete (del_f_p[2:5])
/* { dg-final { scan-tree-dump-times "(?n)#pragma acc exit data map\\(release:\\*\\(del_f_p \\+ 2\\) \\\[len: 5\\\]\\) map\\(firstprivate:del_f_p \\\[pointer assign, bias: 2\\\]\\) finalize;$" 1 "original" } }
{ dg-final { scan-tree-dump-times "(?n)#pragma omp target oacc_enter_exit_data map\\(delete:\[^ \]+ \\\[len: 5\\\]\\) finalize$" 1 "gimple" } } */
#pragma acc exit data copyout (cpo_r)
/* { dg-final { scan-tree-dump-times "(?n)#pragma acc exit data map\\(from:cpo_r\\);$" 1 "original" } }
{ dg-final { scan-tree-dump-times "(?n)#pragma omp target oacc_enter_exit_data map\\(from:cpo_r \\\[len: \[0-9\]+\\\]\\)$" 1 "gimple" } } */
#pragma acc exit data copyout (cpo_f) finalize
/* { dg-final { scan-tree-dump-times "(?n)#pragma acc exit data finalize map\\(from:cpo_f\\);$" 1 "original" } }
{ dg-final { scan-tree-dump-times "(?n)#pragma omp target oacc_enter_exit_data finalize map\\(force_from:cpo_f \\\[len: \[0-9\]+\\\]\\)$" 1 "gimple" } } */
#pragma acc exit data copyout (cpo_f_p[4:10]) finalize
/* { dg-final { scan-tree-dump-times "(?n)#pragma acc exit data finalize map\\(from:\\*\\(cpo_f_p \\+ 4\\) \\\[len: 10\\\]\\) map\\(firstprivate:cpo_f_p \\\[pointer assign, bias: 4\\\]\\);$" 1 "original" } }
{ dg-final { scan-tree-dump-times "(?n)#pragma omp target oacc_enter_exit_data finalize map\\(force_from:\[^ \]+ \\\[len: 10\\\]\\)$" 1 "gimple" } } */
}
|
3d7pt.c | /*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
// Benchmark driver for the order-1 3D 7-point constant-coefficient stencil.
// Fixes: (1) `min(...)` was called but only the MIN macro is defined above —
// an undeclared identifier; (2) Nx/Ny/Nz/Nt were read uninitialized when too
// few arguments were given (UB) — sane defaults added; (3) the stencil reads
// the halo of both time planes, so both planes are now fully initialized.
int main(int argc, char *argv[])
{
int t, i, j, k, test;
// interior size from the command line plus a halo of 2 per axis;
// defaults used when arguments are missing
int Nx = 34, Ny = 34, Nz = 34, Nt = 10;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 24;
tile_size[1] = 24;
tile_size[2] = 4;
tile_size[3] = 2048;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
const double alpha = 0.0876;
const double beta = 0.0765;
// initialize the whole grid, including the halo the stencil reads,
// and both time planes (plane 1 is read from the second time step on)
srand(42);
for (i = 0; i < Nz; i++) {
for (j = 0; j < Ny; j++) {
for (k = 0; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
A[1][i][j][k] = A[0][i][j][k];
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
// num_threads is reported by the PRINT_RESULTS macro
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
#pragma scop
for (t = 0; t < Nt-1; t++) {
for (i = 1; i < Nz-1; i++) {
for (j = 1; j < Ny-1; j++) {
for (k = 1; k < Nx-1; k++) {
A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k])
+ beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] +
A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]);
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
// use the MIN macro defined above ("min" is not declared anywhere)
min_tdiff = MIN(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays (Causing performance degradation
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
cpu_adam.h | #pragma once
#define NOMINMAX // Windows idiosyncrasy
// https://stackoverflow.com/questions/4913922/possible-problems-with-nominmax-on-visual-c
#include <cuda_fp16.h>
#include <cuda_runtime_api.h>
#include <stdio.h>
#include <cassert>
#include "cuda.h"
#include "custom_cuda_layers.h"
#include "simd.h"
#define STEP(SPAN) \
void Step_##SPAN(float* _params, \
float* grads, \
float* _exp_avg, \
float* _exp_avg_sq, \
size_t _param_size, \
__half* dev_param = nullptr, \
bool half_precision = false);
// CPU Adam/AdamW optimizer.  Keeps running beta powers for bias correction,
// two pinned host buffers for double-buffered parameter copies, and two CUDA
// streams used to overlap those copies with compute.
// NOTE(review): "betta" spelling kept to match the surrounding code base.
class Adam_Optimizer {
public:
Adam_Optimizer(float alpha = 1e-3,
float betta1 = 0.9,
float betta2 = 0.999,
float eps = 1e-8,
float weight_decay = 0,
bool adamw_mode = true)
: _alpha(alpha),
_betta1(betta1),
_betta2(betta2),
_eps(eps),
_weight_decay(weight_decay),
_betta1_t(1.0),
_betta2_t(1.0),
_step(0),
_buf_index(false),
_adamw_mode(adamw_mode)
{
// pinned host staging buffers of TILE floats each, for async copies
cudaMallocHost((void**)_doubled_buffer, TILE * sizeof(float));
cudaMallocHost((void**)(_doubled_buffer + 1), TILE * sizeof(float));
_streams[0] = Context::Instance().GetCurrentStream();
_streams[1] = Context::Instance().GetNewStream();
}
~Adam_Optimizer()
{
cudaFreeHost(_doubled_buffer[0]);
cudaFreeHost(_doubled_buffer[1]);
}
#if defined(__AVX512__) or defined(__AVX256__)
// Vectorized step over the largest prefix divisible by SIMD_WIDTH * span;
// *rounded_size receives that prefix length (remainder handled elsewhere).
template <int span>
void Step_AVX(size_t* rounded_size,
float* _params,
float* grads,
float* _exp_avg,
float* _exp_avg_sq,
size_t param_size,
__half* dev_param = nullptr,
bool half_precision = false);
#endif
// Declares Step_1, Step_4, and Step_8 via the STEP macro above.
STEP(1)
STEP(4)
STEP(8)
inline void SynchronizeStreams()
{
for (int i = 0; i < 2; i++) cudaStreamSynchronize(_streams[i]);
}
// Advance the step counter, recomputing the beta^t powers from scratch
// whenever the betas changed or the step sequence is non-contiguous.
inline void IncrementStep(size_t step, float beta1, float beta2)
{
if (beta1 != _betta1 || beta2 != _betta2) {
_step = step;
_betta1 = beta1;
_betta2 = beta2;
_betta1_t = std::pow(_betta1, step);
_betta2_t = std::pow(_betta2, step);
} else {
_step++;
if (_step != step) {
_betta1_t = std::pow(_betta1, step);
_betta2_t = std::pow(_betta2, step);
_step = step;
} else {
// contiguous step: update the powers incrementally
_betta1_t *= _betta1;
_betta2_t *= _betta2;
}
}
}
// Refresh hyper-parameters and the bias-correction factors for this step.
inline void update_state(float lr, float epsilon, float weight_decay, bool bias_correction)
{
_alpha = lr;
_eps = epsilon;
_weight_decay = weight_decay;
_bias_correction1 = 1.0f;
_bias_correction2 = 1.0f;
if (bias_correction == 1) {
_bias_correction1 = 1 - _betta1_t;
_bias_correction2 = 1 / sqrt(1 - _betta2_t);
}
}
private:
float _alpha;
float _betta1;
float _betta2;
float _eps;
float _weight_decay;
float _betta1_t;
float _betta2_t;
size_t _step;
float _bias_correction1;
float _bias_correction2;
// two pinned host buffers for double-buffered device copies
float* _doubled_buffer[2];
// which of the two buffers/streams is currently in flight
bool _buf_index;
bool _adamw_mode;
cudaStream_t _streams[2];
};
#if defined(__AVX512__) or defined(__AVX256__)
// Vectorized Adam/AdamW step over the first ROUND_DOWN(_param_size,
// SIMD_WIDTH * span) elements.  The processed element count is returned
// through *rounded_size so the caller can handle the scalar tail.
// When dev_params is non-null, updated parameters are staged into
// _doubled_buffer and copied to the device asynchronously, double-buffered
// across the two streams.
template <int span>
void Adam_Optimizer::Step_AVX(size_t* rounded_size,
                              float* _params,
                              float* grads,
                              float* _exp_avg,
                              float* _exp_avg_sq,
                              size_t _param_size,
                              __half* dev_params,
                              bool half_precision)
{
    size_t new_rounded_size = 0;
    // Broadcast the scalar hyperparameters into SIMD registers once.
    AVX_Data betta1_4;
    betta1_4.data = SIMD_SET(_betta1);
    AVX_Data betta2_4;
    betta2_4.data = SIMD_SET(_betta2);
    float betta1_minus1 = 1 - _betta1;
    float betta2_minus1 = 1 - _betta2;
    AVX_Data betta1_minus1_4;
    betta1_minus1_4.data = SIMD_SET(betta1_minus1);
    AVX_Data betta2_minus1_4;
    betta2_minus1_4.data = SIMD_SET(betta2_minus1);
    AVX_Data bias2_sqrt;
    bias2_sqrt.data = SIMD_SET(_bias_correction2);
    AVX_Data eps_4;
    eps_4.data = SIMD_SET(_eps);
    // Negated so the final fma subtracts the scaled update from the params.
    float step_size = -1 * _alpha / _bias_correction1;
    AVX_Data step_size_4;
    step_size_4.data = SIMD_SET(step_size);
    float w_decay = -1 * _alpha * _weight_decay;
    AVX_Data weight_decay4;
    if (_weight_decay > 0)
        // AdamW decays the parameter directly (pre-scaled by -lr); classic
        // Adam folds the decay into the gradient instead.
        weight_decay4.data = (_adamw_mode ? SIMD_SET(w_decay) : SIMD_SET(_weight_decay));
    new_rounded_size = ROUND_DOWN(_param_size, SIMD_WIDTH * span);
    for (size_t t = 0; t < new_rounded_size; t += TILE) {
        size_t copy_size = TILE;
        if ((t + TILE) > new_rounded_size) copy_size = new_rounded_size - t;
        size_t offset = copy_size + t;
        // Double-buffering: before reusing a staging buffer, wait for the
        // async copy issued two tiles ago on the matching stream.
        if ((t / TILE) >= 2) { cudaStreamSynchronize(_streams[_buf_index]); }
#pragma omp parallel for
        for (size_t i = t; i < offset; i += SIMD_WIDTH * span) {
            AVX_Data grad_4[span];
            simd_load<span>(grad_4, grads + i, half_precision);
            AVX_Data momentum_4[span];
            simd_load<span>(momentum_4, _exp_avg + i, false);
            AVX_Data variance_4[span];
            simd_load<span>(variance_4, _exp_avg_sq + i, false);
            AVX_Data param_4[span];
            simd_load<span>(param_4, _params + i, half_precision);
            if (_weight_decay > 0 && !_adamw_mode) {
                // Classic Adam: grad += weight_decay * param.
                simd_fma<span>(grad_4, param_4, weight_decay4, grad_4);
            }
            // m = beta1 * m + (1 - beta1) * g
            simd_mul<span>(momentum_4, momentum_4, betta1_4);
            simd_fma<span>(momentum_4, grad_4, betta1_minus1_4, momentum_4);
            // v = beta2 * v + (1 - beta2) * g^2
            simd_mul<span>(variance_4, variance_4, betta2_4);
            simd_mul<span>(grad_4, grad_4, grad_4);
            simd_fma<span>(variance_4, grad_4, betta2_minus1_4, variance_4);
            // grad_4 is reused as the update: m / (sqrt(v)*bc2 + eps).
            simd_sqrt<span>(grad_4, variance_4);
            simd_fma<span>(grad_4, grad_4, bias2_sqrt, eps_4);
            simd_div<span>(grad_4, momentum_4, grad_4);
            if (_weight_decay > 0 && _adamw_mode) {
                // AdamW decoupled decay: param += (-lr*decay) * param.
                simd_fma<span>(param_4, param_4, weight_decay4, param_4);
            }
            simd_fma<span>(param_4, grad_4, step_size_4, param_4);
            simd_store<span>(_params + i, param_4, half_precision);
            if (dev_params) {
                // Stage updated params for the async device copy below.
                // NOTE(review): assumes _doubled_buffer holds at least TILE
                // elements — confirm against the allocation site.
                simd_store<span>(_doubled_buffer[_buf_index] + (i - t), param_4, half_precision);
            }
            simd_store<span>(_exp_avg + i, momentum_4, false);
            simd_store<span>(_exp_avg_sq + i, variance_4, false);
        }
        if (dev_params) {
            if (half_precision)
                launch_param_update_half(
                    _doubled_buffer[_buf_index], dev_params + t, copy_size, _streams[_buf_index]);
            else
                launch_param_update(
                    _doubled_buffer[_buf_index], dev_params + t, copy_size, _streams[_buf_index]);
            _buf_index = !_buf_index;
        }
    }
    *rounded_size = new_rounded_size;
}
#endif
/* Perform the semantic phase of parsing, i.e., the process of
building tree structure, checking semantic consistency, and
building RTL. These routines are used both during actual parsing
and during the instantiation of template functions.
Copyright (C) 1998-2017 Free Software Foundation, Inc.
Written by Mark Mitchell (mmitchell@usa.net) based on code found
formerly in parse.y and pt.c.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
GCC is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "target.h"
#include "bitmap.h"
#include "cp-tree.h"
#include "stringpool.h"
#include "cgraph.h"
#include "stmt.h"
#include "varasm.h"
#include "stor-layout.h"
#include "c-family/c-objc.h"
#include "tree-inline.h"
#include "intl.h"
#include "tree-iterator.h"
#include "omp-general.h"
#include "convert.h"
#include "gomp-constants.h"
/* There routines provide a modular interface to perform many parsing
operations. They may therefore be used during actual parsing, or
during template instantiation, which may be regarded as a
degenerate form of parsing. */
static tree maybe_convert_cond (tree);
static tree finalize_nrv_r (tree *, int *, void *);
static tree capture_decltype (tree);
/* Used for OpenMP non-static data member privatization. */
static hash_map<tree, tree> *omp_private_member_map;
static vec<tree> omp_private_member_vec;
static bool omp_private_member_ignore_next;
/* Deferred Access Checking Overview
---------------------------------
Most C++ expressions and declarations require access checking
to be performed during parsing. However, in several cases,
this has to be treated differently.
For member declarations, access checking has to be deferred
until more information about the declaration is known. For
example:
class A {
typedef int X;
public:
X f();
};
A::X A::f();
A::X g();
When we are parsing the function return type `A::X', we don't
really know if this is allowed until we parse the function name.
Furthermore, some contexts require that access checking is
never performed at all. These include class heads, and template
instantiations.
Typical use of access checking functions is described here:
1. When we enter a context that requires certain access checking
mode, the function `push_deferring_access_checks' is called with
DEFERRING argument specifying the desired mode. Access checking
may be performed immediately (dk_no_deferred), deferred
(dk_deferred), or not performed (dk_no_check).
2. When a declaration such as a type, or a variable, is encountered,
the function `perform_or_defer_access_check' is called. It
maintains a vector of all deferred checks.
3. The global `current_class_type' or `current_function_decl' is then
setup by the parser. `enforce_access' relies on these information
to check access.
4. Upon exiting the context mentioned in step 1,
`perform_deferred_access_checks' is called to check all declaration
stored in the vector. `pop_deferring_access_checks' is then
called to restore the previous access checking mode.
In case of parsing error, we simply call `pop_deferring_access_checks'
without `perform_deferred_access_checks'. */
/* One entry on the deferred-access stack: the checks collected in a
   context plus the checking mode active for that context.  */
struct GTY(()) deferred_access {
  /* A vector representing name-lookups for which we have deferred
     checking access controls.  We cannot check the accessibility of
     names used in a decl-specifier-seq until we know what is being
     declared because code like:
       class A {
         class B {};
         B* f();
       }
       A::B* A::f() { return 0; }
     is valid, even though `A::B' is not generally accessible.  */
  vec<deferred_access_check, va_gc> * GTY(()) deferred_access_checks;
  /* The current mode of access checks.  */
  enum deferring_kind deferring_access_checks_kind;
};
/* Data for deferred access checking.  */
static GTY(()) vec<deferred_access, va_gc> *deferred_access_stack;
/* Depth of nested dk_no_check contexts; nonzero disables checking.  */
static GTY(()) unsigned deferred_access_no_check;
/* Save the current deferred access states and start deferred
   access checking iff DEFER_P is true.  */
void
push_deferring_access_checks (deferring_kind deferring)
{
  /* For context like template instantiation, access checking
     disabling applies to all nested context.  */
  if (deferred_access_no_check || deferring == dk_no_check)
    deferred_access_no_check++;
  else
    {
      /* Open a fresh stack entry with no pending checks yet.  */
      deferred_access e = {NULL, deferring};
      vec_safe_push (deferred_access_stack, e);
    }
}
/* Save the current deferred access states and start deferred access
   checking, continuing the set of deferred checks in CHECKS.  */
void
reopen_deferring_access_checks (vec<deferred_access_check, va_gc> * checks)
{
  push_deferring_access_checks (dk_deferred);
  /* Re-attach the previously collected checks unless checking is
     disabled entirely.  */
  if (!deferred_access_no_check)
    deferred_access_stack->last().deferred_access_checks = checks;
}
/* Resume deferring access checks again after we stopped doing
   this previously.  */
void
resume_deferring_access_checks (void)
{
  /* Only the mode of the innermost entry changes.  */
  if (!deferred_access_no_check)
    deferred_access_stack->last().deferring_access_checks_kind = dk_deferred;
}
/* Stop deferring access checks.  */
void
stop_deferring_access_checks (void)
{
  if (!deferred_access_no_check)
    deferred_access_stack->last().deferring_access_checks_kind = dk_no_deferred;
}
/* Discard the current deferred access checks and restore the
   previous states.  */
void
pop_deferring_access_checks (void)
{
  /* Mirrors push_deferring_access_checks: either unwind one dk_no_check
     level or pop the innermost stack entry.  */
  if (deferred_access_no_check)
    deferred_access_no_check--;
  else
    deferred_access_stack->pop ();
}
/* Returns a TREE_LIST representing the deferred checks.
   The TREE_PURPOSE of each node is the type through which the
   access occurred; the TREE_VALUE is the declaration named.
   */
vec<deferred_access_check, va_gc> *
get_deferred_access_checks (void)
{
  if (deferred_access_no_check)
    return NULL;
  else
    return (deferred_access_stack->last().deferred_access_checks);
}
/* Take current deferred checks and combine with the
   previous states if we also defer checks previously.
   Otherwise perform checks now.  */
void
pop_to_parent_deferring_access_checks (void)
{
  if (deferred_access_no_check)
    deferred_access_no_check--;
  else
    {
      vec<deferred_access_check, va_gc> *checks;
      deferred_access *ptr;
      checks = (deferred_access_stack->last ().deferred_access_checks);
      deferred_access_stack->pop ();
      ptr = &deferred_access_stack->last ();
      if (ptr->deferring_access_checks_kind == dk_no_deferred)
	{
	  /* Check access.  */
	  perform_access_checks (checks, tf_warning_or_error);
	}
      else
	{
	  /* Merge with parent.  */
	  int i, j;
	  deferred_access_check *chk, *probe;
	  FOR_EACH_VEC_SAFE_ELT (checks, i, chk)
	    {
	      /* Skip checks the parent already records; duplicates are
		 identified by the (binfo, decl, diag_decl) triple.  */
	      FOR_EACH_VEC_SAFE_ELT (ptr->deferred_access_checks, j, probe)
		{
		  if (probe->binfo == chk->binfo &&
		      probe->decl == chk->decl &&
		      probe->diag_decl == chk->diag_decl)
		    goto found;
		}
	      /* Insert into parent's checks.  */
	      vec_safe_push (ptr->deferred_access_checks, *chk);
	    found:;
	    }
	}
    }
}
/* Perform the access checks in CHECKS.  The TREE_PURPOSE of each node
   is the BINFO indicating the qualifying scope used to access the
   DECL node stored in the TREE_VALUE of the node.  If CHECKS is empty
   or we aren't in SFINAE context or all the checks succeed return TRUE,
   otherwise FALSE.  */
bool
perform_access_checks (vec<deferred_access_check, va_gc> *checks,
		       tsubst_flags_t complain)
{
  int i;
  deferred_access_check *chk;
  /* Remember input_location so it can be restored after the per-check
     locations are installed below.  */
  location_t loc = input_location;
  bool ok = true;
  if (!checks)
    return true;
  FOR_EACH_VEC_SAFE_ELT (checks, i, chk)
    {
      /* Point diagnostics at the place the check was deferred from.  */
      input_location = chk->loc;
      ok &= enforce_access (chk->binfo, chk->decl, chk->diag_decl, complain);
    }
  input_location = loc;
  return (complain & tf_error) ? true : ok;
}
/* Perform the deferred access checks.
   After performing the checks, we still have to keep the list
   `deferred_access_stack->deferred_access_checks' since we may want
   to check access for them again later in a different context.
   For example:
     class A {
       typedef int X;
       static X a;
     };
     A::X A::a, x;	// No error for `A::a', error for `x'
   We have to perform deferred access of `A::X', first with `A::a',
   next with `x'.  Return value like perform_access_checks above.  */
bool
perform_deferred_access_checks (tsubst_flags_t complain)
{
  return perform_access_checks (get_deferred_access_checks (), complain);
}
/* Defer checking the accessibility of DECL, when looked up in
   BINFO.  DIAG_DECL is the declaration to use to print diagnostics.
   Return value like perform_access_checks above.  */
bool
perform_or_defer_access_check (tree binfo, tree decl, tree diag_decl,
			       tsubst_flags_t complain)
{
  int i;
  deferred_access *ptr;
  deferred_access_check *chk;
  /* Exit if we are in a context that no access checking is performed.
     */
  if (deferred_access_no_check)
    return true;
  gcc_assert (TREE_CODE (binfo) == TREE_BINFO);
  ptr = &deferred_access_stack->last ();
  /* If we are not supposed to defer access checks, just check now.  */
  if (ptr->deferring_access_checks_kind == dk_no_deferred)
    {
      bool ok = enforce_access (binfo, decl, diag_decl, complain);
      return (complain & tf_error) ? true : ok;
    }
  /* See if we are already going to perform this check.  */
  FOR_EACH_VEC_SAFE_ELT (ptr->deferred_access_checks, i, chk)
    {
      if (chk->decl == decl && chk->binfo == binfo &&
	  chk->diag_decl == diag_decl)
	{
	  return true;
	}
    }
  /* If not, record the check.  */
  deferred_access_check new_access = {binfo, decl, diag_decl, input_location};
  vec_safe_push (ptr->deferred_access_checks, new_access);
  return true;
}
/* Returns nonzero if the current statement is a full expression,
   i.e. temporaries created during that statement should be destroyed
   at the end of the statement.  */
int
stmts_are_full_exprs_p (void)
{
  return current_stmt_tree ()->stmts_are_full_exprs_p;
}
/* T is a statement.  Add it to the statement-tree.  This is the C++
   version.  The C/ObjC frontends have a slightly different version of
   this function.  */
tree
add_stmt (tree t)
{
  enum tree_code code = TREE_CODE (t);
  if (EXPR_P (t) && code != LABEL_EXPR)
    {
      /* Statements get the current location if they don't carry one.  */
      if (!EXPR_HAS_LOCATION (t))
	SET_EXPR_LOCATION (t, input_location);
      /* When we expand a statement-tree, we must know whether or not the
	 statements are full-expressions.  We record that fact here.  */
      STMT_IS_FULL_EXPR_P (t) = stmts_are_full_exprs_p ();
    }
  if (code == LABEL_EXPR || code == CASE_LABEL_EXPR)
    STATEMENT_LIST_HAS_LABEL (cur_stmt_list) = 1;
  /* Add T to the statement-tree.  Non-side-effect statements need to be
     recorded during statement expressions.  */
  gcc_checking_assert (!stmt_list_stack->is_empty ());
  append_to_statement_list_force (t, &cur_stmt_list);
  return t;
}
/* Returns the stmt_tree to which statements are currently being added.  */
stmt_tree
current_stmt_tree (void)
{
  /* Inside a function use its statement tree, otherwise the one kept
     in the saved scope.  */
  return (cfun
	  ? &cfun->language->base.x_stmt_tree
	  : &scope_chain->x_stmt_tree);
}
/* If statements are full expressions, wrap STMT in a CLEANUP_POINT_EXPR.  */
static tree
maybe_cleanup_point_expr (tree expr)
{
  if (!processing_template_decl && stmts_are_full_exprs_p ())
    expr = fold_build_cleanup_point_expr (TREE_TYPE (expr), expr);
  return expr;
}
/* Like maybe_cleanup_point_expr except have the type of the new expression be
   void so we don't need to create a temporary variable to hold the inner
   expression.  The reason why we do this is because the original type might be
   an aggregate and we cannot create a temporary variable for that type.  */
tree
maybe_cleanup_point_expr_void (tree expr)
{
  if (!processing_template_decl && stmts_are_full_exprs_p ())
    expr = fold_build_cleanup_point_expr (void_type_node, expr);
  return expr;
}
/* Create a declaration statement for the declaration given by the DECL.  */
void
add_decl_expr (tree decl)
{
  tree r = build_stmt (DECL_SOURCE_LOCATION (decl), DECL_EXPR, decl);
  /* Initializers and side-effecting sizes (e.g. VLAs) may create
     temporaries, so a cleanup point may be needed.  */
  if (DECL_INITIAL (decl)
      || (DECL_SIZE (decl) && TREE_SIDE_EFFECTS (DECL_SIZE (decl))))
    r = maybe_cleanup_point_expr_void (r);
  add_stmt (r);
}
/* Finish a scope.  */
tree
do_poplevel (tree stmt_list)
{
  tree block = NULL;
  if (stmts_are_full_exprs_p ())
    block = poplevel (kept_level_p (), 1, 0);
  stmt_list = pop_stmt_list (stmt_list);
  if (!processing_template_decl)
    {
      stmt_list = c_build_bind_expr (input_location, block, stmt_list);
      /* ??? See c_end_compound_stmt re statement expressions.  */
    }
  return stmt_list;
}
/* Begin a new scope.  */
static tree
do_pushlevel (scope_kind sk)
{
  /* Pair of push_stmt_list here and pop_stmt_list in do_poplevel.  */
  tree ret = push_stmt_list ();
  if (stmts_are_full_exprs_p ())
    begin_scope (sk, NULL);
  return ret;
}
/* Queue a cleanup.  CLEANUP is an expression/statement to be executed
   when the current scope is exited.  EH_ONLY is true when this is not
   meant to apply to normal control flow transfer.  */
void
push_cleanup (tree decl, tree cleanup, bool eh_only)
{
  tree stmt = build_stmt (input_location, CLEANUP_STMT, NULL, cleanup, decl);
  CLEANUP_EH_ONLY (stmt) = eh_only;
  add_stmt (stmt);
  /* Subsequent statements go into the cleanup's protected body.  */
  CLEANUP_BODY (stmt) = push_stmt_list ();
}
/* Simple infinite loop tracking for -Wreturn-type.  We keep a stack of all
   the current loops, represented by 'NULL_TREE' if we've seen a possible
   exit, and 'error_mark_node' if not.  This is currently used only to
   suppress the warning about a function with no return statements, and
   therefore we don't bother noting returns as possible exits.  We also
   don't bother with gotos.  */
static void
begin_maybe_infinite_loop (tree cond)
{
  /* Only track this while parsing a function, not during instantiation.  */
  if (!cfun || (DECL_TEMPLATE_INSTANTIATION (current_function_decl)
		&& !processing_template_decl))
    return;
  /* A loop with no condition (or a constant-true one) is assumed
     infinite until an exit is seen.  */
  bool maybe_infinite = true;
  if (cond)
    {
      cond = fold_non_dependent_expr (cond);
      maybe_infinite = integer_nonzerop (cond);
    }
  vec_safe_push (cp_function_chain->infinite_loops,
		 maybe_infinite ? error_mark_node : NULL_TREE);
}
/* A break is a possible exit for the current loop.  */
void
break_maybe_infinite_loop (void)
{
  if (!cfun)
    return;
  /* Mark the innermost tracked loop as having an exit.  */
  cp_function_chain->infinite_loops->last() = NULL_TREE;
}
/* If we reach the end of the loop without seeing a possible exit, we have
   an infinite loop.  */
static void
end_maybe_infinite_loop (tree cond)
{
  if (!cfun || (DECL_TEMPLATE_INSTANTIATION (current_function_decl)
		&& !processing_template_decl))
    return;
  tree current = cp_function_chain->infinite_loops->pop();
  if (current != NULL_TREE)
    {
      /* No exit was recorded; if the condition is constant-true the
	 loop never terminates.  */
      cond = fold_non_dependent_expr (cond);
      if (integer_nonzerop (cond))
	current_function_infinite_loop = 1;
    }
}
/* Begin a conditional that might contain a declaration.  When generating
   normal code, we want the declaration to appear before the statement
   containing the conditional.  When generating template code, we want the
   conditional to be rendered as the raw DECL_EXPR.  */
static void
begin_cond (tree *cond_p)
{
  if (processing_template_decl)
    *cond_p = push_stmt_list ();
}
/* Finish such a conditional.  */
static void
finish_cond (tree *cond_p, tree expr)
{
  if (processing_template_decl)
    {
      tree cond = pop_stmt_list (*cond_p);
      if (expr == NULL_TREE)
	/* Empty condition in 'for'.  */
	gcc_assert (empty_expr_stmt_p (cond));
      else if (check_for_bare_parameter_packs (expr))
	expr = error_mark_node;
      else if (!empty_expr_stmt_p (cond))
	/* Keep any statements the condition produced ahead of EXPR.  */
	expr = build2 (COMPOUND_EXPR, TREE_TYPE (expr), cond, expr);
    }
  *cond_p = expr;
}
/* If *COND_P specifies a conditional with a declaration, transform the
   loop such that
     while (A x = 42) { }
     for (; A x = 42;) { }
   becomes
     while (true) { A x = 42; if (!x) break; }
     for (;;) { A x = 42; if (!x) break; }
   The statement list for BODY will be empty if the conditional did
   not declare anything.  */
static void
simplify_loop_decl_cond (tree *cond_p, tree body)
{
  tree cond, if_stmt;
  /* An empty BODY means the condition declared nothing: no rewrite.  */
  if (!TREE_SIDE_EFFECTS (body))
    return;
  cond = *cond_p;
  *cond_p = boolean_true_node;
  /* Emit `if (!cond) break;` at the top of the loop body.  */
  if_stmt = begin_if_stmt ();
  cond = cp_build_unary_op (TRUTH_NOT_EXPR, cond, false, tf_warning_or_error);
  finish_if_stmt_cond (cond, if_stmt);
  finish_break_stmt ();
  finish_then_clause (if_stmt);
  finish_if_stmt (if_stmt);
}
/* Finish a goto-statement.  */
tree
finish_goto_stmt (tree destination)
{
  if (identifier_p (destination))
    destination = lookup_label (destination);
  /* We warn about unused labels with -Wunused.  That means we have to
     mark the used labels as used.  */
  if (TREE_CODE (destination) == LABEL_DECL)
    TREE_USED (destination) = 1;
  else
    {
      /* Computed goto: the destination is an expression.  */
      if (check_no_cilk (destination,
			 "Cilk array notation cannot be used as a computed goto expression",
			 "%<_Cilk_spawn%> statement cannot be used as a computed goto expression"))
	destination = error_mark_node;
      destination = mark_rvalue_use (destination);
      if (!processing_template_decl)
	{
	  destination = cp_convert (ptr_type_node, destination,
				    tf_warning_or_error);
	  if (error_operand_p (destination))
	    return NULL_TREE;
	  destination
	    = fold_build_cleanup_point_expr (TREE_TYPE (destination),
					     destination);
	}
    }
  check_goto (destination);
  return add_stmt (build_stmt (input_location, GOTO_EXPR, destination));
}
/* COND is the condition-expression for an if, while, etc.,
   statement.  Convert it to a boolean value, if appropriate.
   In addition, verify sequence points if -Wsequence-point is enabled.  */
static tree
maybe_convert_cond (tree cond)
{
  /* Empty conditions remain empty.  */
  if (!cond)
    return NULL_TREE;
  /* Wait until we instantiate templates before doing conversion.  */
  if (processing_template_decl)
    return cond;
  if (warn_sequence_point)
    verify_sequence_points (cond);
  /* Do the conversion.  */
  cond = convert_from_reference (cond);
  /* Warn about `if (a = b)' which is likely a typo for `=='.  */
  if (TREE_CODE (cond) == MODIFY_EXPR
      && !TREE_NO_WARNING (cond)
      && warn_parentheses)
    {
      warning_at (EXPR_LOC_OR_LOC (cond, input_location), OPT_Wparentheses,
		  "suggest parentheses around assignment used as truth value");
      TREE_NO_WARNING (cond) = 1;
    }
  return condition_conversion (cond);
}
/* Finish an expression-statement, whose EXPRESSION is as indicated.  */
tree
finish_expr_stmt (tree expr)
{
  tree r = NULL_TREE;
  location_t loc = EXPR_LOCATION (expr);
  if (expr != NULL_TREE)
    {
      /* If we ran into a problem, make sure we complained.  */
      gcc_assert (expr != error_mark_node || seen_error ());
      if (!processing_template_decl)
	{
	  if (warn_sequence_point)
	    verify_sequence_points (expr);
	  expr = convert_to_void (expr, ICV_STATEMENT, tf_warning_or_error);
	}
      else if (!type_dependent_expression_p (expr))
	/* Diagnose discarded-value problems even in templates when the
	   expression is not type-dependent.  */
	convert_to_void (build_non_dependent_expr (expr), ICV_STATEMENT,
			 tf_warning_or_error);
      if (check_for_bare_parameter_packs (expr))
	expr = error_mark_node;
      /* Simplification of inner statement expressions, compound exprs,
	 etc can result in us already having an EXPR_STMT.  */
      if (TREE_CODE (expr) != CLEANUP_POINT_EXPR)
	{
	  if (TREE_CODE (expr) != EXPR_STMT)
	    expr = build_stmt (loc, EXPR_STMT, expr);
	  expr = maybe_cleanup_point_expr_void (expr);
	}
      r = add_stmt (expr);
    }
  return r;
}
/* Begin an if-statement.  Returns a newly created IF_STMT if
   appropriate.  */
tree
begin_if_stmt (void)
{
  tree r, scope;
  /* The condition gets its own scope so declarations in it are
     visible in both clauses.  */
  scope = do_pushlevel (sk_cond);
  r = build_stmt (input_location, IF_STMT, NULL_TREE,
		  NULL_TREE, NULL_TREE, scope);
  current_binding_level->this_entity = r;
  begin_cond (&IF_COND (r));
  return r;
}
/* Process the COND of an if-statement, which may be given by
   IF_STMT.  */
tree
finish_if_stmt_cond (tree cond, tree if_stmt)
{
  cond = maybe_convert_cond (cond);
  /* `if constexpr' requires a constant condition; evaluate it now
     when it is not value-dependent.  */
  if (IF_STMT_CONSTEXPR_P (if_stmt)
      && require_potential_rvalue_constant_expression (cond)
      && !value_dependent_expression_p (cond))
    {
      cond = instantiate_non_dependent_expr (cond);
      cond = cxx_constant_value (cond, NULL_TREE);
    }
  finish_cond (&IF_COND (if_stmt), cond);
  add_stmt (if_stmt);
  THEN_CLAUSE (if_stmt) = push_stmt_list ();
  return cond;
}
/* Finish the then-clause of an if-statement, which may be given by
   IF_STMT.  */
tree
finish_then_clause (tree if_stmt)
{
  THEN_CLAUSE (if_stmt) = pop_stmt_list (THEN_CLAUSE (if_stmt));
  return if_stmt;
}
/* Begin the else-clause of an if-statement.  */
void
begin_else_clause (tree if_stmt)
{
  ELSE_CLAUSE (if_stmt) = push_stmt_list ();
}
/* Finish the else-clause of an if-statement, which may be given by
   IF_STMT.  */
void
finish_else_clause (tree if_stmt)
{
  ELSE_CLAUSE (if_stmt) = pop_stmt_list (ELSE_CLAUSE (if_stmt));
}
/* Finish an if-statement.  */
void
finish_if_stmt (tree if_stmt)
{
  /* Close the condition scope opened in begin_if_stmt.  */
  tree scope = IF_SCOPE (if_stmt);
  IF_SCOPE (if_stmt) = NULL;
  add_stmt (do_poplevel (scope));
}
/* Begin a while-statement.  Returns a newly created WHILE_STMT if
   appropriate.  */
tree
begin_while_stmt (void)
{
  tree r;
  r = build_stmt (input_location, WHILE_STMT, NULL_TREE, NULL_TREE);
  add_stmt (r);
  WHILE_BODY (r) = do_pushlevel (sk_block);
  begin_cond (&WHILE_COND (r));
  return r;
}
/* Process the COND of a while-statement, which may be given by
   WHILE_STMT.  */
void
finish_while_stmt_cond (tree cond, tree while_stmt, bool ivdep)
{
  if (check_no_cilk (cond,
		     "Cilk array notation cannot be used as a condition for while statement",
		     "%<_Cilk_spawn%> statement cannot be used as a condition for while statement"))
    cond = error_mark_node;
  cond = maybe_convert_cond (cond);
  finish_cond (&WHILE_COND (while_stmt), cond);
  begin_maybe_infinite_loop (cond);
  /* #pragma GCC ivdep: annotate the condition so the optimizers may
     assume no loop-carried dependencies.  */
  if (ivdep && cond != error_mark_node)
    WHILE_COND (while_stmt) = build2 (ANNOTATE_EXPR,
				      TREE_TYPE (WHILE_COND (while_stmt)),
				      WHILE_COND (while_stmt),
				      build_int_cst (integer_type_node,
						     annot_expr_ivdep_kind));
  simplify_loop_decl_cond (&WHILE_COND (while_stmt), WHILE_BODY (while_stmt));
}
/* Finish a while-statement, which may be given by WHILE_STMT.  */
void
finish_while_stmt (tree while_stmt)
{
  end_maybe_infinite_loop (boolean_true_node);
  WHILE_BODY (while_stmt) = do_poplevel (WHILE_BODY (while_stmt));
}
/* Begin a do-statement.  Returns a newly created DO_STMT if
   appropriate.  */
tree
begin_do_stmt (void)
{
  tree r = build_stmt (input_location, DO_STMT, NULL_TREE, NULL_TREE);
  begin_maybe_infinite_loop (boolean_true_node);
  add_stmt (r);
  DO_BODY (r) = push_stmt_list ();
  return r;
}
/* Finish the body of a do-statement, which may be given by DO_STMT.  */
void
finish_do_body (tree do_stmt)
{
  tree body = DO_BODY (do_stmt) = pop_stmt_list (DO_BODY (do_stmt));
  if (TREE_CODE (body) == STATEMENT_LIST && STATEMENT_LIST_TAIL (body))
    body = STATEMENT_LIST_TAIL (body)->stmt;
  /* `do ; while (...)' is suspicious; suggest braces.  */
  if (IS_EMPTY_STMT (body))
    warning (OPT_Wempty_body,
	     "suggest explicit braces around empty body in %<do%> statement");
}
/* Finish a do-statement, which may be given by DO_STMT, and whose
   COND is as indicated.  */
void
finish_do_stmt (tree cond, tree do_stmt, bool ivdep)
{
  if (check_no_cilk (cond,
		     "Cilk array notation cannot be used as a condition for a do-while statement",
		     "%<_Cilk_spawn%> statement cannot be used as a condition for a do-while statement"))
    cond = error_mark_node;
  cond = maybe_convert_cond (cond);
  end_maybe_infinite_loop (cond);
  if (ivdep && cond != error_mark_node)
    cond = build2 (ANNOTATE_EXPR, TREE_TYPE (cond), cond,
		   build_int_cst (integer_type_node, annot_expr_ivdep_kind));
  DO_COND (do_stmt) = cond;
}
/* Finish a return-statement.  The EXPRESSION returned, if any, is as
   indicated.  */
tree
finish_return_stmt (tree expr)
{
  tree r;
  bool no_warning;
  expr = check_return_expr (expr, &no_warning);
  if (error_operand_p (expr)
      || (flag_openmp && !check_omp_return ()))
    {
      /* Suppress -Wreturn-type for this function.  */
      if (warn_return_type)
	TREE_NO_WARNING (current_function_decl) = true;
      return error_mark_node;
    }
  if (!processing_template_decl)
    {
      if (warn_sequence_point)
	verify_sequence_points (expr);
      if (DECL_DESTRUCTOR_P (current_function_decl)
	  || (DECL_CONSTRUCTOR_P (current_function_decl)
	      && targetm.cxx.cdtor_returns_this ()))
	{
	  /* Similarly, all destructors must run destructors for
	     base-classes before returning.  So, all returns in a
	     destructor get sent to the DTOR_LABEL; finish_function emits
	     code to return a value there.  */
	  return finish_goto_stmt (cdtor_label);
	}
    }
  r = build_stmt (input_location, RETURN_EXPR, expr);
  /* Propagate the no-warning flag check_return_expr computed.  */
  TREE_NO_WARNING (r) |= no_warning;
  r = maybe_cleanup_point_expr_void (r);
  r = add_stmt (r);
  return r;
}
/* Begin the scope of a for-statement or a range-for-statement.
   Both the returned trees are to be used in a call to
   begin_for_stmt or begin_range_for_stmt.  */
tree
begin_for_scope (tree *init)
{
  tree scope = NULL_TREE;
  /* -ffor-scope semantics: give init-declarations their own scope.  */
  if (flag_new_for_scope > 0)
    scope = do_pushlevel (sk_for);
  if (processing_template_decl)
    *init = push_stmt_list ();
  else
    *init = NULL_TREE;
  return scope;
}
/* Begin a for-statement.  Returns a new FOR_STMT.
   SCOPE and INIT should be the return of begin_for_scope,
   or both NULL_TREE  */
tree
begin_for_stmt (tree scope, tree init)
{
  tree r;
  r = build_stmt (input_location, FOR_STMT, NULL_TREE, NULL_TREE,
		  NULL_TREE, NULL_TREE, NULL_TREE);
  if (scope == NULL_TREE)
    {
      gcc_assert (!init || !(flag_new_for_scope > 0));
      if (!init)
	scope = begin_for_scope (&init);
    }
  FOR_INIT_STMT (r) = init;
  FOR_SCOPE (r) = scope;
  return r;
}
/* Finish the init-statement of a for-statement, which may be
   given by FOR_STMT.  */
void
finish_init_stmt (tree for_stmt)
{
  if (processing_template_decl)
    FOR_INIT_STMT (for_stmt) = pop_stmt_list (FOR_INIT_STMT (for_stmt));
  add_stmt (for_stmt);
  FOR_BODY (for_stmt) = do_pushlevel (sk_block);
  begin_cond (&FOR_COND (for_stmt));
}
/* Finish the COND of a for-statement, which may be given by
   FOR_STMT.  */
void
finish_for_cond (tree cond, tree for_stmt, bool ivdep)
{
  if (check_no_cilk (cond,
		     "Cilk array notation cannot be used in a condition for a for-loop",
		     "%<_Cilk_spawn%> statement cannot be used in a condition for a for-loop"))
    cond = error_mark_node;
  cond = maybe_convert_cond (cond);
  finish_cond (&FOR_COND (for_stmt), cond);
  begin_maybe_infinite_loop (cond);
  /* #pragma GCC ivdep annotation, as for while-loops.  */
  if (ivdep && cond != error_mark_node)
    FOR_COND (for_stmt) = build2 (ANNOTATE_EXPR,
				  TREE_TYPE (FOR_COND (for_stmt)),
				  FOR_COND (for_stmt),
				  build_int_cst (integer_type_node,
						 annot_expr_ivdep_kind));
  simplify_loop_decl_cond (&FOR_COND (for_stmt), FOR_BODY (for_stmt));
}
/* Finish the increment-EXPRESSION in a for-statement, which may be
   given by FOR_STMT.  */
void
finish_for_expr (tree expr, tree for_stmt)
{
  if (!expr)
    return;
  /* If EXPR is an overloaded function, issue an error; there is no
     context available to use to perform overload resolution.  */
  if (type_unknown_p (expr))
    {
      cxx_incomplete_type_error (expr, TREE_TYPE (expr));
      expr = error_mark_node;
    }
  if (!processing_template_decl)
    {
      if (warn_sequence_point)
	verify_sequence_points (expr);
      expr = convert_to_void (expr, ICV_THIRD_IN_FOR,
			      tf_warning_or_error);
    }
  else if (!type_dependent_expression_p (expr))
    convert_to_void (build_non_dependent_expr (expr), ICV_THIRD_IN_FOR,
		     tf_warning_or_error);
  expr = maybe_cleanup_point_expr_void (expr);
  if (check_for_bare_parameter_packs (expr))
    expr = error_mark_node;
  FOR_EXPR (for_stmt) = expr;
}
/* Finish the body of a for-statement, which may be given by
   FOR_STMT.  The increment-EXPR for the loop must be
   provided.
   It can also finish RANGE_FOR_STMT.  */
void
finish_for_stmt (tree for_stmt)
{
  end_maybe_infinite_loop (boolean_true_node);
  if (TREE_CODE (for_stmt) == RANGE_FOR_STMT)
    RANGE_FOR_BODY (for_stmt) = do_poplevel (RANGE_FOR_BODY (for_stmt));
  else
    FOR_BODY (for_stmt) = do_poplevel (FOR_BODY (for_stmt));
  /* Pop the scope for the body of the loop.  */
  if (flag_new_for_scope > 0)
    {
      tree scope;
      tree *scope_ptr = (TREE_CODE (for_stmt) == RANGE_FOR_STMT
			 ? &RANGE_FOR_SCOPE (for_stmt)
			 : &FOR_SCOPE (for_stmt));
      scope = *scope_ptr;
      *scope_ptr = NULL;
      add_stmt (do_poplevel (scope));
    }
}
/* Begin a range-for-statement.  Returns a new RANGE_FOR_STMT.
   SCOPE and INIT should be the return of begin_for_scope,
   or both NULL_TREE  .
   To finish it call finish_for_stmt().  */
tree
begin_range_for_stmt (tree scope, tree init)
{
  tree r;
  /* Pessimistically assume no exit; finish_for_stmt closes this.  */
  begin_maybe_infinite_loop (boolean_false_node);
  r = build_stmt (input_location, RANGE_FOR_STMT,
		  NULL_TREE, NULL_TREE, NULL_TREE, NULL_TREE);
  if (scope == NULL_TREE)
    {
      gcc_assert (!init || !(flag_new_for_scope > 0));
      if (!init)
	scope = begin_for_scope (&init);
    }
  /* RANGE_FOR_STMTs do not use nor save the init tree, so we
     pop it now.  */
  if (init)
    pop_stmt_list (init);
  RANGE_FOR_SCOPE (r) = scope;
  return r;
}
/* Finish the head of a range-based for statement, which may
   be given by RANGE_FOR_STMT.  DECL must be the declaration
   and EXPR must be the loop expression.  */
void
finish_range_for_decl (tree range_for_stmt, tree decl, tree expr)
{
  RANGE_FOR_DECL (range_for_stmt) = decl;
  RANGE_FOR_EXPR (range_for_stmt) = expr;
  add_stmt (range_for_stmt);
  RANGE_FOR_BODY (range_for_stmt) = do_pushlevel (sk_block);
}
/* Finish a break-statement.  */
tree
finish_break_stmt (void)
{
  /* In switch statements break is sometimes stylistically used after
     a return statement.  This can lead to spurious warnings about
     control reaching the end of a non-void function when it is
     inlined.  Note that we are calling block_may_fallthru with
     language specific tree nodes; this works because
     block_may_fallthru returns true when given something it does not
     understand.  */
  if (!block_may_fallthru (cur_stmt_list))
    return void_node;
  return add_stmt (build_stmt (input_location, BREAK_STMT));
}
/* Finish a continue-statement.  */
tree
finish_continue_stmt (void)
{
  return add_stmt (build_stmt (input_location, CONTINUE_STMT));
}
/* Begin a switch-statement.  Returns a new SWITCH_STMT if
   appropriate.  */
tree
begin_switch_stmt (void)
{
  tree r, scope;
  /* Like if-statements, the condition gets its own scope.  */
  scope = do_pushlevel (sk_cond);
  r = build_stmt (input_location, SWITCH_STMT, NULL_TREE, NULL_TREE, NULL_TREE, scope);
  begin_cond (&SWITCH_STMT_COND (r));
  return r;
}
/* Finish the cond of a switch-statement.  */
void
finish_switch_cond (tree cond, tree switch_stmt)
{
  tree orig_type = NULL;
  if (check_no_cilk (cond,
		     "Cilk array notation cannot be used as a condition for switch statement",
		     "%<_Cilk_spawn%> statement cannot be used as a condition for switch statement"))
    cond = error_mark_node;
  if (!processing_template_decl)
    {
      /* Convert the condition to an integer or enumeration type.  */
      cond = build_expr_type_conversion (WANT_INT | WANT_ENUM, cond, true);
      if (cond == NULL_TREE)
	{
	  error ("switch quantity not an integer");
	  cond = error_mark_node;
	}
      /* We want unlowered type here to handle enum bit-fields.  */
      orig_type = unlowered_expr_type (cond);
      if (TREE_CODE (orig_type) != ENUMERAL_TYPE)
	orig_type = TREE_TYPE (cond);
      if (cond != error_mark_node)
	{
	  /* [stmt.switch]
	     Integral promotions are performed.  */
	  cond = perform_integral_promotions (cond);
	  cond = maybe_cleanup_point_expr (cond);
	}
    }
  if (check_for_bare_parameter_packs (cond))
    cond = error_mark_node;
  else if (!processing_template_decl && warn_sequence_point)
    verify_sequence_points (cond);
  finish_cond (&SWITCH_STMT_COND (switch_stmt), cond);
  /* Record the (unlowered) type for case-label checking.  */
  SWITCH_STMT_TYPE (switch_stmt) = orig_type;
  add_stmt (switch_stmt);
  push_switch (switch_stmt);
  SWITCH_STMT_BODY (switch_stmt) = push_stmt_list ();
}
/* Finish the body of a switch-statement, which may be given by
   SWITCH_STMT.  The COND to switch on is indicated.  */
void
finish_switch_stmt (tree switch_stmt)
{
  tree scope;
  SWITCH_STMT_BODY (switch_stmt) =
    pop_stmt_list (SWITCH_STMT_BODY (switch_stmt));
  pop_switch ();
  /* Close the condition scope opened in begin_switch_stmt.  */
  scope = SWITCH_STMT_SCOPE (switch_stmt);
  SWITCH_STMT_SCOPE (switch_stmt) = NULL;
  add_stmt (do_poplevel (scope));
}
/* Begin a try-block. Returns a newly-created TRY_BLOCK if
appropriate. */
tree
begin_try_block (void)
{
tree r = build_stmt (input_location, TRY_BLOCK, NULL_TREE, NULL_TREE);
add_stmt (r);
TRY_STMTS (r) = push_stmt_list ();
return r;
}
/* Likewise, for a function-try-block. The block returned in
*COMPOUND_STMT is an artificial outer scope, containing the
function-try-block. */
tree
begin_function_try_block (tree *compound_stmt)
{
tree r;
/* This outer scope does not exist in the C++ standard, but we need
a place to put __FUNCTION__ and similar variables. */
*compound_stmt = begin_compound_stmt (0);
r = begin_try_block ();
FN_TRY_BLOCK_P (r) = 1;
return r;
}
/* Finish a try-block, which may be given by TRY_BLOCK. */
void
finish_try_block (tree try_block)
{
TRY_STMTS (try_block) = pop_stmt_list (TRY_STMTS (try_block));
TRY_HANDLERS (try_block) = push_stmt_list ();
}
/* Finish the body of a cleanup try-block, which may be given by
TRY_BLOCK. */
void
finish_cleanup_try_block (tree try_block)
{
TRY_STMTS (try_block) = pop_stmt_list (TRY_STMTS (try_block));
}
/* Finish an implicitly generated try-block, with a cleanup is given
by CLEANUP. */
void
finish_cleanup (tree cleanup, tree try_block)
{
TRY_HANDLERS (try_block) = cleanup;
CLEANUP_P (try_block) = 1;
}
/* Likewise, for a function-try-block. */
void
finish_function_try_block (tree try_block)
{
finish_try_block (try_block);
/* FIXME : something queer about CTOR_INITIALIZER somehow following
the try block, but moving it inside. */
in_function_try_handler = 1;
}
/* Finish a handler-sequence for a try-block, which may be given by
TRY_BLOCK. */
void
finish_handler_sequence (tree try_block)
{
TRY_HANDLERS (try_block) = pop_stmt_list (TRY_HANDLERS (try_block));
check_handlers (TRY_HANDLERS (try_block));
}
/* Finish the handler-seq for a function-try-block, given by
TRY_BLOCK. COMPOUND_STMT is the outer block created by
begin_function_try_block. */
void
finish_function_handler_sequence (tree try_block, tree compound_stmt)
{
in_function_try_handler = 0;
finish_handler_sequence (try_block);
finish_compound_stmt (compound_stmt);
}
/* Begin a handler. Returns a HANDLER if appropriate. */
tree
begin_handler (void)
{
tree r;
r = build_stmt (input_location, HANDLER, NULL_TREE, NULL_TREE);
add_stmt (r);
/* Create a binding level for the eh_info and the exception object
cleanup. */
HANDLER_BODY (r) = do_pushlevel (sk_catch);
return r;
}
/* Finish the handler-parameters for a handler, which may be given by
HANDLER. DECL is the declaration for the catch parameter, or NULL
if this is a `catch (...)' clause. */
void
finish_handler_parms (tree decl, tree handler)
{
/* TYPE stays NULL_TREE for `catch (...)' (and, in a template, also
when there is no DECL); HANDLER_TYPE records what this handler
catches.  */
tree type = NULL_TREE;
if (processing_template_decl)
{
if (decl)
{
/* In a template we only register the parameter; the actual
catch-block expansion happens at instantiation time.  */
decl = pushdecl (decl);
decl = push_template_decl (decl);
HANDLER_PARMS (handler) = decl;
type = TREE_TYPE (decl);
}
}
else
type = expand_start_catch_block (decl);
HANDLER_TYPE (handler) = type;
}
/* Finish a handler, which may be given by HANDLER. The BLOCKs are
the return value from the matching call to finish_handler_parms. */
void
finish_handler (tree handler)
{
/* Outside a template, close the catch block opened by
expand_start_catch_block; then pop the sk_catch binding level
pushed in begin_handler.  */
if (!processing_template_decl)
expand_end_catch_block ();
HANDLER_BODY (handler) = do_poplevel (HANDLER_BODY (handler));
}
/* Begin a compound statement. FLAGS contains some bits that control the
behavior and context. If BCS_NO_SCOPE is set, the compound statement
does not define a scope. If BCS_FN_BODY is set, this is the outermost
block of a function. If BCS_TRY_BLOCK is set, this is the block
created on behalf of a TRY statement. Returns a token to be passed to
finish_compound_stmt. */
tree
begin_compound_stmt (unsigned int flags)
{
tree r;
if (flags & BCS_NO_SCOPE)
{
/* Scopeless block: just a statement list, no binding level.  */
r = push_stmt_list ();
STATEMENT_LIST_NO_SCOPE (r) = 1;
/* Normally, we try hard to keep the BLOCK for a statement-expression.
But, if it's a statement-expression with a scopeless block, there's
nothing to keep, and we don't want to accidentally keep a block
*inside* the scopeless block. */
keep_next_level (false);
}
else
{
/* Choose the scope kind from the flags; plain blocks use sk_block.  */
scope_kind sk = sk_block;
if (flags & BCS_TRY_BLOCK)
sk = sk_try;
else if (flags & BCS_TRANSACTION)
sk = sk_transaction;
r = do_pushlevel (sk);
}
/* When processing a template, we need to remember where the braces were,
so that we can set up identical scopes when instantiating the template
later. BIND_EXPR is a handy candidate for this.
Note that do_poplevel won't create a BIND_EXPR itself here (and thus
result in nested BIND_EXPRs), since we don't build BLOCK nodes when
processing templates. */
if (processing_template_decl)
{
r = build3 (BIND_EXPR, NULL, NULL, r, NULL);
BIND_EXPR_TRY_BLOCK (r) = (flags & BCS_TRY_BLOCK) != 0;
BIND_EXPR_BODY_BLOCK (r) = (flags & BCS_FN_BODY) != 0;
TREE_SIDE_EFFECTS (r) = 1;
}
return r;
}
/* Finish a compound-statement, which is given by STMT. */
void
finish_compound_stmt (tree stmt)
{
if (TREE_CODE (stmt) == BIND_EXPR)
{
/* Template case: STMT is the BIND_EXPR wrapper made by
begin_compound_stmt.  */
tree body = do_poplevel (BIND_EXPR_BODY (stmt));
/* If the STATEMENT_LIST is empty and this BIND_EXPR isn't special,
discard the BIND_EXPR so it can be merged with the containing
STATEMENT_LIST. */
if (TREE_CODE (body) == STATEMENT_LIST
&& STATEMENT_LIST_HEAD (body) == NULL
&& !BIND_EXPR_BODY_BLOCK (stmt)
&& !BIND_EXPR_TRY_BLOCK (stmt))
stmt = body;
else
BIND_EXPR_BODY (stmt) = body;
}
else if (STATEMENT_LIST_NO_SCOPE (stmt))
stmt = pop_stmt_list (stmt);
else
{
/* Destroy any ObjC "super" receivers that may have been
created. */
objc_clear_super_receiver ();
stmt = do_poplevel (stmt);
}
/* ??? See c_end_compound_stmt wrt statement expressions. */
add_stmt (stmt);
}
/* Finish an asm-statement, whose components are a STRING, some
OUTPUT_OPERANDS, some INPUT_OPERANDS, some CLOBBERS and some
LABELS. Also note whether the asm-statement should be
considered volatile. */
tree
finish_asm_stmt (int volatile_p, tree string, tree output_operands,
tree input_operands, tree clobbers, tree labels)
{
tree r;
tree t;
int ninputs = list_length (input_operands);
int noutputs = list_length (output_operands);
/* Operand validation only happens outside templates; in a template
the operands may be dependent and are checked at instantiation.  */
if (!processing_template_decl)
{
const char *constraint;
const char **oconstraints;
bool allows_mem, allows_reg, is_inout;
tree operand;
int i;
oconstraints = XALLOCAVEC (const char *, noutputs);
string = resolve_asm_operand_names (string, output_operands,
input_operands, labels);
/* First pass: validate each output operand and its constraint,
recording the constraint strings for the input pass below.  */
for (i = 0, t = output_operands; t; t = TREE_CHAIN (t), ++i)
{
operand = TREE_VALUE (t);
/* ??? Really, this should not be here. Users should be using a
proper lvalue, dammit. But there's a long history of using
casts in the output operands. In cases like longlong.h, this
becomes a primitive form of typechecking -- if the cast can be
removed, then the output operand had a type of the proper width;
otherwise we'll get an error. Gross, but ... */
STRIP_NOPS (operand);
operand = mark_lvalue_use (operand);
if (!lvalue_or_else (operand, lv_asm, tf_warning_or_error))
operand = error_mark_node;
if (operand != error_mark_node
&& (TREE_READONLY (operand)
|| CP_TYPE_CONST_P (TREE_TYPE (operand))
/* Functions are not modifiable, even though they are
lvalues. */
|| TREE_CODE (TREE_TYPE (operand)) == FUNCTION_TYPE
|| TREE_CODE (TREE_TYPE (operand)) == METHOD_TYPE
/* If it's an aggregate and any field is const, then it is
effectively const. */
|| (CLASS_TYPE_P (TREE_TYPE (operand))
&& C_TYPE_FIELDS_READONLY (TREE_TYPE (operand)))))
cxx_readonly_error (operand, lv_asm);
constraint = TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (t)));
oconstraints[i] = constraint;
if (parse_output_constraint (&constraint, i, ninputs, noutputs,
&allows_mem, &allows_reg, &is_inout))
{
/* If the operand is going to end up in memory,
mark it addressable. */
if (!allows_reg && !cxx_mark_addressable (operand))
operand = error_mark_node;
}
else
operand = error_mark_node;
TREE_VALUE (t) = operand;
}
/* Second pass: validate the input operands against their
constraints and the recorded output constraints.  */
for (i = 0, t = input_operands; t; ++i, t = TREE_CHAIN (t))
{
constraint = TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (t)));
bool constraint_parsed
= parse_input_constraint (&constraint, i, ninputs, noutputs, 0,
oconstraints, &allows_mem, &allows_reg);
/* If the operand is going to end up in memory, don't call
decay_conversion. */
if (constraint_parsed && !allows_reg && allows_mem)
operand = mark_lvalue_use (TREE_VALUE (t));
else
operand = decay_conversion (TREE_VALUE (t), tf_warning_or_error);
/* If the type of the operand hasn't been determined (e.g.,
because it involves an overloaded function), then issue
an error message. There's no context available to
resolve the overloading. */
if (TREE_TYPE (operand) == unknown_type_node)
{
error ("type of asm operand %qE could not be determined",
TREE_VALUE (t));
operand = error_mark_node;
}
if (constraint_parsed)
{
/* If the operand is going to end up in memory,
mark it addressable. */
if (!allows_reg && allows_mem)
{
/* Strip the nops as we allow this case. FIXME, this really
should be rejected or made deprecated. */
STRIP_NOPS (operand);
if (!cxx_mark_addressable (operand))
operand = error_mark_node;
}
else if (!allows_reg && !allows_mem)
{
/* If constraint allows neither register nor memory,
try harder to get a constant. */
tree constop = maybe_constant_value (operand);
if (TREE_CONSTANT (constop))
operand = constop;
}
}
else
operand = error_mark_node;
TREE_VALUE (t) = operand;
}
}
r = build_stmt (input_location, ASM_EXPR, string,
output_operands, input_operands,
clobbers, labels);
/* An asm with no outputs is implicitly volatile: it is presumably
executed for its side effects alone.  */
ASM_VOLATILE_P (r) = volatile_p || noutputs == 0;
r = maybe_cleanup_point_expr_void (r);
return add_stmt (r);
}
/* Finish a label with the indicated NAME. Returns the new label. */
tree
finish_label_stmt (tree name)
{
tree decl = define_label (input_location, name);
/* define_label diagnoses duplicates; propagate its failure.  */
if (decl == error_mark_node)
return error_mark_node;
add_stmt (build_stmt (input_location, LABEL_EXPR, decl));
return decl;
}
/* Finish a series of declarations for local labels. G++ allows users
to declare "local" labels, i.e., labels with scope. This extension
is useful when writing code involving statement-expressions. */
void
finish_label_decl (tree name)
{
/* __label__ is only meaningful inside a function body.  */
if (!at_function_scope_p ())
{
error ("__label__ declarations are only allowed in function scopes");
return;
}
add_decl_expr (declare_local_label (name));
}
/* When DECL goes out of scope, make sure that CLEANUP is executed. */
void
finish_decl_cleanup (tree decl, tree cleanup)
{
/* Third argument false: run the cleanup on every scope exit, not
only on exception paths (contrast finish_eh_cleanup).  */
push_cleanup (decl, cleanup, false);
}
/* If the current scope exits with an exception, run CLEANUP. */
void
finish_eh_cleanup (tree cleanup)
{
/* Third argument true: the cleanup is exception-only; NULL means it
is not tied to any particular declaration.  */
push_cleanup (NULL, cleanup, true);
}
/* The MEM_INITS is a list of mem-initializers, in reverse of the
order they were written by the user. Each node is as for
emit_mem_initializers. */
void
finish_mem_initializers (tree mem_inits)
{
/* Reorder the MEM_INITS so that they are in the order they appeared
in the source program. */
mem_inits = nreverse (mem_inits);
if (processing_template_decl)
{
tree mem;
for (mem = mem_inits; mem; mem = TREE_CHAIN (mem))
{
/* If the TREE_PURPOSE is a TYPE_PACK_EXPANSION, skip the
check for bare parameter packs in the TREE_VALUE, because
any parameter packs in the TREE_VALUE have already been
bound as part of the TREE_PURPOSE. See
make_pack_expansion for more information. */
if (TREE_CODE (TREE_PURPOSE (mem)) != TYPE_PACK_EXPANSION
&& check_for_bare_parameter_packs (TREE_VALUE (mem)))
TREE_VALUE (mem) = error_mark_node;
}
/* In a template, record the initializers for instantiation time
rather than emitting them now.  */
add_stmt (build_min_nt_loc (UNKNOWN_LOCATION,
CTOR_INITIALIZER, mem_inits));
}
else
emit_mem_initializers (mem_inits);
}
/* Obfuscate EXPR if it looks like an id-expression or member access so
that the call to finish_decltype in do_auto_deduction will give the
right result. */
tree
force_paren_expr (tree expr)
{
/* This is only needed for decltype(auto) in C++14. */
if (cxx_dialect < cxx14)
return expr;
/* If we're in unevaluated context, we can't be deducing a
return/initializer type, so we don't need to mess with this. */
if (cp_unevaluated_operand)
return expr;
/* Only id-expressions and member accesses need the treatment.  */
if (!DECL_P (expr) && TREE_CODE (expr) != COMPONENT_REF
&& TREE_CODE (expr) != SCOPE_REF)
return expr;
if (TREE_CODE (expr) == COMPONENT_REF
|| TREE_CODE (expr) == SCOPE_REF)
REF_PARENTHESIZED_P (expr) = true;
else if (type_dependent_expression_p (expr))
expr = build1 (PAREN_EXPR, TREE_TYPE (expr), expr);
else if (VAR_P (expr) && DECL_HARD_REGISTER (expr))
/* We can't bind a hard register variable to a reference. */;
else
{
cp_lvalue_kind kind = lvalue_kind (expr);
if ((kind & ~clk_class) != clk_none)
{
/* Wrap the expression in a reference-typed static_cast so the
parenthesization survives for decltype; maybe_undo_parenthesized_ref
can recover the original id-expression later.  */
tree type = unlowered_expr_type (expr);
bool rval = !!(kind & clk_rvalueref);
type = cp_build_reference_type (type, rval);
/* This inhibits warnings in, eg, cxx_mark_addressable
(c++/60955). */
warning_sentinel s (extra_warnings);
expr = build_static_cast (type, expr, tf_error);
if (expr != error_mark_node)
REF_PARENTHESIZED_P (expr) = true;
}
}
return expr;
}
/* If T is an id-expression obfuscated by force_paren_expr, undo the
obfuscation and return the underlying id-expression. Otherwise
return T. */
tree
maybe_undo_parenthesized_ref (tree t)
{
/* force_paren_expr only produces these wrappers for C++14 and later,
and marks the resulting INDIRECT_REF with REF_PARENTHESIZED_P.  */
if (cxx_dialect < cxx14
|| !INDIRECT_REF_P (t)
|| !REF_PARENTHESIZED_P (t))
return t;
/* Peel the dereference, then any conversion wrappers, to reach the
ADDR_EXPR or STATIC_CAST_EXPR introduced by force_paren_expr.  */
tree inner = TREE_OPERAND (t, 0);
while (TREE_CODE (inner) == NON_LVALUE_EXPR
|| TREE_CODE (inner) == NOP_EXPR)
inner = TREE_OPERAND (inner, 0);
gcc_assert (TREE_CODE (inner) == ADDR_EXPR
|| TREE_CODE (inner) == STATIC_CAST_EXPR);
return TREE_OPERAND (inner, 0);
}
/* Finish a parenthesized expression EXPR. */
cp_expr
finish_parenthesized_expr (cp_expr expr)
{
if (EXPR_P (expr))
/* This inhibits warnings in c_common_truthvalue_conversion. */
TREE_NO_WARNING (expr) = 1;
if (TREE_CODE (expr) == OFFSET_REF
|| TREE_CODE (expr) == SCOPE_REF)
/* [expr.unary.op]/3 The qualified id of a pointer-to-member must not be
enclosed in parentheses. */
PTRMEM_OK_P (expr) = 0;
if (TREE_CODE (expr) == STRING_CST)
PAREN_STRING_LITERAL_P (expr) = 1;
/* force_paren_expr may wrap EXPR for decltype(auto); keep EXPR's
original location on the result.  */
expr = cp_expr (force_paren_expr (expr), expr.get_location ());
return expr;
}
/* Finish a reference to a non-static data member (DECL) that is not
preceded by `.' or `->'. */
tree
finish_non_static_data_member (tree decl, tree object, tree qualifying_scope)
{
gcc_assert (TREE_CODE (decl) == FIELD_DECL);
/* Remember whether to consult the OpenMP private-member map after the
access has been built (only when no explicit object was given).  */
bool try_omp_private = !object && omp_private_member_map;
tree ret;
if (!object)
{
/* No explicit object: synthesize one from the qualifying scope or
from the class in which DECL was declared.  */
tree scope = qualifying_scope;
if (scope == NULL_TREE)
scope = context_for_name_lookup (decl);
object = maybe_dummy_object (scope, NULL);
}
object = maybe_resolve_dummy (object, true);
if (object == error_mark_node)
return error_mark_node;
/* DR 613/850: Can use non-static data members without an associated
object in sizeof/decltype/alignof. */
if (is_dummy_object (object) && cp_unevaluated_operand == 0
&& (!processing_template_decl || !current_class_ref))
{
if (current_function_decl
&& DECL_STATIC_FUNCTION_P (current_function_decl))
error ("invalid use of member %qD in static member function", decl);
else
error ("invalid use of non-static data member %qD", decl);
inform (DECL_SOURCE_LOCATION (decl), "declared here");
return error_mark_node;
}
if (current_class_ptr)
TREE_USED (current_class_ptr) = 1;
if (processing_template_decl && !qualifying_scope)
{
/* Build a COMPONENT_REF with the right cv-qualified type so that
dependent uses type-check during template parsing.  */
tree type = TREE_TYPE (decl);
if (TREE_CODE (type) == REFERENCE_TYPE)
/* Quals on the object don't matter. */;
else if (PACK_EXPANSION_P (type))
/* Don't bother trying to represent this. */
type = NULL_TREE;
else
{
/* Set the cv qualifiers. */
int quals = cp_type_quals (TREE_TYPE (object));
if (DECL_MUTABLE_P (decl))
quals &= ~TYPE_QUAL_CONST;
quals |= cp_type_quals (TREE_TYPE (decl));
type = cp_build_qualified_type (type, quals);
}
ret = (convert_from_reference
(build_min (COMPONENT_REF, type, object, decl, NULL_TREE)));
}
/* If PROCESSING_TEMPLATE_DECL is nonzero here, then
QUALIFYING_SCOPE is also non-null. Wrap this in a SCOPE_REF
for now. */
else if (processing_template_decl)
ret = build_qualified_name (TREE_TYPE (decl),
qualifying_scope,
decl,
/*template_p=*/false);
else
{
tree access_type = TREE_TYPE (object);
perform_or_defer_access_check (TYPE_BINFO (access_type), decl,
decl, tf_warning_or_error);
/* If the data member was named `C::M', convert `*this' to `C'
first. */
if (qualifying_scope)
{
tree binfo = NULL_TREE;
object = build_scoped_ref (object, qualifying_scope,
&binfo);
}
ret = build_class_member_access_expr (object, decl,
/*access_path=*/NULL_TREE,
/*preserve_reference=*/false,
tf_warning_or_error);
}
/* If this member is privatized in the enclosing OpenMP construct,
substitute the private copy.  */
if (try_omp_private)
{
tree *v = omp_private_member_map->get (decl);
if (v)
ret = convert_from_reference (*v);
}
return ret;
}
/* If we are currently parsing a template and we encountered a typedef
TYPEDEF_DECL that is being accessed though CONTEXT, this function
adds the typedef to a list tied to the current template.
At template instantiation time, that list is walked and access check
performed for each typedef.
LOCATION is the location of the usage point of TYPEDEF_DECL. */
void
add_typedef_to_current_template_for_access_check (tree typedef_decl,
tree context,
location_t location)
{
tree template_info = NULL;
tree cs = current_scope ();
/* Nothing to record unless we have a typedef accessed through a
class, from within some scope.  */
if (!is_typedef_decl (typedef_decl)
|| !context
|| !CLASS_TYPE_P (context)
|| !cs)
return;
if (CLASS_TYPE_P (cs) || TREE_CODE (cs) == FUNCTION_DECL)
template_info = get_template_info (cs);
/* Only defer the check when we really are inside a template, and
the access is not from within CONTEXT itself (where access is
trivially OK).  */
if (template_info
&& TI_TEMPLATE (template_info)
&& !currently_open_class (context))
append_type_to_template_for_access_check (cs, typedef_decl,
context, location);
}
/* DECL was the declaration to which a qualified-id resolved. Issue
an error message if it is not accessible. If OBJECT_TYPE is
non-NULL, we have just seen `x->' or `x.' and OBJECT_TYPE is the
type of `*x', or `x', respectively. If the DECL was named as
`A::B' then NESTED_NAME_SPECIFIER is `A'. */
void
check_accessibility_of_qualified_id (tree decl,
tree object_type,
tree nested_name_specifier)
{
tree scope;
tree qualifying_type = NULL_TREE;
/* If we are parsing a template declaration and if decl is a typedef,
add it to a list tied to the template.
At template instantiation time, that list will be walked and
access check performed. */
add_typedef_to_current_template_for_access_check (decl,
nested_name_specifier
? nested_name_specifier
: DECL_CONTEXT (decl),
input_location);
/* If we're not checking, return immediately. */
if (deferred_access_no_check)
return;
/* Determine the SCOPE of DECL. */
scope = context_for_name_lookup (decl);
/* If the SCOPE is not a type, then DECL is not a member. */
if (!TYPE_P (scope))
return;
/* Compute the scope through which DECL is being accessed. */
if (object_type
/* OBJECT_TYPE might not be a class type; consider:
class A { typedef int I; };
I *p;
p->A::I::~I();
In this case, we will have "A::I" as the DECL, but "I" as the
OBJECT_TYPE. */
&& CLASS_TYPE_P (object_type)
&& DERIVED_FROM_P (scope, object_type))
/* If we are processing a `->' or `.' expression, use the type of the
left-hand side. */
qualifying_type = object_type;
else if (nested_name_specifier)
{
/* If the reference is to a non-static member of the
current class, treat it as if it were referenced through
`this'. */
tree ct;
if (DECL_NONSTATIC_MEMBER_P (decl)
&& current_class_ptr
&& DERIVED_FROM_P (scope, ct = current_nonlambda_class_type ()))
qualifying_type = ct;
/* Otherwise, use the type indicated by the
nested-name-specifier. */
else
qualifying_type = nested_name_specifier;
}
else
/* Otherwise, the name must be from the current class or one of
its bases. */
qualifying_type = currently_open_derived_class (scope);
if (qualifying_type
/* It is possible for qualifying type to be a TEMPLATE_TYPE_PARM
or similar in a default argument value. */
&& CLASS_TYPE_P (qualifying_type)
&& !dependent_type_p (qualifying_type))
perform_or_defer_access_check (TYPE_BINFO (qualifying_type), decl,
decl, tf_warning_or_error);
}
/* EXPR is the result of a qualified-id. The QUALIFYING_CLASS was the
class named to the left of the "::" operator. DONE is true if this
expression is a complete postfix-expression; it is false if this
expression is followed by '->', '[', '(', etc. ADDRESS_P is true
iff this expression is the operand of '&'. TEMPLATE_P is true iff
the qualified-id was of the form "A::template B". TEMPLATE_ARG_P
is true iff this qualified name appears as a template argument. */
tree
finish_qualified_id_expr (tree qualifying_class,
tree expr,
bool done,
bool address_p,
bool template_p,
bool template_arg_p,
tsubst_flags_t complain)
{
gcc_assert (TYPE_P (qualifying_class));
if (error_operand_p (expr))
return error_mark_node;
if ((DECL_P (expr) || BASELINK_P (expr))
&& !mark_used (expr, complain))
return error_mark_node;
if (template_p)
{
if (TREE_CODE (expr) == UNBOUND_CLASS_TEMPLATE)
/* cp_parser_lookup_name thought we were looking for a type,
but we're actually looking for a declaration. */
expr = build_qualified_name (/*type*/NULL_TREE,
TYPE_CONTEXT (expr),
TYPE_IDENTIFIER (expr),
/*template_p*/true);
else
check_template_keyword (expr);
}
/* If EXPR occurs as the operand of '&', use special handling that
permits a pointer-to-member. */
if (address_p && done)
{
if (TREE_CODE (expr) == SCOPE_REF)
expr = TREE_OPERAND (expr, 1);
expr = build_offset_ref (qualifying_class, expr,
/*address_p=*/true, complain);
return expr;
}
/* No need to check access within an enum. */
if (TREE_CODE (qualifying_class) == ENUMERAL_TYPE)
return expr;
/* Within the scope of a class, turn references to non-static
members into expression of the form "this->...". */
if (template_arg_p)
/* But, within a template argument, we do not want make the
transformation, as there is no "this" pointer. */
;
else if (TREE_CODE (expr) == FIELD_DECL)
{
/* Accessibility was already checked when the qualified-id was
resolved; suppress a second check here.  */
push_deferring_access_checks (dk_no_check);
expr = finish_non_static_data_member (expr, NULL_TREE,
qualifying_class);
pop_deferring_access_checks ();
}
else if (BASELINK_P (expr)
&& (!processing_template_decl
|| parsing_default_capturing_generic_lambda ()))
{
/* See if any of the functions are non-static members. */
/* If so, the expression may be relative to 'this'. */
if (!shared_member_p (expr)
&& current_class_ptr
&& DERIVED_FROM_P (qualifying_class,
current_nonlambda_class_type ()))
expr = (build_class_member_access_expr
(maybe_dummy_object (qualifying_class, NULL),
expr,
BASELINK_ACCESS_BINFO (expr),
/*preserve_reference=*/false,
complain));
else if (done)
/* The expression is a qualified name whose address is not
being taken. */
expr = build_offset_ref (qualifying_class, expr, /*address_p=*/false,
complain);
}
else if (BASELINK_P (expr))
;
else
{
/* In a template, return a SCOPE_REF for most qualified-ids
so that we can check access at instantiation time. But if
we're looking at a member of the current instantiation, we
know we have access and building up the SCOPE_REF confuses
non-type template argument handling. */
if (processing_template_decl
&& !currently_open_class (qualifying_class))
expr = build_qualified_name (TREE_TYPE (expr),
qualifying_class, expr,
template_p);
expr = convert_from_reference (expr);
}
return expr;
}
/* Begin a statement-expression. The value returned must be passed to
finish_stmt_expr. */
tree
begin_stmt_expr (void)
{
/* The statement-expression's statements accumulate on this list.  */
return push_stmt_list ();
}
/* Process the final expression of a statement expression. EXPR can be
NULL, if the final expression is empty. Return a STATEMENT_LIST
containing all the statements in the statement-expression, or
ERROR_MARK_NODE if there was an error. */
tree
finish_stmt_expr_expr (tree expr, tree stmt_expr)
{
if (error_operand_p (expr))
{
/* The type of the statement-expression is the type of the last
expression. */
TREE_TYPE (stmt_expr) = error_mark_node;
return error_mark_node;
}
/* If the last statement does not have "void" type, then the value
of the last statement is the value of the entire expression. */
if (expr)
{
tree type = TREE_TYPE (expr);
if (processing_template_decl)
{
expr = build_stmt (input_location, EXPR_STMT, expr);
expr = add_stmt (expr);
/* Mark the last statement so that we can recognize it as such at
template-instantiation time. */
EXPR_STMT_STMT_EXPR_RESULT (expr) = 1;
}
else if (VOID_TYPE_P (type))
{
/* Just treat this like an ordinary statement. */
expr = finish_expr_stmt (expr);
}
else
{
/* It actually has a value we need to deal with. First, force it
to be an rvalue so that we won't need to build up a copy
constructor call later when we try to assign it to something. */
expr = force_rvalue (expr, tf_warning_or_error);
if (error_operand_p (expr))
return error_mark_node;
/* Update for array-to-pointer decay. */
type = TREE_TYPE (expr);
/* Wrap it in a CLEANUP_POINT_EXPR and add it to the list like a
normal statement, but don't convert to void or actually add
the EXPR_STMT. */
if (TREE_CODE (expr) != CLEANUP_POINT_EXPR)
expr = maybe_cleanup_point_expr (expr);
add_stmt (expr);
}
/* The type of the statement-expression is the type of the last
expression. */
TREE_TYPE (stmt_expr) = type;
}
return stmt_expr;
}
/* Finish a statement-expression. EXPR should be the value returned
by the previous begin_stmt_expr. Returns an expression
representing the statement-expression. */
tree
finish_stmt_expr (tree stmt_expr, bool has_no_scope)
{
tree type;
tree result;
if (error_operand_p (stmt_expr))
{
/* Still pop the list pushed by begin_stmt_expr so the stack of
statement lists stays balanced.  */
pop_stmt_list (stmt_expr);
return error_mark_node;
}
gcc_assert (TREE_CODE (stmt_expr) == STATEMENT_LIST);
/* The type was recorded on the list by finish_stmt_expr_expr; keep
it on the popped result.  */
type = TREE_TYPE (stmt_expr);
result = pop_stmt_list (stmt_expr);
TREE_TYPE (result) = type;
if (processing_template_decl)
{
result = build_min (STMT_EXPR, type, result);
TREE_SIDE_EFFECTS (result) = 1;
STMT_EXPR_NO_SCOPE (result) = has_no_scope;
}
else if (CLASS_TYPE_P (type))
{
/* Wrap the statement-expression in a TARGET_EXPR so that the
temporary object created by the final expression is destroyed at
the end of the full-expression containing the
statement-expression. */
result = force_target_expr (type, result, tf_warning_or_error);
}
return result;
}
/* Returns the expression which provides the value of STMT_EXPR. */
tree
stmt_expr_value_expr (tree stmt_expr)
{
/* Successively unwrap the layers that may surround the value: the
STMT_EXPR body, an optional BIND_EXPR, the tail of the statement
list, and finally an EXPR_STMT wrapper.  */
tree inner = STMT_EXPR_STMT (stmt_expr);
if (TREE_CODE (inner) == BIND_EXPR)
inner = BIND_EXPR_BODY (inner);
if (TREE_CODE (inner) == STATEMENT_LIST && STATEMENT_LIST_TAIL (inner))
inner = STATEMENT_LIST_TAIL (inner)->stmt;
if (TREE_CODE (inner) == EXPR_STMT)
inner = EXPR_STMT_EXPR (inner);
return inner;
}
/* Return TRUE iff EXPR_STMT is an empty list of
expression statements. */
bool
empty_expr_stmt_p (tree expr_stmt)
{
/* void_node denotes an explicitly empty statement.  */
if (expr_stmt == void_node)
return true;
if (expr_stmt == NULL_TREE)
return false;
/* Find the underlying body: unwrap an EXPR_STMT, or take a
STATEMENT_LIST as-is; anything else is not empty.  */
tree body;
if (TREE_CODE (expr_stmt) == EXPR_STMT)
body = EXPR_STMT_EXPR (expr_stmt);
else if (TREE_CODE (expr_stmt) == STATEMENT_LIST)
body = expr_stmt;
else
return false;
if (body == NULL_TREE)
return false;
/* A statement list is empty when its iterator starts at the end;
otherwise recurse into the unwrapped body.  */
if (TREE_CODE (body) == STATEMENT_LIST)
return tsi_end_p (tsi_start (body));
return empty_expr_stmt_p (body);
}
/* Perform Koenig lookup. FN is the postfix-expression representing
the function (or functions) to call; ARGS are the arguments to the
call. Returns the functions to be considered by overload resolution. */
cp_expr
perform_koenig_lookup (cp_expr fn, vec<tree, va_gc> *args,
tsubst_flags_t complain)
{
tree identifier = NULL_TREE;
tree functions = NULL_TREE;
tree tmpl_args = NULL_TREE;
bool template_id = false;
location_t loc = fn.get_location ();
if (TREE_CODE (fn) == TEMPLATE_ID_EXPR)
{
/* Use a separate flag to handle null args. */
template_id = true;
tmpl_args = TREE_OPERAND (fn, 1);
fn = TREE_OPERAND (fn, 0);
}
/* Find the name of the overloaded function. */
if (identifier_p (fn))
identifier = fn;
else if (is_overloaded_fn (fn))
{
functions = fn;
identifier = DECL_NAME (get_first_fn (functions));
}
else if (DECL_P (fn))
{
functions = fn;
identifier = DECL_NAME (fn);
}
/* A call to a namespace-scope function using an unqualified name.
Do Koenig lookup -- unless any of the arguments are
type-dependent. */
if (!any_type_dependent_arguments_p (args)
&& !any_dependent_template_arguments_p (tmpl_args))
{
fn = lookup_arg_dependent (identifier, functions, args);
if (!fn)
{
/* The unqualified name could not be resolved. */
if (complain & tf_error)
fn = unqualified_fn_lookup_error (cp_expr (identifier, loc));
else
fn = identifier;
}
}
/* Re-wrap a template-id that was unwrapped above.  */
if (fn && template_id && fn != error_mark_node)
fn = build2 (TEMPLATE_ID_EXPR, unknown_type_node, fn, tmpl_args);
return fn;
}
/* Generate an expression for `FN (ARGS)'. This may change the
contents of ARGS.
If DISALLOW_VIRTUAL is true, the call to FN will be not generated
as a virtual call, even if FN is virtual. (This flag is set when
encountering an expression where the function name is explicitly
qualified. For example a call to `X::f' never generates a virtual
call.)
Returns code for the call. */
tree
finish_call_expr (tree fn, vec<tree, va_gc> **args, bool disallow_virtual,
bool koenig_p, tsubst_flags_t complain)
{
tree result;
tree orig_fn;
vec<tree, va_gc> *orig_args = NULL;
if (fn == error_mark_node)
return error_mark_node;
gcc_assert (!TYPE_P (fn));
/* If FN may be a FUNCTION_DECL obfuscated by force_paren_expr, undo
it so that we can tell this is a call to a known function. */
fn = maybe_undo_parenthesized_ref (fn);
orig_fn = fn;
if (processing_template_decl)
{
/* If FN is a local extern declaration or set thereof, look them up
again at instantiation time. */
if (is_overloaded_fn (fn))
{
tree ifn = get_first_fn (fn);
if (TREE_CODE (ifn) == FUNCTION_DECL
&& DECL_LOCAL_FUNCTION_P (ifn))
orig_fn = DECL_NAME (ifn);
}
/* If the call expression is dependent, build a CALL_EXPR node
with no type; type_dependent_expression_p recognizes
expressions with no type as being dependent. */
if (type_dependent_expression_p (fn)
|| any_type_dependent_arguments_p (*args))
{
result = build_nt_call_vec (orig_fn, *args);
SET_EXPR_LOCATION (result, EXPR_LOC_OR_LOC (fn, input_location));
KOENIG_LOOKUP_P (result) = koenig_p;
if (cfun)
{
/* If every candidate is a noreturn ("volatile") function,
the current function cannot fall through this call.  */
do
{
tree fndecl = OVL_CURRENT (fn);
if (TREE_CODE (fndecl) != FUNCTION_DECL
|| !TREE_THIS_VOLATILE (fndecl))
break;
fn = OVL_NEXT (fn);
}
while (fn);
if (!fn)
current_function_returns_abnormally = 1;
}
return result;
}
/* Non-dependent call in a template: keep the original arguments
for re-building the call below, then strip dependence.  */
orig_args = make_tree_vector_copy (*args);
if (!BASELINK_P (fn)
&& TREE_CODE (fn) != PSEUDO_DTOR_EXPR
&& TREE_TYPE (fn) != unknown_type_node)
fn = build_non_dependent_expr (fn);
make_args_non_dependent (*args);
}
/* obj.member (...) or obj->member (...): a member function call.  */
if (TREE_CODE (fn) == COMPONENT_REF)
{
tree member = TREE_OPERAND (fn, 1);
if (BASELINK_P (member))
{
tree object = TREE_OPERAND (fn, 0);
return build_new_method_call (object, member,
args, NULL_TREE,
(disallow_virtual
? LOOKUP_NORMAL | LOOKUP_NONVIRTUAL
: LOOKUP_NORMAL),
/*fn_p=*/NULL,
complain);
}
}
/* Per 13.3.1.1, '(&f)(...)' is the same as '(f)(...)'. */
if (TREE_CODE (fn) == ADDR_EXPR
&& TREE_CODE (TREE_OPERAND (fn, 0)) == OVERLOAD)
fn = TREE_OPERAND (fn, 0);
if (is_overloaded_fn (fn))
fn = baselink_for_fns (fn);
result = NULL_TREE;
if (BASELINK_P (fn))
{
tree object;
/* A call to a member function. From [over.call.func]:
If the keyword this is in scope and refers to the class of
that member function, or a derived class thereof, then the
function call is transformed into a qualified function call
using (*this) as the postfix-expression to the left of the
. operator.... [Otherwise] a contrived object of type T
becomes the implied object argument.
In this situation:
struct A { void f(); };
struct B : public A {};
struct C : public A { void g() { B::f(); }};
"the class of that member function" refers to `A'. But 11.2
[class.access.base] says that we need to convert 'this' to B* as
part of the access, so we pass 'B' to maybe_dummy_object. */
if (DECL_MAYBE_IN_CHARGE_CONSTRUCTOR_P (get_first_fn (fn)))
{
/* A constructor call always uses a dummy object. (This constructor
call which has the form A::A () is actually invalid and we are
going to reject it later in build_new_method_call.) */
object = build_dummy_object (BINFO_TYPE (BASELINK_ACCESS_BINFO (fn)));
}
else
object = maybe_dummy_object (BINFO_TYPE (BASELINK_ACCESS_BINFO (fn)),
NULL);
result = build_new_method_call (object, fn, args, NULL_TREE,
(disallow_virtual
? LOOKUP_NORMAL|LOOKUP_NONVIRTUAL
: LOOKUP_NORMAL),
/*fn_p=*/NULL,
complain);
}
else if (is_overloaded_fn (fn))
{
/* If the function is an overloaded builtin, resolve it. */
if (TREE_CODE (fn) == FUNCTION_DECL
&& (DECL_BUILT_IN_CLASS (fn) == BUILT_IN_NORMAL
|| DECL_BUILT_IN_CLASS (fn) == BUILT_IN_MD))
result = resolve_overloaded_builtin (input_location, fn, *args);
if (!result)
{
/* Warn about sizeof-of-pointer mistakes in memcpy-like calls
(-Wsizeof-pointer-memaccess); only the first three arguments
are inspected.  */
if (warn_sizeof_pointer_memaccess
&& (complain & tf_warning)
&& !vec_safe_is_empty (*args)
&& !processing_template_decl)
{
location_t sizeof_arg_loc[3];
tree sizeof_arg[3];
unsigned int i;
for (i = 0; i < 3; i++)
{
tree t;
sizeof_arg_loc[i] = UNKNOWN_LOCATION;
sizeof_arg[i] = NULL_TREE;
if (i >= (*args)->length ())
continue;
t = (**args)[i];
if (TREE_CODE (t) != SIZEOF_EXPR)
continue;
if (SIZEOF_EXPR_TYPE_P (t))
sizeof_arg[i] = TREE_TYPE (TREE_OPERAND (t, 0));
else
sizeof_arg[i] = TREE_OPERAND (t, 0);
sizeof_arg_loc[i] = EXPR_LOCATION (t);
}
sizeof_pointer_memaccess_warning
(sizeof_arg_loc, fn, *args,
sizeof_arg, same_type_ignoring_top_level_qualifiers_p);
}
/* A call to a namespace-scope function. */
result = build_new_function_call (fn, args, koenig_p, complain);
}
}
else if (TREE_CODE (fn) == PSEUDO_DTOR_EXPR)
{
if (!vec_safe_is_empty (*args))
error ("arguments to destructor are not allowed");
/* Mark the pseudo-destructor call as having side-effects so
that we do not issue warnings about its use. */
result = build1 (NOP_EXPR,
void_type_node,
TREE_OPERAND (fn, 0));
TREE_SIDE_EFFECTS (result) = 1;
}
else if (CLASS_TYPE_P (TREE_TYPE (fn)))
/* If the "function" is really an object of class type, it might
have an overloaded `operator ()'. */
result = build_op_call (fn, args, complain);
if (!result)
/* A call where the function is unknown. */
result = cp_build_function_call_vec (fn, args, complain);
if (processing_template_decl && result != error_mark_node)
{
/* Rebuild the call with the original (dependent-form) function and
arguments so it can be re-processed at instantiation time.  */
if (INDIRECT_REF_P (result))
result = TREE_OPERAND (result, 0);
result = build_call_vec (TREE_TYPE (result), orig_fn, orig_args);
SET_EXPR_LOCATION (result, input_location);
KOENIG_LOOKUP_P (result) = koenig_p;
release_tree_vector (orig_args);
result = convert_from_reference (result);
}
if (koenig_p)
{
/* Free garbage OVERLOADs from arg-dependent lookup. */
tree next = NULL_TREE;
for (fn = orig_fn;
fn && TREE_CODE (fn) == OVERLOAD && OVL_ARG_DEPENDENT (fn);
fn = next)
{
if (processing_template_decl)
/* In a template, we'll re-use them at instantiation time. */
OVL_ARG_DEPENDENT (fn) = false;
else
{
next = OVL_CHAIN (fn);
ggc_free (fn);
}
}
}
return result;
}
/* Finish a call to a postfix increment or decrement or EXPR. (Which
is indicated by CODE, which should be POSTINCREMENT_EXPR or
POSTDECREMENT_EXPR.) */
cp_expr
finish_increment_expr (cp_expr expr, enum tree_code code)
{
  /* input_location holds the location of the trailing operator token.
     Build a combined location of the form:
	expr++
	~~~~^~
     caret on the operator, spanning from the start of EXPR through
     the end of the operator token.  */
  location_t op_loc = input_location;
  location_t combined_loc
    = make_location (op_loc, expr.get_start (), get_finish (op_loc));

  cp_expr ret = build_x_unary_op (combined_loc, code, expr,
				  tf_warning_or_error);

  /* TODO: build_x_unary_op doesn't honor the location, so set it here.  */
  ret.set_location (combined_loc);
  return ret;
}
/* Finish a use of `this'. Returns an expression for `this'. */
tree
finish_this_expr (void)
{
  /* If `this' is available, return it as a prvalue.  */
  if (current_class_ptr)
    {
      tree type = TREE_TYPE (current_class_ref);
      tree result;

      /* In a lambda expression, 'this' refers to the captured 'this'.  */
      if (LAMBDA_TYPE_P (type))
	result = lambda_expr_this_capture (CLASSTYPE_LAMBDA_EXPR (type), true);
      else
	result = current_class_ptr;

      if (result)
	/* The keyword 'this' is a prvalue expression.  */
	return rvalue (result);
    }

  /* `this' is unavailable here; diagnose why.  */
  tree fn = current_nonlambda_function ();
  if (fn && DECL_STATIC_FUNCTION_P (fn))
    error ("%<this%> is unavailable for static member functions");
  else if (fn)
    error ("invalid use of %<this%> in non-member function");
  else
    error ("invalid use of %<this%> at top level");
  return error_mark_node;
}
/* Finish a pseudo-destructor expression. If SCOPE is NULL, the
expression was of the form `OBJECT.~DESTRUCTOR' where DESTRUCTOR is
the TYPE for the type given. If SCOPE is non-NULL, the expression
was of the form `OBJECT.SCOPE::~DESTRUCTOR'. */
tree
finish_pseudo_destructor_expr (tree object, tree scope, tree destructor,
			       location_t loc)
{
  if (object == error_mark_node || destructor == error_mark_node)
    return error_mark_node;

  /* DESTRUCTOR is always a type here, not a declaration.  */
  gcc_assert (TYPE_P (destructor));

  /* In a template, defer all checking to instantiation time; just build
     the PSEUDO_DTOR_EXPR below.  */
  if (!processing_template_decl)
    {
      if (scope == error_mark_node)
	{
	  error_at (loc, "invalid qualifying scope in pseudo-destructor name");
	  return error_mark_node;
	}
      /* `obj.~auto ()' destroys whatever type OBJECT actually has.  */
      if (is_auto (destructor))
	destructor = TREE_TYPE (object);
      if (scope && TYPE_P (scope) && !check_dtor_name (scope, destructor))
	{
	  error_at (loc,
		    "qualified type %qT does not match destructor name ~%qT",
		    scope, destructor);
	  return error_mark_node;
	}

      /* [expr.pseudo] says both:

	   The type designated by the pseudo-destructor-name shall be
	   the same as the object type.

	 and:

	   The cv-unqualified versions of the object type and of the
	   type designated by the pseudo-destructor-name shall be the
	   same type.

	 We implement the more generous second sentence, since that is
	 what most other compilers do.  */
      if (!same_type_ignoring_top_level_qualifiers_p (TREE_TYPE (object),
						      destructor))
	{
	  error_at (loc, "%qE is not of type %qT", object, destructor);
	  return error_mark_node;
	}
    }

  /* The result has void type; operands are the object, the (possibly
     NULL) qualifying scope, and the designated type.  */
  return build3_loc (loc, PSEUDO_DTOR_EXPR, void_type_node, object,
		     scope, destructor);
}
/* Finish an expression of the form CODE EXPR. */
cp_expr
finish_unary_op_expr (location_t op_loc, enum tree_code code, cp_expr expr,
		      tsubst_flags_t complain)
{
  /* Build a location of the form:
	++expr
	^~~~~~
     caret on the operator token, extending through the end of EXPR.  */
  location_t combined_loc = make_location (op_loc, op_loc,
					   expr.get_finish ());

  cp_expr result = build_x_unary_op (combined_loc, code, expr, complain);
  /* TODO: build_x_unary_op doesn't always honor the location.  */
  result.set_location (combined_loc);

  if (!(complain & tf_warning))
    return result;

  /* Only warn about overflow when the operand itself was a constant
     that did not already overflow.  */
  tree expr_ovl = expr;
  if (!processing_template_decl)
    expr_ovl = cp_fully_fold (expr_ovl);
  if (!CONSTANT_CLASS_P (expr_ovl) || TREE_OVERFLOW_P (expr_ovl))
    return result;

  tree result_ovl = result;
  if (!processing_template_decl)
    result_ovl = cp_fully_fold (result_ovl);
  if (CONSTANT_CLASS_P (result_ovl) && TREE_OVERFLOW_P (result_ovl))
    overflow_warning (combined_loc, result_ovl);

  return result;
}
/* Finish a compound-literal expression. TYPE is the type to which
the CONSTRUCTOR in COMPOUND_LITERAL is being cast. */
tree
finish_compound_literal (tree type, tree compound_literal,
			 tsubst_flags_t complain)
{
  if (type == error_mark_node)
    return error_mark_node;

  /* A compound literal of reference type: build a temporary of the
     referenced type and bind the reference to it via a cast.  */
  if (TREE_CODE (type) == REFERENCE_TYPE)
    {
      compound_literal
	= finish_compound_literal (TREE_TYPE (type), compound_literal,
				   complain);
      return cp_build_c_cast (type, compound_literal, complain);
    }

  if (!TYPE_OBJ_P (type))
    {
      if (complain & tf_error)
	error ("compound literal of non-object type %qT", type);
      return error_mark_node;
    }

  /* If TYPE is a placeholder for a class template, deduce the
     template arguments from the initializer.  */
  if (tree anode = type_uses_auto (type))
    if (CLASS_PLACEHOLDER_TEMPLATE (anode))
      type = do_auto_deduction (type, compound_literal, anode, complain,
				adc_variable_type);

  /* In a template, defer the rest of the processing to instantiation.  */
  if (processing_template_decl)
    {
      TREE_TYPE (compound_literal) = type;
      /* Mark the expression as a compound literal.  */
      TREE_HAS_CONSTRUCTOR (compound_literal) = 1;
      return compound_literal;
    }

  type = complete_type (type);

  if (TYPE_NON_AGGREGATE_CLASS (type))
    {
      /* Trying to deal with a CONSTRUCTOR instead of a TREE_LIST
	 everywhere that deals with function arguments would be a pain, so
	 just wrap it in a TREE_LIST.  The parser set a flag so we know
	 that it came from T{} rather than T({}).  */
      CONSTRUCTOR_IS_DIRECT_INIT (compound_literal) = 1;
      compound_literal = build_tree_list (NULL_TREE, compound_literal);
      return build_functional_cast (type, compound_literal, complain);
    }

  if (TREE_CODE (type) == ARRAY_TYPE
      && check_array_initializer (NULL_TREE, type, compound_literal))
    return error_mark_node;
  compound_literal = reshape_init (type, compound_literal, complain);
  /* A braced scalar initializer is checked elsewhere; an unbraced one
     must not narrow.  */
  if (SCALAR_TYPE_P (type)
      && !BRACE_ENCLOSED_INITIALIZER_P (compound_literal)
      && !check_narrowing (type, compound_literal, complain))
    return error_mark_node;
  /* Deduce the array bound from the initializer when none was given.  */
  if (TREE_CODE (type) == ARRAY_TYPE
      && TYPE_DOMAIN (type) == NULL_TREE)
    {
      cp_complete_array_type_or_error (&type, compound_literal,
				       false, complain);
      if (type == error_mark_node)
	return error_mark_node;
    }
  compound_literal = digest_init_flags (type, compound_literal, LOOKUP_NORMAL,
					complain);
  if (TREE_CODE (compound_literal) == CONSTRUCTOR)
    TREE_HAS_CONSTRUCTOR (compound_literal) = true;

  /* Put static/constant array temporaries in static variables.  */
  if ((!at_function_scope_p () || CP_TYPE_CONST_P (type))
      && TREE_CODE (type) == ARRAY_TYPE
      && !TYPE_HAS_NONTRIVIAL_DESTRUCTOR (type)
      && initializer_constant_valid_p (compound_literal, type))
    {
      tree decl = create_temporary_var (type);
      DECL_INITIAL (decl) = compound_literal;
      TREE_STATIC (decl) = 1;
      if (literal_type_p (type) && CP_TYPE_CONST_NON_VOLATILE_P (type))
	{
	  /* 5.19 says that a constant expression can include an
	     lvalue-rvalue conversion applied to "a glvalue of literal type
	     that refers to a non-volatile temporary object initialized
	     with a constant expression".  Rather than try to communicate
	     that this VAR_DECL is a temporary, just mark it constexpr.  */
	  DECL_DECLARED_CONSTEXPR_P (decl) = true;
	  DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (decl) = true;
	  TREE_CONSTANT (decl) = true;
	}
      cp_apply_type_quals_to_decl (cp_type_quals (type), decl);
      decl = pushdecl_top_level (decl);
      /* Give it an anonymous name so it cannot clash with user names.  */
      DECL_NAME (decl) = make_anon_name ();
      SET_DECL_ASSEMBLER_NAME (decl, DECL_NAME (decl));
      /* Make sure the destructor is callable.  */
      tree clean = cxx_maybe_build_cleanup (decl, complain);
      if (clean == error_mark_node)
	return error_mark_node;
      return decl;
    }

  /* Represent other compound literals with TARGET_EXPR so we produce
     an lvalue, but can elide copies.  */
  if (!VECTOR_TYPE_P (type))
    compound_literal = get_target_expr_sfinae (compound_literal, complain);

  return compound_literal;
}
/* Return the declaration for the function-name variable indicated by
ID. */
tree
finish_fname (tree id)
{
  /* Look up (or create) the variable for __FUNCTION__ et al.  */
  tree decl = fname_decl (input_location, C_RID_CODE (id), id);

  /* In a template function, defer to instantiation by returning just
     the identifier.  */
  if (processing_template_decl && current_function_decl
      && decl != error_mark_node)
    decl = DECL_NAME (decl);

  return decl;
}
/* Finish a translation unit. */
void
finish_translation_unit (void)
{
  /* In case there were missing closebraces, get us back to the global
     binding level.  */
  pop_everything ();

  /* Unwind any namespaces that were left open.  */
  while (current_namespace != global_namespace)
    pop_namespace ();

  /* Do file scope __FUNCTION__ et al.  */
  finish_fname_decls ();
}
/* Finish a template type parameter, specified as AGGR IDENTIFIER.
Returns the parameter. */
tree
finish_template_type_parm (tree aggr, tree identifier)
{
  /* Only `class' and `typename' may introduce a template type
     parameter; anything else (e.g. `struct') is a permerror and is
     treated as `class'.  */
  if (aggr != class_type_node)
    {
      permerror (input_location, "template type parameters must use the keyword %<class%> or %<typename%>");
      return build_tree_list (class_type_node, identifier);
    }

  return build_tree_list (aggr, identifier);
}
/* Finish a template template parameter, specified as AGGR IDENTIFIER.
Returns the parameter. */
tree
finish_template_template_parm (tree aggr, tree identifier)
{
  /* Build the TYPE_DECL naming the parameter, and the TEMPLATE_DECL
     that wraps it; the template parameter list currently in effect
     becomes the parameter's own parameter list.  */
  tree decl = build_decl (input_location,
			  TYPE_DECL, identifier, NULL_TREE);

  tree tmpl = build_lang_decl (TEMPLATE_DECL, identifier, NULL_TREE);
  DECL_TEMPLATE_PARMS (tmpl) = current_template_parms;
  DECL_TEMPLATE_RESULT (tmpl) = decl;
  DECL_ARTIFICIAL (decl) = 1;

  // Associate the constraints with the underlying declaration,
  // not the template.
  tree reqs = TEMPLATE_PARMS_CONSTRAINTS (current_template_parms);
  tree constr = build_constraints (reqs, NULL_TREE);
  set_constraints (decl, constr);

  /* Pop the parameter level that belonged to this template template
     parameter.  Must happen after the constraints are captured above.  */
  end_template_decl ();

  gcc_assert (DECL_TEMPLATE_PARMS (tmpl));

  check_default_tmpl_args (decl, DECL_TEMPLATE_PARMS (tmpl),
			   /*is_primary=*/true, /*is_partial=*/false,
			   /*is_friend=*/0);

  /* Package the result exactly like a plain type parameter, with the
     TEMPLATE_DECL in place of the identifier.  */
  return finish_template_type_parm (aggr, tmpl);
}
/* ARGUMENT is the default-argument value for a template template
parameter. If ARGUMENT is invalid, issue error messages and return
the ERROR_MARK_NODE. Otherwise, ARGUMENT itself is returned. */
tree
check_template_template_default_arg (tree argument)
{
  /* Only a template, a template template parameter, or an unbound
     class template is a valid default for a template template
     parameter.  */
  switch (TREE_CODE (argument))
    {
    case TEMPLATE_DECL:
    case TEMPLATE_TEMPLATE_PARM:
    case UNBOUND_CLASS_TEMPLATE:
      return argument;

    case TYPE_DECL:
      /* A plain type is a common mistake; name it in the diagnostic.  */
      error ("invalid use of type %qT as a default value for a template "
	     "template-parameter", TREE_TYPE (argument));
      return error_mark_node;

    default:
      error ("invalid default argument for a template template parameter");
      return error_mark_node;
    }
}
/* Begin a class definition, as indicated by T. */
tree
begin_class_definition (tree t)
{
  if (error_operand_p (t) || error_operand_p (TYPE_MAIN_DECL (t)))
    return error_mark_node;

  if (processing_template_parmlist)
    {
      error ("definition of %q#T inside template parameter list", t);
      return error_mark_node;
    }

  /* According to the C++ ABI, decimal classes defined in ISO/IEC TR 24733
     are passed the same as decimal scalar types.  */
  if (TREE_CODE (t) == RECORD_TYPE
      && !processing_template_decl)
    {
      tree ns = TYPE_CONTEXT (t);
      if (ns && TREE_CODE (ns) == NAMESPACE_DECL
	  && DECL_CONTEXT (ns) == std_node
	  && DECL_NAME (ns)
	  && !strcmp (IDENTIFIER_POINTER (DECL_NAME (ns)), "decimal"))
	{
	  const char *n = TYPE_NAME_STRING (t);
	  if ((strcmp (n, "decimal32") == 0)
	      || (strcmp (n, "decimal64") == 0)
	      || (strcmp (n, "decimal128") == 0))
	    TYPE_TRANSPARENT_AGGR (t) = 1;
	}
    }
  /* A non-implicit typename comes from code like:

       template <typename T> struct A {
         template <typename U> struct A<T>::B ...

     This is erroneous.  */
  else if (TREE_CODE (t) == TYPENAME_TYPE)
    {
      error ("invalid definition of qualified type %qT", t);
      t = error_mark_node;
    }

  /* After an error, substitute a fresh anonymous class so parsing of
     the body can continue.  */
  if (t == error_mark_node || ! MAYBE_CLASS_TYPE_P (t))
    {
      t = make_class_type (RECORD_TYPE);
      pushtag (make_anon_name (), t, /*tag_scope=*/ts_current);
    }

  /* If T is already being defined, create a distinct type of the same
     kind and push it so processing can proceed.  */
  if (TYPE_BEING_DEFINED (t))
    {
      t = make_class_type (TREE_CODE (t));
      pushtag (TYPE_IDENTIFIER (t), t, /*tag_scope=*/ts_current);
    }
  maybe_process_partial_specialization (t);
  /* Enter the class scope and mark the type as now being defined.  */
  pushclass (t);
  TYPE_BEING_DEFINED (t) = 1;
  class_binding_level->defining_class_p = 1;

  if (flag_pack_struct)
    {
      tree v;
      TYPE_PACKED (t) = 1;
      /* Even though the type is being defined for the first time
	 here, there might have been a forward declaration, so there
	 might be cv-qualified variants of T.  */
      for (v = TYPE_NEXT_VARIANT (t); v; v = TYPE_NEXT_VARIANT (v))
	TYPE_PACKED (v) = 1;
    }
  /* Reset the interface data, at the earliest possible
     moment, as it might have been set via a class foo;
     before.  */
  if (! TYPE_UNNAMED_P (t))
    {
      struct c_fileinfo *finfo
	= get_fileinfo (LOCATION_FILE (input_location));
      CLASSTYPE_INTERFACE_ONLY (t) = finfo->interface_only;
      SET_CLASSTYPE_INTERFACE_UNKNOWN_X
	(t, finfo->interface_unknown);
    }
  reset_specialization();

  /* Make a declaration for this class in its own scope.  */
  build_self_reference ();

  return t;
}
/* Finish the member declaration given by DECL. */
void
finish_member_declaration (tree decl)
{
  if (decl == error_mark_node || decl == NULL_TREE)
    return;

  if (decl == void_type_node)
    /* The COMPONENT was a friend, not a member, and so there's
       nothing for us to do.  */
    return;

  /* We should see only one DECL at a time.  */
  gcc_assert (DECL_CHAIN (decl) == NULL_TREE);

  /* Don't add decls after definition.  */
  gcc_assert (TYPE_BEING_DEFINED (current_class_type)
	      /* We can add lambda types when late parsing default
		 arguments.  */
	      || LAMBDA_TYPE_P (TREE_TYPE (decl)));

  /* Set up access control for DECL, based on the access specifier most
     recently seen in the class body.  */
  TREE_PRIVATE (decl)
    = (current_access_specifier == access_private_node);
  TREE_PROTECTED (decl)
    = (current_access_specifier == access_protected_node);
  if (TREE_CODE (decl) == TEMPLATE_DECL)
    {
      /* Mirror the access bits onto the templated declaration.  */
      TREE_PRIVATE (DECL_TEMPLATE_RESULT (decl)) = TREE_PRIVATE (decl);
      TREE_PROTECTED (DECL_TEMPLATE_RESULT (decl)) = TREE_PROTECTED (decl);
    }

  /* Mark the DECL as a member of the current class, unless it's
     a member of an enumeration.  */
  if (TREE_CODE (decl) != CONST_DECL)
    DECL_CONTEXT (decl) = current_class_type;

  /* Check for bare parameter packs in the member variable declaration.  */
  if (TREE_CODE (decl) == FIELD_DECL)
    {
      if (check_for_bare_parameter_packs (TREE_TYPE (decl)))
	TREE_TYPE (decl) = error_mark_node;
      if (check_for_bare_parameter_packs (DECL_ATTRIBUTES (decl)))
	DECL_ATTRIBUTES (decl) = NULL_TREE;
    }

  /* [dcl.link]

     A C language linkage is ignored for the names of class members
     and the member function type of class member functions.  */
  if (DECL_LANG_SPECIFIC (decl) && DECL_LANGUAGE (decl) == lang_c)
    SET_DECL_LANGUAGE (decl, lang_cplusplus);

  /* Put functions on the TYPE_METHODS list and everything else on the
     TYPE_FIELDS list.  Note that these are built up in reverse order.
     We reverse them (to obtain declaration order) in finish_struct.  */
  if (DECL_DECLARES_FUNCTION_P (decl))
    {
      /* We also need to add this function to the
	 CLASSTYPE_METHOD_VEC.  */
      if (add_method (current_class_type, decl, NULL_TREE))
	{
	  gcc_assert (TYPE_MAIN_VARIANT (current_class_type) == current_class_type);
	  /* Push onto the front of TYPE_METHODS (reversed later).  */
	  DECL_CHAIN (decl) = TYPE_METHODS (current_class_type);
	  TYPE_METHODS (current_class_type) = decl;

	  maybe_add_class_template_decl_list (current_class_type, decl,
					      /*friend_p=*/0);
	}
    }
  /* Enter the DECL into the scope of the class, if the class
     isn't a closure (whose fields are supposed to be unnamed).  */
  else if (CLASSTYPE_LAMBDA_EXPR (current_class_type)
	   || pushdecl_class_level (decl))
    {
      if (TREE_CODE (decl) == USING_DECL)
	{
	  /* For now, ignore class-scope USING_DECLS, so that
	     debugging backends do not see them.  */
	  DECL_IGNORED_P (decl) = 1;
	}

      /* All TYPE_DECLs go at the end of TYPE_FIELDS.  Ordinary fields
	 go at the beginning.  The reason is that lookup_field_1
	 searches the list in order, and we want a field name to
	 override a type name so that the "struct stat hack" will
	 work.  In particular:

	   struct S { enum E { }; int E } s;
	   s.E = 3;

	 is valid.  In addition, the FIELD_DECLs must be maintained in
	 declaration order so that class layout works as expected.
	 However, we don't need that order until class layout, so we
	 save a little time by putting FIELD_DECLs on in reverse order
	 here, and then reversing them in finish_struct_1.  (We could
	 also keep a pointer to the correct insertion points in the
	 list.)  */

      if (TREE_CODE (decl) == TYPE_DECL)
	TYPE_FIELDS (current_class_type)
	  = chainon (TYPE_FIELDS (current_class_type), decl);
      else
	{
	  DECL_CHAIN (decl) = TYPE_FIELDS (current_class_type);
	  TYPE_FIELDS (current_class_type) = decl;
	}

      maybe_add_class_template_decl_list (current_class_type, decl,
					  /*friend_p=*/0);
    }
}
/* Finish processing a complete template declaration. The PARMS are
the template parameters. */
void
finish_template_decl (tree parms)
{
  /* A null PARMS means this was an explicit specialization
     (`template <>'); otherwise close out an ordinary template
     parameter level.  */
  if (!parms)
    end_specialization ();
  else
    end_template_decl ();
}
// Returns the template type of the class scope being entered. If we're
// entering a constrained class scope, TYPE is the class template
// scope being entered and we may need to match the intended type with
// a constrained specialization. For example:
//
// template<Object T>
// struct S { void f(); }; #1
//
// template<Object T>
// void S<T>::f() { } #2
//
// We check, in #2, that S<T> refers precisely to the type declared by
// #1 (i.e., that the constraints match). Note that the following should
// be an error since there is no specialization of S<T> that is
// unconstrained, but this is not diagnosed here.
//
// template<typename T>
// void S<T>::f() { }
//
// We cannot diagnose this problem here since this function also matches
// qualified template names that are not part of a definition. For example:
//
// template<Integral T, Floating_point U>
// typename pair<T, U>::first_type void f(T, U);
//
// Here, it is unlikely that there is a partial specialization of
// pair constrained for Integral and Floating_point arguments.
//
// The general rule is: if a constrained specialization with matching
// constraints is found return that type. Also note that if TYPE is not a
// class-type (e.g. a typename type), then no fixup is needed.
static tree
fixup_template_type (tree type)
{
  // Locate the template parameter list at the depth appropriate to
  // the scope we're trying to enter.
  tree parms = current_template_parms;
  int depth = template_class_depth (type);
  for (int n = processing_template_decl; n > depth && parms; --n)
    parms = TREE_CHAIN (parms);
  if (!parms)
    return type;

  // Constraints in effect at this point in the parse.
  tree cur_reqs = TEMPLATE_PARMS_CONSTRAINTS (parms);
  tree cur_constr = build_constraints (cur_reqs, NULL_TREE);

  // Walk the specializations of TYPE's template; if one matches both
  // the type and the current constraints, we are entering that type.
  tree tmpl = CLASSTYPE_TI_TEMPLATE (type);
  for (tree specs = DECL_TEMPLATE_SPECIALIZATIONS (tmpl);
       specs; specs = TREE_CHAIN (specs))
    {
      tree spec_constr = get_constraints (TREE_VALUE (specs));
      if (same_type_p (type, TREE_TYPE (specs))
	  && equivalent_constraints (cur_constr, spec_constr))
	return TREE_TYPE (specs);
    }

  // If no specialization matches, then must return the type
  // previously found.
  return type;
}
/* Finish processing a template-id (which names a type) of the form
NAME < ARGS >. Return the TYPE_DECL for the type named by the
template-id. If ENTERING_SCOPE is nonzero we are about to enter
the scope of template-id indicated. */
tree
finish_template_type (tree name, tree args, int entering_scope)
{
  tree type = lookup_template_class (name, args,
				     NULL_TREE, NULL_TREE, entering_scope,
				     tf_warning_or_error | tf_user);

  /* If we might be entering the scope of a partial specialization,
     find the one with the right constraints.  */
  if (flag_concepts
      && entering_scope
      && CLASS_TYPE_P (type)
      && CLASSTYPE_TEMPLATE_INFO (type)
      && dependent_type_p (type)
      && PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (type)))
    type = fixup_template_type (type);

  if (type == error_mark_node)
    return type;
  if (CLASS_TYPE_P (type) && !alias_type_or_template_p (type))
    return TYPE_STUB_DECL (type);
  return TYPE_NAME (type);
}
/* Finish processing a BASE_CLASS with the indicated ACCESS_SPECIFIER.
Return a TREE_LIST containing the ACCESS_SPECIFIER and the
BASE_CLASS, or NULL_TREE if an error occurred. The
ACCESS_SPECIFIER is one of
access_{default,public,protected_private}_node. For a virtual base
we set TREE_TYPE. */
tree
finish_base_specifier (tree base, tree access, bool virtual_p)
{
  if (base == error_mark_node)
    {
      error ("invalid base-class specification");
      return NULL_TREE;
    }

  if (! MAYBE_CLASS_TYPE_P (base))
    {
      error ("%qT is not a class type", base);
      return NULL_TREE;
    }

  /* DR 484: Can a base-specifier name a cv-qualified class type?
     Strip the qualifiers either way.  */
  if (cp_type_quals (base) != 0)
    base = TYPE_MAIN_VARIANT (base);

  /* Pair the access specifier with the base; a virtual base is
     flagged by a non-NULL TREE_TYPE.  */
  tree result = build_tree_list (access, base);
  if (virtual_p)
    TREE_TYPE (result) = integer_type_node;

  return result;
}
/* If FNS is a member function, a set of member functions, or a
template-id referring to one or more member functions, return a
BASELINK for FNS, incorporating the current access context.
Otherwise, return FNS unchanged. */
tree
baselink_for_fns (tree fns)
{
  /* Already a BASELINK, or erroneous: nothing to do.  */
  if (BASELINK_P (fns) || error_operand_p (fns))
    return fns;

  /* Non-member functions are returned unchanged.  */
  tree scope = ovl_scope (fns);
  if (!CLASS_TYPE_P (scope))
    return fns;

  /* Prefer a currently-open class derived from SCOPE as the access
     context; fall back to SCOPE itself.  */
  tree cl = currently_open_derived_class (scope);
  if (!cl)
    cl = scope;

  tree binfo = TYPE_BINFO (cl);
  return build_baselink (binfo, binfo, fns, /*optype=*/NULL_TREE);
}
/* Returns true iff DECL is a variable from a function outside
the current one. */
static bool
outer_var_p (tree decl)
{
  /* Only variables and parameters with function scope qualify.  */
  if (!VAR_P (decl) && TREE_CODE (decl) != PARM_DECL)
    return false;
  if (!DECL_FUNCTION_SCOPE_P (decl))
    return false;
  /* Inside an NSDMI every function-scope entity counts as "outer".  */
  return (DECL_CONTEXT (decl) != current_function_decl
	  || parsing_nsdmi ());
}
/* As above, but also checks that DECL is automatic. */
bool
outer_automatic_var_p (tree decl)
{
  /* An outer variable that additionally has automatic storage.  */
  return outer_var_p (decl) && !TREE_STATIC (decl);
}
/* DECL satisfies outer_automatic_var_p. Possibly complain about it or
rewrite it for lambda capture. */
tree
process_outer_var_ref (tree decl, tsubst_flags_t complain)
{
  if (cp_unevaluated_operand)
    /* It's not a use (3.2) if we're in an unevaluated context.  */
    return decl;
  if (decl == error_mark_node)
    return decl;

  tree context = DECL_CONTEXT (decl);
  tree containing_function = current_function_decl;
  /* Innermost-first list of default-capturing lambdas between the use
     and DECL's context, built by the walk below.  */
  tree lambda_stack = NULL_TREE;
  tree lambda_expr = NULL_TREE;
  tree initializer = convert_from_reference (decl);

  /* Mark it as used now even if the use is ill-formed.  */
  if (!mark_used (decl, complain))
    return error_mark_node;

  bool saw_generic_lambda = false;

  if (parsing_nsdmi ())
    containing_function = NULL_TREE;
  else
    /* If we are in a lambda function, we can move out until we hit
       1. the context,
       2. a non-lambda function, or
       3. a non-default capturing lambda function.  */
    while (context != containing_function
	   /* containing_function can be null with invalid generic lambdas.  */
	   && containing_function
	   && LAMBDA_FUNCTION_P (containing_function))
      {
	tree closure = DECL_CONTEXT (containing_function);
	lambda_expr = CLASSTYPE_LAMBDA_EXPR (closure);

	if (generic_lambda_fn_p (containing_function))
	  saw_generic_lambda = true;

	if (TYPE_CLASS_SCOPE_P (closure))
	  /* A lambda in an NSDMI (c++/64496).  */
	  break;

	if (LAMBDA_EXPR_DEFAULT_CAPTURE_MODE (lambda_expr)
	    == CPLD_NONE)
	  break;

	lambda_stack = tree_cons (NULL_TREE,
				  lambda_expr,
				  lambda_stack);

	containing_function
	  = decl_function_context (containing_function);
      }

  /* Core issue 696: "[At the July 2009 meeting] the CWG expressed
     support for an approach in which a reference to a local
     [constant] automatic variable in a nested class or lambda body
     would enter the expression as an rvalue, which would reduce
     the complexity of the problem"

     FIXME update for final resolution of core issue 696.  */
  if (decl_maybe_constant_var_p (decl))
    {
      if (processing_template_decl && !saw_generic_lambda)
	/* In a non-generic lambda within a template, wait until instantiation
	   time to decide whether to capture.  For a generic lambda, we can't
	   wait until we instantiate the op() because the closure class is
	   already defined at that point.  FIXME to get the semantics exactly
	   right we need to partially-instantiate the lambda body so the only
	   dependencies left are on the generic parameters themselves.  This
	   probably means moving away from our current model of lambdas in
	   templates (instantiating the closure type) to one based on creating
	   the closure type when instantiating the lambda context.  That is
	   probably also the way to handle lambdas within pack expansions.  */
	return decl;
      else if (decl_constant_var_p (decl))
	{
	  /* Use the constant value instead of capturing, when possible.  */
	  tree t = maybe_constant_value (convert_from_reference (decl));
	  if (TREE_CONSTANT (t))
	    return t;
	}
    }

  /* Members of an anonymous union variable cannot be captured.  */
  if (lambda_expr && VAR_P (decl)
      && DECL_ANON_UNION_VAR_P (decl))
    {
      if (complain & tf_error)
	error ("cannot capture member %qD of anonymous union", decl);
      return error_mark_node;
    }
  /* The walk above reached DECL's own context: every intervening
     lambda default-captures, so perform the captures.  */
  if (context == containing_function)
    {
      decl = add_default_capture (lambda_stack,
				  /*id=*/DECL_NAME (decl),
				  initializer);
    }
  else if (lambda_expr)
    {
      /* Stopped at a lambda that cannot capture DECL; explain why.  */
      if (complain & tf_error)
	{
	  error ("%qD is not captured", decl);
	  tree closure = LAMBDA_EXPR_CLOSURE (lambda_expr);
	  if (LAMBDA_EXPR_DEFAULT_CAPTURE_MODE (lambda_expr)
	      == CPLD_NONE)
	    inform (location_of (closure),
		    "the lambda has no capture-default");
	  else if (TYPE_CLASS_SCOPE_P (closure))
	    inform (0, "lambda in local class %q+T cannot "
		    "capture variables from the enclosing context",
		    TYPE_CONTEXT (closure));
	  inform (DECL_SOURCE_LOCATION (decl), "%q#D declared here", decl);
	}
      return error_mark_node;
    }
  else
    {
      /* Not inside a lambda at all: using an outer local is an error.  */
      if (complain & tf_error)
	{
	  error (VAR_P (decl)
		 ? G_("use of local variable with automatic storage from "
		      "containing function")
		 : G_("use of parameter from containing function"));
	  inform (DECL_SOURCE_LOCATION (decl), "%q#D declared here", decl);
	}
      return error_mark_node;
    }
  return decl;
}
/* ID_EXPRESSION is a representation of parsed, but unprocessed,
id-expression. (See cp_parser_id_expression for details.) SCOPE,
if non-NULL, is the type or namespace used to explicitly qualify
ID_EXPRESSION. DECL is the entity to which that name has been
resolved.
*CONSTANT_EXPRESSION_P is true if we are presently parsing a
constant-expression. In that case, *NON_CONSTANT_EXPRESSION_P will
be set to true if this expression isn't permitted in a
constant-expression, but it is otherwise not set by this function.
*ALLOW_NON_CONSTANT_EXPRESSION_P is true if we are parsing a
constant-expression, but a non-constant expression is also
permissible.
DONE is true if this expression is a complete postfix-expression;
it is false if this expression is followed by '->', '[', '(', etc.
ADDRESS_P is true iff this expression is the operand of '&'.
TEMPLATE_P is true iff the qualified-id was of the form
"A::template B". TEMPLATE_ARG_P is true iff this qualified name
appears as a template argument.
If an error occurs, and it is the kind of error that might cause
the parser to abort a tentative parse, *ERROR_MSG is filled in. It
is the caller's responsibility to issue the message. *ERROR_MSG
will be a string with static storage duration, so the caller need
not "free" it.
Return an expression for the entity, after issuing appropriate
diagnostics. This function is also responsible for transforming a
reference to a non-static member into a COMPONENT_REF that makes
the use of "this" explicit.
Upon return, *IDK will be filled in appropriately. */
cp_expr
finish_id_expression (tree id_expression,
tree decl,
tree scope,
cp_id_kind *idk,
bool integral_constant_expression_p,
bool allow_non_integral_constant_expression_p,
bool *non_integral_constant_expression_p,
bool template_p,
bool done,
bool address_p,
bool template_arg_p,
const char **error_msg,
location_t location)
{
decl = strip_using_decl (decl);
/* Initialize the output parameters. */
*idk = CP_ID_KIND_NONE;
*error_msg = NULL;
if (id_expression == error_mark_node)
return error_mark_node;
/* If we have a template-id, then no further lookup is
required. If the template-id was for a template-class, we
will sometimes have a TYPE_DECL at this point. */
else if (TREE_CODE (decl) == TEMPLATE_ID_EXPR
|| TREE_CODE (decl) == TYPE_DECL)
;
/* Look up the name. */
else
{
if (decl == error_mark_node)
{
/* Name lookup failed. */
if (scope
&& (!TYPE_P (scope)
|| (!dependent_type_p (scope)
&& !(identifier_p (id_expression)
&& IDENTIFIER_TYPENAME_P (id_expression)
&& dependent_type_p (TREE_TYPE (id_expression))))))
{
/* If the qualifying type is non-dependent (and the name
does not name a conversion operator to a dependent
type), issue an error. */
qualified_name_lookup_error (scope, id_expression, decl, location);
return error_mark_node;
}
else if (!scope)
{
/* It may be resolved via Koenig lookup. */
*idk = CP_ID_KIND_UNQUALIFIED;
return id_expression;
}
else
decl = id_expression;
}
/* If DECL is a variable that would be out of scope under
ANSI/ISO rules, but in scope in the ARM, name lookup
will succeed. Issue a diagnostic here. */
else
decl = check_for_out_of_scope_variable (decl);
/* Remember that the name was used in the definition of
the current class so that we can check later to see if
the meaning would have been different after the class
was entirely defined. */
if (!scope && decl != error_mark_node && identifier_p (id_expression))
maybe_note_name_used_in_class (id_expression, decl);
/* A use in unevaluated operand might not be instantiated appropriately
if tsubst_copy builds a dummy parm, or if we never instantiate a
generic lambda, so mark it now. */
if (processing_template_decl && cp_unevaluated_operand)
mark_type_use (decl);
/* Disallow uses of local variables from containing functions, except
within lambda-expressions. */
if (outer_automatic_var_p (decl))
{
decl = process_outer_var_ref (decl, tf_warning_or_error);
if (decl == error_mark_node)
return error_mark_node;
}
/* Also disallow uses of function parameters outside the function
body, except inside an unevaluated context (i.e. decltype). */
if (TREE_CODE (decl) == PARM_DECL
&& DECL_CONTEXT (decl) == NULL_TREE
&& !cp_unevaluated_operand)
{
*error_msg = G_("use of parameter outside function body");
return error_mark_node;
}
}
/* If we didn't find anything, or what we found was a type,
then this wasn't really an id-expression. */
if (TREE_CODE (decl) == TEMPLATE_DECL
&& !DECL_FUNCTION_TEMPLATE_P (decl))
{
*error_msg = G_("missing template arguments");
return error_mark_node;
}
else if (TREE_CODE (decl) == TYPE_DECL
|| TREE_CODE (decl) == NAMESPACE_DECL)
{
*error_msg = G_("expected primary-expression");
return error_mark_node;
}
/* If the name resolved to a template parameter, there is no
need to look it up again later. */
if ((TREE_CODE (decl) == CONST_DECL && DECL_TEMPLATE_PARM_P (decl))
|| TREE_CODE (decl) == TEMPLATE_PARM_INDEX)
{
tree r;
*idk = CP_ID_KIND_NONE;
if (TREE_CODE (decl) == TEMPLATE_PARM_INDEX)
decl = TEMPLATE_PARM_DECL (decl);
r = convert_from_reference (DECL_INITIAL (decl));
if (integral_constant_expression_p
&& !dependent_type_p (TREE_TYPE (decl))
&& !(INTEGRAL_OR_ENUMERATION_TYPE_P (TREE_TYPE (r))))
{
if (!allow_non_integral_constant_expression_p)
error ("template parameter %qD of type %qT is not allowed in "
"an integral constant expression because it is not of "
"integral or enumeration type", decl, TREE_TYPE (decl));
*non_integral_constant_expression_p = true;
}
return r;
}
else
{
bool dependent_p = type_dependent_expression_p (decl);
/* If the declaration was explicitly qualified indicate
that. The semantics of `A::f(3)' are different than
`f(3)' if `f' is virtual. */
*idk = (scope
? CP_ID_KIND_QUALIFIED
: (TREE_CODE (decl) == TEMPLATE_ID_EXPR
? CP_ID_KIND_TEMPLATE_ID
: (dependent_p
? CP_ID_KIND_UNQUALIFIED_DEPENDENT
: CP_ID_KIND_UNQUALIFIED)));
/* If the name was dependent on a template parameter and we're not in a
default capturing generic lambda, we will resolve the
name at instantiation time. FIXME: For lambdas, we should defer
building the closure type until instantiation time then we won't need
the extra test here. */
if (dependent_p
&& !parsing_default_capturing_generic_lambda ())
{
if (DECL_P (decl)
&& any_dependent_type_attributes_p (DECL_ATTRIBUTES (decl)))
/* Dependent type attributes on the decl mean that the TREE_TYPE is
wrong, so just return the identifier. */
return id_expression;
/* If we found a variable, then name lookup during the
instantiation will always resolve to the same VAR_DECL
(or an instantiation thereof). */
if (VAR_P (decl)
|| TREE_CODE (decl) == CONST_DECL
|| TREE_CODE (decl) == PARM_DECL)
{
mark_used (decl);
return convert_from_reference (decl);
}
/* Create a SCOPE_REF for qualified names, if the scope is
dependent. */
if (scope)
{
if (TYPE_P (scope))
{
if (address_p && done)
decl = finish_qualified_id_expr (scope, decl,
done, address_p,
template_p,
template_arg_p,
tf_warning_or_error);
else
{
tree type = NULL_TREE;
if (DECL_P (decl) && !dependent_scope_p (scope))
type = TREE_TYPE (decl);
decl = build_qualified_name (type,
scope,
id_expression,
template_p);
}
}
if (TREE_TYPE (decl))
decl = convert_from_reference (decl);
return decl;
}
/* A TEMPLATE_ID already contains all the information we
need. */
if (TREE_CODE (id_expression) == TEMPLATE_ID_EXPR)
return id_expression;
/* The same is true for FIELD_DECL, but we also need to
make sure that the syntax is correct. */
else if (TREE_CODE (decl) == FIELD_DECL)
{
/* Since SCOPE is NULL here, this is an unqualified name.
Access checking has been performed during name lookup
already. Turn off checking to avoid duplicate errors. */
push_deferring_access_checks (dk_no_check);
decl = finish_non_static_data_member
(decl, NULL_TREE,
/*qualifying_scope=*/NULL_TREE);
pop_deferring_access_checks ();
return decl;
}
return id_expression;
}
if (TREE_CODE (decl) == NAMESPACE_DECL)
{
error ("use of namespace %qD as expression", decl);
return error_mark_node;
}
else if (DECL_CLASS_TEMPLATE_P (decl))
{
error ("use of class template %qT as expression", decl);
return error_mark_node;
}
else if (TREE_CODE (decl) == TREE_LIST)
{
/* Ambiguous reference to base members. */
error ("request for member %qD is ambiguous in "
"multiple inheritance lattice", id_expression);
print_candidates (decl);
return error_mark_node;
}
/* Mark variable-like entities as used. Functions are similarly
marked either below or after overload resolution. */
if ((VAR_P (decl)
|| TREE_CODE (decl) == PARM_DECL
|| TREE_CODE (decl) == CONST_DECL
|| TREE_CODE (decl) == RESULT_DECL)
&& !mark_used (decl))
return error_mark_node;
/* Only certain kinds of names are allowed in constant
expression. Template parameters have already
been handled above. */
if (! error_operand_p (decl)
&& integral_constant_expression_p
&& ! decl_constant_var_p (decl)
&& TREE_CODE (decl) != CONST_DECL
&& ! builtin_valid_in_constant_expr_p (decl))
{
if (!allow_non_integral_constant_expression_p)
{
error ("%qD cannot appear in a constant-expression", decl);
return error_mark_node;
}
*non_integral_constant_expression_p = true;
}
tree wrap;
if (VAR_P (decl)
&& !cp_unevaluated_operand
&& !processing_template_decl
&& (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
&& CP_DECL_THREAD_LOCAL_P (decl)
&& (wrap = get_tls_wrapper_fn (decl)))
{
/* Replace an evaluated use of the thread_local variable with
a call to its wrapper. */
decl = build_cxx_call (wrap, 0, NULL, tf_warning_or_error);
}
else if (TREE_CODE (decl) == TEMPLATE_ID_EXPR
&& variable_template_p (TREE_OPERAND (decl, 0)))
{
decl = finish_template_variable (decl);
mark_used (decl);
decl = convert_from_reference (decl);
}
else if (scope)
{
decl = (adjust_result_of_qualified_name_lookup
(decl, scope, current_nonlambda_class_type()));
if (TREE_CODE (decl) == FUNCTION_DECL)
mark_used (decl);
if (TYPE_P (scope))
decl = finish_qualified_id_expr (scope,
decl,
done,
address_p,
template_p,
template_arg_p,
tf_warning_or_error);
else
decl = convert_from_reference (decl);
}
else if (TREE_CODE (decl) == FIELD_DECL)
{
/* Since SCOPE is NULL here, this is an unqualified name.
Access checking has been performed during name lookup
already. Turn off checking to avoid duplicate errors. */
push_deferring_access_checks (dk_no_check);
decl = finish_non_static_data_member (decl, NULL_TREE,
/*qualifying_scope=*/NULL_TREE);
pop_deferring_access_checks ();
}
else if (is_overloaded_fn (decl))
{
tree first_fn;
first_fn = get_first_fn (decl);
if (TREE_CODE (first_fn) == TEMPLATE_DECL)
first_fn = DECL_TEMPLATE_RESULT (first_fn);
/* [basic.def.odr]: "A function whose name appears as a
potentially-evaluated expression is odr-used if it is the unique
lookup result".
But only mark it if it's a complete postfix-expression; in a call,
ADL might select a different function, and we'll call mark_used in
build_over_call. */
if (done
&& !really_overloaded_fn (decl)
&& !mark_used (first_fn))
return error_mark_node;
if (!template_arg_p
&& TREE_CODE (first_fn) == FUNCTION_DECL
&& DECL_FUNCTION_MEMBER_P (first_fn)
&& !shared_member_p (decl))
{
/* A set of member functions. */
decl = maybe_dummy_object (DECL_CONTEXT (first_fn), 0);
return finish_class_member_access_expr (decl, id_expression,
/*template_p=*/false,
tf_warning_or_error);
}
decl = baselink_for_fns (decl);
}
else
{
if (DECL_P (decl) && DECL_NONLOCAL (decl)
&& DECL_CLASS_SCOPE_P (decl))
{
tree context = context_for_name_lookup (decl);
if (context != current_class_type)
{
tree path = currently_open_derived_class (context);
perform_or_defer_access_check (TYPE_BINFO (path),
decl, decl,
tf_warning_or_error);
}
}
decl = convert_from_reference (decl);
}
}
return cp_expr (decl, location);
}
/* Implement the __typeof keyword: Return the type of EXPR, suitable for
   use as a type-specifier.  */
tree
finish_typeof (tree expr)
{
  /* A type-dependent operand cannot be resolved yet; build a
     TYPEOF_TYPE placeholder to be completed at instantiation time.  */
  if (type_dependent_expression_p (expr))
    {
      tree deferred = cxx_make_type (TYPEOF_TYPE);
      TYPEOF_TYPE_EXPR (deferred) = expr;
      SET_TYPE_STRUCTURAL_EQUALITY (deferred);
      return deferred;
    }

  /* Otherwise the type can be computed immediately.  */
  expr = mark_type_use (expr);
  tree result = unlowered_expr_type (expr);
  if (result == NULL_TREE || result == unknown_type_node)
    {
      error ("type of %qE is unknown", expr);
      return error_mark_node;
    }
  return result;
}
/* Implement the __underlying_type keyword: Return the underlying
   type of TYPE, suitable for use as a type-specifier.  */
tree
finish_underlying_type (tree type)
{
  /* Inside a template, defer via an UNDERLYING_TYPE node.  */
  if (processing_template_decl)
    {
      tree deferred = cxx_make_type (UNDERLYING_TYPE);
      UNDERLYING_TYPE_TYPE (deferred) = type;
      SET_TYPE_STRUCTURAL_EQUALITY (deferred);
      return deferred;
    }

  if (!complete_type_or_else (type, NULL_TREE))
    return error_mark_node;

  if (TREE_CODE (type) != ENUMERAL_TYPE)
    {
      error ("%qT is not an enumeration type", type);
      return error_mark_node;
    }

  tree result = ENUM_UNDERLYING_TYPE (type);

  /* Fixup necessary in this case because ENUM_UNDERLYING_TYPE
     includes TYPE_MIN_VALUE and TYPE_MAX_VALUE information.
     See finish_enum_value_list for details.  */
  if (!ENUM_FIXED_UNDERLYING_TYPE_P (type))
    result = c_common_type_for_mode (TYPE_MODE (result),
				     TYPE_UNSIGNED (result));

  return result;
}
/* Implement the __direct_bases keyword: Return the direct base classes
   of type, as a TREE_VEC.  */
tree
calculate_direct_bases (tree type)
{
  vec<tree, va_gc> *collected = make_tree_vector ();
  tree binfo;
  unsigned ix;

  complete_type (type);
  if (!NON_UNION_CLASS_TYPE_P (type))
    return make_tree_vec (0);

  vec<tree, va_gc> *base_binfos = BINFO_BASE_BINFOS (TYPE_BINFO (type));

  /* Virtual bases are initialized first, so they come first.  */
  for (ix = 0; base_binfos->iterate (ix, &binfo); ix++)
    if (BINFO_VIRTUAL_P (binfo))
      vec_safe_push (collected, binfo);

  /* Then the non-virtual bases, in declaration order.  */
  for (ix = 0; base_binfos->iterate (ix, &binfo); ix++)
    if (!BINFO_VIRTUAL_P (binfo))
      vec_safe_push (collected, binfo);

  /* Convert the collected binfos to their types.  */
  tree bases_vec = make_tree_vec (collected->length ());
  for (ix = 0; ix < collected->length (); ++ix)
    TREE_VEC_ELT (bases_vec, ix) = BINFO_TYPE ((*collected)[ix]);

  return bases_vec;
}
/* Implement the __bases keyword: Return the base classes
   of type */

/* Find morally non-virtual base classes by walking binfo hierarchy */
/* Virtual base classes are handled separately in finish_bases */

static tree
dfs_calculate_bases_pre (tree binfo, void * /*data_*/)
{
  /* Don't walk bases of virtual bases.  */
  if (BINFO_VIRTUAL_P (binfo))
    return dfs_skip_bases;
  return NULL_TREE;
}
/* Post-order callback for the __bases walk: append the type of every
   non-virtual binfo to the vector pointed to by DATA_.  */
static tree
dfs_calculate_bases_post (tree binfo, void *data_)
{
  vec<tree, va_gc> **bases = (vec<tree, va_gc> **) data_;
  if (!BINFO_VIRTUAL_P (binfo))
    vec_safe_push (*bases, BINFO_TYPE (binfo));
  return NULL_TREE;
}
/* Calculates the morally non-virtual base classes of a class.  Returns
   a freshly-obtained tree vector which the caller must release.  */
static vec<tree, va_gc> *
calculate_bases_helper (tree type)
{
  vec<tree, va_gc> *result = make_tree_vector ();

  /* Add non-virtual base classes in order of construction.  */
  if (TYPE_BINFO (type))
    dfs_walk_all (TYPE_BINFO (type),
		  dfs_calculate_bases_pre, dfs_calculate_bases_post,
		  &result);
  return result;
}
/* Implement the __bases keyword for TYPE: return a TREE_VEC of all its
   base classes, virtual bases first (each expanded into its own
   morally non-virtual bases), then the non-virtual bases in
   construction order.  */
tree
calculate_bases (tree type)
{
  vec<tree, va_gc> *vector = make_tree_vector();
  tree bases_vec = NULL_TREE;
  unsigned i;
  vec<tree, va_gc> *vbases;
  vec<tree, va_gc> *nonvbases;
  tree binfo;

  complete_type (type);

  /* Non-class types and unions have no bases.  */
  if (!NON_UNION_CLASS_TYPE_P (type))
    return make_tree_vec (0);

  /* First go through virtual base classes */
  for (vbases = CLASSTYPE_VBASECLASSES (type), i = 0;
       vec_safe_iterate (vbases, i, &binfo); i++)
    {
      /* Each virtual base contributes its own non-virtual base
	 closure (its nested virtual bases already appear in
	 CLASSTYPE_VBASECLASSES).  */
      vec<tree, va_gc> *vbase_bases;
      vbase_bases = calculate_bases_helper (BINFO_TYPE (binfo));
      vec_safe_splice (vector, vbase_bases);
      release_tree_vector (vbase_bases);
    }

  /* Now for the non-virtual bases */
  nonvbases = calculate_bases_helper (type);
  vec_safe_splice (vector, nonvbases);
  release_tree_vector (nonvbases);

  /* Note that during error recovery vector->length can even be zero.  */
  if (vector->length () > 1)
    {
      /* Last element is entire class, so don't copy */
      bases_vec = make_tree_vec (vector->length() - 1);
      for (i = 0; i < vector->length () - 1; ++i)
	TREE_VEC_ELT (bases_vec, i) = (*vector)[i];
    }
  else
    bases_vec = make_tree_vec (0);

  release_tree_vector (vector);
  return bases_vec;
}
/* Build a BASES node representing the (direct, if DIRECT is true) base
   classes of TYPE, for the __bases/__direct_bases intrinsics.  These
   expand to a parameter pack and are therefore only meaningful inside
   a template; outside one we diagnose and return error_mark_node.  */
tree
finish_bases (tree type, bool direct)
{
  tree bases = NULL_TREE;

  if (!processing_template_decl)
    {
      /* Parameter packs can only be used in templates.  Per GCC
	 diagnostic conventions the message is not capitalized and
	 keywords are quoted with %<...%>.  */
      error ("parameter pack %<__bases%> only valid in template declaration");
      return error_mark_node;
    }

  bases = cxx_make_type (BASES);
  BASES_TYPE (bases) = type;
  BASES_DIRECT (bases) = direct;
  SET_TYPE_STRUCTURAL_EQUALITY (bases);

  return bases;
}
/* Perform C++-specific checks for __builtin_offsetof before calling
   fold_offsetof.  OBJECT_PTR is a pointer to the containing object,
   EXPR the member access built from the second operand, LOC the
   location for diagnostics.  Returns the folded offset, a template
   placeholder, or error_mark_node.  */
tree
finish_offsetof (tree object_ptr, tree expr, location_t loc)
{
  /* If we're processing a template, we can't finish the semantics yet.
     Otherwise we can fold the entire expression now.  */
  if (processing_template_decl)
    {
      expr = build2 (OFFSETOF_EXPR, size_type_node, expr, object_ptr);
      SET_EXPR_LOCATION (expr, loc);
      return expr;
    }

  /* offsetof of a pseudo-destructor call is meaningless.  */
  if (TREE_CODE (expr) == PSEUDO_DTOR_EXPR)
    {
      error ("cannot apply %<offsetof%> to destructor %<~%T%>",
	     TREE_OPERAND (expr, 2));
      return error_mark_node;
    }

  /* The operand must designate a data member, not a (member) function
     or an unresolved overload set.  */
  if (TREE_CODE (TREE_TYPE (expr)) == FUNCTION_TYPE
      || TREE_CODE (TREE_TYPE (expr)) == METHOD_TYPE
      || TREE_TYPE (expr) == unknown_type_node)
    {
      if (INDIRECT_REF_P (expr))
	error ("second operand of %<offsetof%> is neither a single "
	       "identifier nor a sequence of member accesses and "
	       "array references");
      else
	{
	  /* Dig out the named member for a better diagnostic.  */
	  if (TREE_CODE (expr) == COMPONENT_REF
	      || TREE_CODE (expr) == COMPOUND_EXPR)
	    expr = TREE_OPERAND (expr, 1);
	  error ("cannot apply %<offsetof%> to member function %qD", expr);
	}
      return error_mark_node;
    }

  /* Strip the implicit dereference added for reference members.  */
  if (REFERENCE_REF_P (expr))
    expr = TREE_OPERAND (expr, 0);

  if (!complete_type_or_else (TREE_TYPE (TREE_TYPE (object_ptr)), object_ptr))
    return error_mark_node;

  /* offsetof is only well-defined for standard-layout types; warn
     otherwise (unless in an unevaluated context).  */
  if (warn_invalid_offsetof
      && CLASS_TYPE_P (TREE_TYPE (TREE_TYPE (object_ptr)))
      && CLASSTYPE_NON_STD_LAYOUT (TREE_TYPE (TREE_TYPE (object_ptr)))
      && cp_unevaluated_operand == 0)
    pedwarn (loc, OPT_Winvalid_offsetof,
	     "offsetof within non-standard-layout type %qT is undefined",
	     TREE_TYPE (TREE_TYPE (object_ptr)));

  return fold_offsetof (expr);
}
/* Replace the AGGR_INIT_EXPR at *TP with an equivalent CALL_EXPR.  This
   function is broken out from the above for the benefit of the tree-ssa
   project.  */
void
simplify_aggr_init_expr (tree *tp)
{
  tree aggr_init_expr = *tp;

  /* Form an appropriate CALL_EXPR.  */
  tree fn = AGGR_INIT_EXPR_FN (aggr_init_expr);
  tree slot = AGGR_INIT_EXPR_SLOT (aggr_init_expr);
  tree type = TREE_TYPE (slot);

  tree call_expr;
  /* How the initialized object reaches the callee:
     ctor - the slot's address is passed as the `this' argument;
     arg  - the value is constructed directly in the slot via the
	    return-slot optimization;
     pcc  - PCC static-struct-return: copy out of the static buffer.  */
  enum style_t { ctor, arg, pcc } style;

  if (AGGR_INIT_VIA_CTOR_P (aggr_init_expr))
    style = ctor;
#ifdef PCC_STATIC_STRUCT_RETURN
  else if (1)
    style = pcc;
#endif
  else
    {
      gcc_assert (TREE_ADDRESSABLE (type));
      style = arg;
    }

  call_expr = build_call_array_loc (input_location,
				    TREE_TYPE (TREE_TYPE (TREE_TYPE (fn))),
				    fn,
				    aggr_init_expr_nargs (aggr_init_expr),
				    AGGR_INIT_EXPR_ARGP (aggr_init_expr));
  /* Propagate the relevant flags from the AGGR_INIT_EXPR onto the new
     CALL_EXPR.  */
  TREE_NOTHROW (call_expr) = TREE_NOTHROW (aggr_init_expr);
  CALL_FROM_THUNK_P (call_expr) = AGGR_INIT_FROM_THUNK_P (aggr_init_expr);
  CALL_EXPR_OPERATOR_SYNTAX (call_expr)
    = CALL_EXPR_OPERATOR_SYNTAX (aggr_init_expr);
  CALL_EXPR_ORDERED_ARGS (call_expr) = CALL_EXPR_ORDERED_ARGS (aggr_init_expr);
  CALL_EXPR_REVERSE_ARGS (call_expr) = CALL_EXPR_REVERSE_ARGS (aggr_init_expr);

  if (style == ctor)
    {
      /* Replace the first argument to the ctor with the address of the
	 slot.  */
      cxx_mark_addressable (slot);
      CALL_EXPR_ARG (call_expr, 0) =
	build1 (ADDR_EXPR, build_pointer_type (type), slot);
    }
  else if (style == arg)
    {
      /* Just mark it addressable here, and leave the rest to
	 expand_call{,_inline}.  */
      cxx_mark_addressable (slot);
      CALL_EXPR_RETURN_SLOT_OPT (call_expr) = true;
      call_expr = build2 (INIT_EXPR, TREE_TYPE (call_expr), slot, call_expr);
    }
  else if (style == pcc)
    {
      /* If we're using the non-reentrant PCC calling convention, then we
	 need to copy the returned value out of the static buffer into the
	 SLOT.  */
      push_deferring_access_checks (dk_no_check);
      call_expr = build_aggr_init (slot, call_expr,
				   DIRECT_BIND | LOOKUP_ONLYCONVERTING,
				   tf_warning_or_error);
      pop_deferring_access_checks ();
      call_expr = build2 (COMPOUND_EXPR, TREE_TYPE (slot), call_expr, slot);
    }

  if (AGGR_INIT_ZERO_FIRST (aggr_init_expr))
    {
      /* Zero the object first, then run the call.  */
      tree init = build_zero_init (type, NULL_TREE,
				   /*static_storage_p=*/false);
      init = build2 (INIT_EXPR, void_type_node, slot, init);
      call_expr = build2 (COMPOUND_EXPR, TREE_TYPE (call_expr),
			  init, call_expr);
    }

  *tp = call_expr;
}
/* Emit all thunks to FN that should be emitted when FN is emitted.  */
void
emit_associated_thunks (tree fn)
{
  /* When we use vcall offsets, we emit thunks with the virtual
     functions to which they thunk.  The whole point of vcall offsets
     is so that you can know statically the entire set of thunks that
     will ever be needed for a given virtual function, thereby
     enabling you to output all the thunks with the function itself.  */
  if (!DECL_VIRTUAL_P (fn)
      /* Do not emit thunks for extern template instantiations.  */
      || DECL_REALLY_EXTERN (fn))
    return;

  for (tree thunk = DECL_THUNKS (fn); thunk; thunk = DECL_CHAIN (thunk))
    {
      if (THUNK_ALIAS (thunk))
	{
	  /* Aliased thunks carry no thunk list of their own.  */
	  gcc_assert (!DECL_THUNKS (thunk));
	  continue;
	}

      use_thunk (thunk, /*emit_p=*/1);
      /* A result-adjusting thunk may itself have thunks; emit those
	 too.  */
      if (DECL_RESULT_THUNK_P (thunk))
	for (tree probe = DECL_THUNKS (thunk); probe;
	     probe = DECL_CHAIN (probe))
	  use_thunk (probe, /*emit_p=*/1);
    }
}
/* Generate RTL for FN.  Returns true if the caller should go on to
   expand FN now; false if FN needs no expansion here (template
   context, cloned ctor/dtor body, or -fsyntax-only).  */
bool
expand_or_defer_fn_1 (tree fn)
{
  /* When the parser calls us after finishing the body of a template
     function, we don't really want to expand the body.  */
  if (processing_template_decl)
    {
      /* Normally, collection only occurs in rest_of_compilation.  So,
	 if we don't collect here, we never collect junk generated
	 during the processing of templates until we hit a
	 non-template function.  It's not safe to do this inside a
	 nested class, though, as the parser may have local state that
	 is not a GC root.  */
      if (!function_depth)
	ggc_collect ();
      return false;
    }

  gcc_assert (DECL_SAVED_TREE (fn));

  /* We make a decision about linkage for these functions at the end
     of the compilation.  Until that point, we do not want the back
     end to output them -- but we do want it to see the bodies of
     these functions so that it can inline them as appropriate.  */
  if (DECL_DECLARED_INLINE_P (fn) || DECL_IMPLICIT_INSTANTIATION (fn))
    {
      if (DECL_INTERFACE_KNOWN (fn))
	/* We've already made a decision as to how this function will
	   be handled.  */;
      else if (!at_eof)
	tentative_decl_linkage (fn);
      else
	import_export_decl (fn);

      /* If the user wants us to keep all inline functions, then mark
	 this function as needed so that finish_file will make sure to
	 output it later.  Similarly, all dllexport'd functions must
	 be emitted; there may be callers in other DLLs.  */
      if (DECL_DECLARED_INLINE_P (fn)
	  && !DECL_REALLY_EXTERN (fn)
	  && (flag_keep_inline_functions
	      || (flag_keep_inline_dllexport
		  && lookup_attribute ("dllexport", DECL_ATTRIBUTES (fn)))))
	{
	  mark_needed (fn);
	  DECL_EXTERNAL (fn) = 0;
	}
    }

  /* If this is a constructor or destructor body, we have to clone
     it.  */
  if (maybe_clone_body (fn))
    {
      /* We don't want to process FN again, so pretend we've written
	 it out, even though we haven't.  */
      TREE_ASM_WRITTEN (fn) = 1;
      /* If this is a constexpr function, keep DECL_SAVED_TREE.  */
      if (!DECL_DECLARED_CONSTEXPR_P (fn))
	DECL_SAVED_TREE (fn) = NULL_TREE;
      return false;
    }

  /* There's no reason to do any of the work here if we're only doing
     semantic analysis; this code just generates RTL.  */
  if (flag_syntax_only)
    return false;

  return true;
}
/* Hand FN to the callgraph for expansion (or deferral) if
   expand_or_defer_fn_1 says it needs processing.  */
void
expand_or_defer_fn (tree fn)
{
  if (!expand_or_defer_fn_1 (fn))
    return;

  function_depth++;

  /* Expand or defer, at the whim of the compilation unit manager.  */
  cgraph_node::finalize_function (fn, function_depth > 1);
  emit_associated_thunks (fn);

  function_depth--;
}
/* State shared by finalize_nrv and its walk callback finalize_nrv_r.  */
struct nrv_data
{
  nrv_data () : visited (37) {}

  /* The local variable being replaced (the named return value).  */
  tree var;
  /* The function's RESULT_DECL, which replaces every use of VAR.  */
  tree result;
  /* Trees already seen during the walk, so each is handled once.  */
  hash_table<nofree_ptr_hash <tree_node> > visited;
};
/* Helper function for walk_tree, used by finalize_nrv below.  TP points
   to the tree being visited; DATA is a struct nrv_data carrying the
   variable to replace, its replacement RESULT_DECL, and a visited set.  */
static tree
finalize_nrv_r (tree* tp, int* walk_subtrees, void* data)
{
  struct nrv_data *dp = (struct nrv_data *)data;
  tree_node **slot;

  /* No need to walk into types.  There wouldn't be any need to walk into
     non-statements, except that we have to consider STMT_EXPRs.  */
  if (TYPE_P (*tp))
    *walk_subtrees = 0;
  /* Change all returns to just refer to the RESULT_DECL; this is a nop,
     but differs from using NULL_TREE in that it indicates that we care
     about the value of the RESULT_DECL.  */
  else if (TREE_CODE (*tp) == RETURN_EXPR)
    TREE_OPERAND (*tp, 0) = dp->result;
  /* Change all cleanups for the NRV to only run when an exception is
     thrown.  */
  else if (TREE_CODE (*tp) == CLEANUP_STMT
	   && CLEANUP_DECL (*tp) == dp->var)
    CLEANUP_EH_ONLY (*tp) = 1;
  /* Replace the DECL_EXPR for the NRV with an initialization of the
     RESULT_DECL, if needed.  */
  else if (TREE_CODE (*tp) == DECL_EXPR
	   && DECL_EXPR_DECL (*tp) == dp->var)
    {
      tree init;
      if (DECL_INITIAL (dp->var)
	  && DECL_INITIAL (dp->var) != error_mark_node)
	init = build2 (INIT_EXPR, void_type_node, dp->result,
		       DECL_INITIAL (dp->var));
      else
	init = build_empty_stmt (EXPR_LOCATION (*tp));
      DECL_INITIAL (dp->var) = NULL_TREE;
      SET_EXPR_LOCATION (init, EXPR_LOCATION (*tp));
      *tp = init;
    }
  /* And replace all uses of the NRV with the RESULT_DECL.  */
  else if (*tp == dp->var)
    *tp = dp->result;

  /* Avoid walking into the same tree more than once.  Unfortunately, we
     can't just use walk_tree_without duplicates because it would only call
     us for the first occurrence of dp->var in the function body.  */
  slot = dp->visited.find_slot (*tp, INSERT);
  if (*slot)
    *walk_subtrees = 0;
  else
    *slot = *tp;

  /* Keep iterating.  */
  return NULL_TREE;
}
/* Called from finish_function to implement the named return value
   optimization by overriding all the RETURN_EXPRs and pertinent
   CLEANUP_STMTs and replacing all occurrences of VAR with RESULT, the
   RESULT_DECL for the function.  TP is the root of the function body
   to rewrite.  */
void
finalize_nrv (tree *tp, tree var, tree result)
{
  struct nrv_data data;

  /* Copy name from VAR to RESULT.  */
  DECL_NAME (result) = DECL_NAME (var);
  /* Don't forget that we take its address.  */
  TREE_ADDRESSABLE (result) = TREE_ADDRESSABLE (var);
  /* Finally set DECL_VALUE_EXPR to avoid assigning
     a stack slot at -O0 for the original var and debug info
     uses RESULT location for VAR.  */
  SET_DECL_VALUE_EXPR (var, result);
  DECL_HAS_VALUE_EXPR_P (var) = 1;

  data.var = var;
  data.result = result;
  cp_walk_tree (tp, finalize_nrv_r, &data, 0);
}
/* Create CP_OMP_CLAUSE_INFO for clause C of class type TYPE.  The
   NEED_* flags say which special member functions the clause requires.
   Returns true if it is invalid, i.e. if looking up any required
   member produced an error.  */
bool
cxx_omp_create_clause_info (tree c, tree type, bool need_default_ctor,
			    bool need_copy_ctor, bool need_copy_assignment,
			    bool need_dtor)
{
  int save_errorcount = errorcount;
  tree info, t;

  /* Always allocate 3 elements for simplicity.  These are the
     function decls for the ctor, dtor, and assignment op.
     This layout is known to the three lang hooks,
     cxx_omp_clause_default_init, cxx_omp_clause_copy_init,
     and cxx_omp_clause_assign_op.  */
  info = make_tree_vec (3);
  CP_OMP_CLAUSE_INFO (c) = info;

  if (need_default_ctor || need_copy_ctor)
    {
      if (need_default_ctor)
	t = get_default_ctor (type);
      else
	t = get_copy_ctor (type, tf_warning_or_error);

      /* Trivial member functions need not be recorded; the slot
	 stays NULL_TREE and the lang hooks fall back to bitwise
	 semantics.  */
      if (t && !trivial_fn_p (t))
	TREE_VEC_ELT (info, 0) = t;
    }

  if (need_dtor && TYPE_HAS_NONTRIVIAL_DESTRUCTOR (type))
    TREE_VEC_ELT (info, 1) = get_dtor (type, tf_warning_or_error);

  if (need_copy_assignment)
    {
      t = get_copy_assign (type);

      if (t && !trivial_fn_p (t))
	TREE_VEC_ELT (info, 2) = t;
    }

  /* Any new errors mean a required member was unusable.  */
  return errorcount != save_errorcount;
}
/* If DECL is a DECL_OMP_PRIVATIZED_MEMBER dummy variable whose value
   expression names a member, return the corresponding FIELD_DECL;
   otherwise return NULL_TREE.  */
static tree
omp_clause_decl_field (tree decl)
{
  /* Only artificial privatized-member dummies qualify.  */
  if (!VAR_P (decl)
      || !DECL_HAS_VALUE_EXPR_P (decl)
      || !DECL_ARTIFICIAL (decl)
      || !DECL_LANG_SPECIFIC (decl)
      || !DECL_OMP_PRIVATIZED_MEMBER (decl))
    return NULL_TREE;

  tree expr = DECL_VALUE_EXPR (decl);
  if (TREE_CODE (expr) == INDIRECT_REF)
    expr = TREE_OPERAND (expr, 0);
  if (TREE_CODE (expr) != COMPONENT_REF)
    return NULL_TREE;

  tree field = TREE_OPERAND (expr, 1);
  gcc_assert (TREE_CODE (field) == FIELD_DECL);
  return field;
}
/* Adjust DECL if needed for printing using %qE: privatized-member
   dummies are shown as their underlying FIELD_DECL.  */
static tree
omp_clause_printable_decl (tree decl)
{
  tree field = omp_clause_decl_field (decl);
  return field ? field : decl;
}
/* For a FIELD_DECL F and corresponding DECL_OMP_PRIVATIZED_MEMBER
   VAR_DECL T that doesn't need a DECL_EXPR added, record it for
   privatization.  */
static void
omp_note_field_privatization (tree f, tree t)
{
  if (!omp_private_member_map)
    omp_private_member_map = new hash_map<tree, tree>;
  /* get_or_insert yields a reference into the map; V is NULL_TREE
     exactly when F had no entry yet.  */
  tree &v = omp_private_member_map->get_or_insert (f);
  if (v == NULL_TREE)
    {
      v = t;
      omp_private_member_vec.safe_push (f);
      /* Signal that we don't want to create DECL_EXPR for this dummy var.  */
      omp_private_member_vec.safe_push (integer_zero_node);
    }
}
/* Privatize FIELD_DECL T, return corresponding DECL_OMP_PRIVATIZED_MEMBER
   dummy VAR_DECL.  If SHARED is true, create a fresh dummy each time
   instead of caching it in omp_private_member_map.  */
tree
omp_privatize_field (tree t, bool shared)
{
  tree m = finish_non_static_data_member (t, NULL_TREE, NULL_TREE);
  if (m == error_mark_node)
    return error_mark_node;
  if (!omp_private_member_map && !shared)
    omp_private_member_map = new hash_map<tree, tree>;
  if (TREE_CODE (TREE_TYPE (t)) == REFERENCE_TYPE)
    {
      /* For a reference member the access comes wrapped in an
	 INDIRECT_REF; strip it so the dummy binds to the reference
	 itself.  */
      gcc_assert (TREE_CODE (m) == INDIRECT_REF);
      m = TREE_OPERAND (m, 0);
    }
  tree vb = NULL_TREE;
  /* For SHARED, V aliases the local VB so nothing is recorded in the
     map; otherwise it aliases the (possibly new) map entry for T.  */
  tree &v = shared ? vb : omp_private_member_map->get_or_insert (t);
  if (v == NULL_TREE)
    {
      v = create_temporary_var (TREE_TYPE (m));
      if (!DECL_LANG_SPECIFIC (v))
	retrofit_lang_decl (v);
      DECL_OMP_PRIVATIZED_MEMBER (v) = 1;
      SET_DECL_VALUE_EXPR (v, m);
      DECL_HAS_VALUE_EXPR_P (v) = 1;
      if (!shared)
	omp_private_member_vec.safe_push (t);
    }
  return v;
}
/* Helper function for handle_omp_array_sections.  Called recursively
   to handle multiple array-section-subscripts.  C is the clause,
   T current expression (initially OMP_CLAUSE_DECL), which is either
   a TREE_LIST for array-section-subscript (TREE_PURPOSE is low-bound
   expression if specified, TREE_VALUE length expression if specified,
   TREE_CHAIN is what it has been specified after, or some decl.
   TYPES vector is populated with array section types, MAYBE_ZERO_LEN
   set to true if any of the array-section-subscript could have length
   of zero (explicit or implicit), FIRST_NON_ONE is the index of the
   first array-section-subscript which is known not to have length
   of one.  Given say:
   map(a[:b][2:1][:c][:2][:d][e:f][2:5])
   FIRST_NON_ONE will be 3, array-section-subscript [:b], [2:1] and [:c]
   all are or may have length of 1, array-section-subscript [:2] is the
   first one known not to have length 1.  For array-section-subscript
   <= FIRST_NON_ONE we diagnose non-contiguous arrays if low bound isn't
   0 or length isn't the array domain max + 1, for > FIRST_NON_ONE we
   can if MAYBE_ZERO_LEN is false.  MAYBE_ZERO_LEN will be true in the above
   case though, as some lengths could be zero.  */
static tree
handle_omp_array_sections_1 (tree c, tree t, vec<tree> &types,
			     bool &maybe_zero_len, unsigned int &first_non_one,
			     enum c_omp_region_type ort)
{
  tree ret, low_bound, length, type;

  /* Base case of the recursion: T is the underlying decl or member
     access rather than an array-section-subscript list.  */
  if (TREE_CODE (t) != TREE_LIST)
    {
      if (error_operand_p (t))
	return error_mark_node;
      if (REFERENCE_REF_P (t)
	  && TREE_CODE (TREE_OPERAND (t, 0)) == COMPONENT_REF)
	t = TREE_OPERAND (t, 0);
      ret = t;

      /* For map/to/from on a member access, validate the member chain:
	 no bit-fields, no members of unions.  */
      if (TREE_CODE (t) == COMPONENT_REF
	  && ort == C_ORT_OMP
	  && (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
	      || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_TO
	      || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FROM)
	  && !type_dependent_expression_p (t))
	{
	  if (TREE_CODE (TREE_OPERAND (t, 1)) == FIELD_DECL
	      && DECL_BIT_FIELD (TREE_OPERAND (t, 1)))
	    {
	      error_at (OMP_CLAUSE_LOCATION (c),
			"bit-field %qE in %qs clause",
			t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
	      return error_mark_node;
	    }
	  while (TREE_CODE (t) == COMPONENT_REF)
	    {
	      if (TREE_TYPE (TREE_OPERAND (t, 0))
		  && TREE_CODE (TREE_TYPE (TREE_OPERAND (t, 0))) == UNION_TYPE)
		{
		  error_at (OMP_CLAUSE_LOCATION (c),
			    "%qE is a member of a union", t);
		  return error_mark_node;
		}
	      t = TREE_OPERAND (t, 0);
	    }
	  if (REFERENCE_REF_P (t))
	    t = TREE_OPERAND (t, 0);
	}

      /* The base must be a variable or parameter.  */
      if (!VAR_P (t) && TREE_CODE (t) != PARM_DECL)
	{
	  if (processing_template_decl && TREE_CODE (t) != OVERLOAD)
	    return NULL_TREE;
	  if (DECL_P (t))
	    error_at (OMP_CLAUSE_LOCATION (c),
		      "%qD is not a variable in %qs clause", t,
		      omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
	  else
	    error_at (OMP_CLAUSE_LOCATION (c),
		      "%qE is not a variable in %qs clause", t,
		      omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
	  return error_mark_node;
	}
      else if (TREE_CODE (t) == PARM_DECL
	       && DECL_ARTIFICIAL (t)
	       && DECL_NAME (t) == this_identifier)
	{
	  error_at (OMP_CLAUSE_LOCATION (c),
		    "%<this%> allowed in OpenMP only in %<declare simd%>"
		    " clauses");
	  return error_mark_node;
	}
      else if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_DEPEND
	       && VAR_P (t) && CP_DECL_THREAD_LOCAL_P (t))
	{
	  error_at (OMP_CLAUSE_LOCATION (c),
		    "%qD is threadprivate variable in %qs clause", t,
		    omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
	  return error_mark_node;
	}
      if (type_dependent_expression_p (ret))
	return NULL_TREE;
      ret = convert_from_reference (ret);
      return ret;
    }

  /* Recursive case: T is a TREE_LIST; process the inner subscripts
     first so TYPES is populated innermost-to-outermost.  */
  if (ort == C_ORT_OMP
      && OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
      && TREE_CODE (TREE_CHAIN (t)) == FIELD_DECL)
    TREE_CHAIN (t) = omp_privatize_field (TREE_CHAIN (t), false);
  ret = handle_omp_array_sections_1 (c, TREE_CHAIN (t), types,
				     maybe_zero_len, first_non_one, ort);
  if (ret == error_mark_node || ret == NULL_TREE)
    return ret;

  type = TREE_TYPE (ret);
  low_bound = TREE_PURPOSE (t);
  length = TREE_VALUE (t);
  if ((low_bound && type_dependent_expression_p (low_bound))
      || (length && type_dependent_expression_p (length)))
    return NULL_TREE;

  if (low_bound == error_mark_node || length == error_mark_node)
    return error_mark_node;

  /* Both bound and length, when given, must be integral.  */
  if (low_bound && !INTEGRAL_TYPE_P (TREE_TYPE (low_bound)))
    {
      error_at (OMP_CLAUSE_LOCATION (c),
		"low bound %qE of array section does not have integral type",
		low_bound);
      return error_mark_node;
    }
  if (length && !INTEGRAL_TYPE_P (TREE_TYPE (length)))
    {
      error_at (OMP_CLAUSE_LOCATION (c),
		"length %qE of array section does not have integral type",
		length);
      return error_mark_node;
    }
  if (low_bound)
    low_bound = mark_rvalue_use (low_bound);
  if (length)
    length = mark_rvalue_use (length);
  /* We need to reduce to real constant-values for checks below.  */
  if (length)
    length = fold_simple (length);
  if (low_bound)
    low_bound = fold_simple (low_bound);
  /* Normalize over-wide integer constants to sizetype.  */
  if (low_bound
      && TREE_CODE (low_bound) == INTEGER_CST
      && TYPE_PRECISION (TREE_TYPE (low_bound))
	 > TYPE_PRECISION (sizetype))
    low_bound = fold_convert (sizetype, low_bound);
  if (length
      && TREE_CODE (length) == INTEGER_CST
      && TYPE_PRECISION (TREE_TYPE (length))
	 > TYPE_PRECISION (sizetype))
    length = fold_convert (sizetype, length);
  /* An omitted low bound defaults to zero.  */
  if (low_bound == NULL_TREE)
    low_bound = integer_zero_node;

  if (length != NULL_TREE)
    {
      if (!integer_nonzerop (length))
	{
	  /* depend/reduction clauses forbid zero-length sections;
	     elsewhere they merely set MAYBE_ZERO_LEN.  */
	  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND
	      || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
	    {
	      if (integer_zerop (length))
		{
		  error_at (OMP_CLAUSE_LOCATION (c),
			    "zero length array section in %qs clause",
			    omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
		  return error_mark_node;
		}
	    }
	  else
	    maybe_zero_len = true;
	}
      /* Advance FIRST_NON_ONE while lengths are (or may be) one.  */
      if (first_non_one == types.length ()
	  && (TREE_CODE (length) != INTEGER_CST || integer_onep (length)))
	first_non_one++;
    }
  if (TREE_CODE (type) == ARRAY_TYPE)
    {
      if (length == NULL_TREE
	  && (TYPE_DOMAIN (type) == NULL_TREE
	      || TYPE_MAX_VALUE (TYPE_DOMAIN (type)) == NULL_TREE))
	{
	  error_at (OMP_CLAUSE_LOCATION (c),
		    "for unknown bound array type length expression must "
		    "be specified");
	  return error_mark_node;
	}
      if (TREE_CODE (low_bound) == INTEGER_CST
	  && tree_int_cst_sgn (low_bound) == -1)
	{
	  error_at (OMP_CLAUSE_LOCATION (c),
		    "negative low bound in array section in %qs clause",
		    omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
	  return error_mark_node;
	}
      if (length != NULL_TREE
	  && TREE_CODE (length) == INTEGER_CST
	  && tree_int_cst_sgn (length) == -1)
	{
	  error_at (OMP_CLAUSE_LOCATION (c),
		    "negative length in array section in %qs clause",
		    omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
	  return error_mark_node;
	}
      /* With a constant array bound, check the section against the
	 array size (SIZE is domain max + 1, i.e. element count).  */
      if (TYPE_DOMAIN (type)
	  && TYPE_MAX_VALUE (TYPE_DOMAIN (type))
	  && TREE_CODE (TYPE_MAX_VALUE (TYPE_DOMAIN (type)))
	     == INTEGER_CST)
	{
	  tree size
	    = fold_convert (sizetype, TYPE_MAX_VALUE (TYPE_DOMAIN (type)));
	  size = size_binop (PLUS_EXPR, size, size_one_node);
	  if (TREE_CODE (low_bound) == INTEGER_CST)
	    {
	      if (tree_int_cst_lt (size, low_bound))
		{
		  error_at (OMP_CLAUSE_LOCATION (c),
			    "low bound %qE above array section size "
			    "in %qs clause", low_bound,
			    omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
		  return error_mark_node;
		}
	      if (tree_int_cst_equal (size, low_bound))
		{
		  /* Low bound equal to the size implies a zero-length
		     section.  */
		  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND
		      || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
		    {
		      error_at (OMP_CLAUSE_LOCATION (c),
				"zero length array section in %qs clause",
				omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
		      return error_mark_node;
		    }
		  maybe_zero_len = true;
		}
	      else if (length == NULL_TREE
		       && first_non_one == types.length ()
		       && tree_int_cst_equal
			    (TYPE_MAX_VALUE (TYPE_DOMAIN (type)),
			     low_bound))
		first_non_one++;
	    }
	  else if (length == NULL_TREE)
	    {
	      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_DEPEND
		  && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
		maybe_zero_len = true;
	      if (first_non_one == types.length ())
		first_non_one++;
	    }
	  if (length && TREE_CODE (length) == INTEGER_CST)
	    {
	      if (tree_int_cst_lt (size, length))
		{
		  error_at (OMP_CLAUSE_LOCATION (c),
			    "length %qE above array section size "
			    "in %qs clause", length,
			    omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
		  return error_mark_node;
		}
	      if (TREE_CODE (low_bound) == INTEGER_CST)
		{
		  tree lbpluslen
		    = size_binop (PLUS_EXPR,
				  fold_convert (sizetype, low_bound),
				  fold_convert (sizetype, length));
		  if (TREE_CODE (lbpluslen) == INTEGER_CST
		      && tree_int_cst_lt (size, lbpluslen))
		    {
		      error_at (OMP_CLAUSE_LOCATION (c),
				"high bound %qE above array section size "
				"in %qs clause", lbpluslen,
				omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
		      return error_mark_node;
		    }
		}
	    }
	}
      else if (length == NULL_TREE)
	{
	  if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_DEPEND
	      && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
	    maybe_zero_len = true;
	  if (first_non_one == types.length ())
	    first_non_one++;
	}

      /* For [lb:] we will need to evaluate lb more than once.  */
      if (length == NULL_TREE && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_DEPEND)
	{
	  tree lb = cp_save_expr (low_bound);
	  if (lb != low_bound)
	    {
	      TREE_PURPOSE (t) = lb;
	      low_bound = lb;
	    }
	}
    }
  else if (TREE_CODE (type) == POINTER_TYPE)
    {
      if (length == NULL_TREE)
	{
	  error_at (OMP_CLAUSE_LOCATION (c),
		    "for pointer type length expression must be specified");
	  return error_mark_node;
	}
      if (length != NULL_TREE
	  && TREE_CODE (length) == INTEGER_CST
	  && tree_int_cst_sgn (length) == -1)
	{
	  error_at (OMP_CLAUSE_LOCATION (c),
		    "negative length in array section in %qs clause",
		    omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
	  return error_mark_node;
	}
      /* If there is a pointer type anywhere but in the very first
	 array-section-subscript, the array section can't be contiguous.  */
      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_DEPEND
	  && TREE_CODE (TREE_CHAIN (t)) == TREE_LIST)
	{
	  error_at (OMP_CLAUSE_LOCATION (c),
		    "array section is not contiguous in %qs clause",
		    omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
	  return error_mark_node;
	}
    }
  else
    {
      error_at (OMP_CLAUSE_LOCATION (c),
		"%qE does not have pointer or array type", ret);
      return error_mark_node;
    }
  if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_DEPEND)
    types.safe_push (TREE_TYPE (ret));
  /* We will need to evaluate lb more than once.  */
  tree lb = cp_save_expr (low_bound);
  if (lb != low_bound)
    {
      TREE_PURPOSE (t) = lb;
      low_bound = lb;
    }
  /* Build the array reference for this subscript level.  */
  ret = grok_array_decl (OMP_CLAUSE_LOCATION (c), ret, low_bound, false);
  return ret;
}
/* Handle array sections for clause C.  OMP_CLAUSE_DECL (c) is a TREE_LIST
   of array-section-subscripts ([low-bound : length] pairs) built by the
   parser.  Validate the section, compute its base address and byte size,
   and rewrite the clause in place; for map clauses an extra pointer map
   clause may be chained after C.  Return true on error (the caller should
   then remove the clause), false otherwise.  */
static bool
handle_omp_array_sections (tree c, enum c_omp_region_type ort)
{
  bool maybe_zero_len = false;
  unsigned int first_non_one = 0;
  auto_vec<tree, 10> types;
  /* Recursively check each subscript; TYPES collects the type at each
     dimension (outermost first), FIRST becomes the lowest addressed
     element of the section.  */
  tree first = handle_omp_array_sections_1 (c, OMP_CLAUSE_DECL (c), types,
					    maybe_zero_len, first_non_one,
					    ort);
  if (first == error_mark_node)
    return true;
  if (first == NULL_TREE)
    return false;
  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND)
    {
      tree t = OMP_CLAUSE_DECL (c);
      tree tem = NULL_TREE;
      if (processing_template_decl)
	return false;
      /* Need to evaluate side effects in the length expressions
	 if any.  */
      while (TREE_CODE (t) == TREE_LIST)
	{
	  if (TREE_VALUE (t) && TREE_SIDE_EFFECTS (TREE_VALUE (t)))
	    {
	      if (tem == NULL_TREE)
		tem = TREE_VALUE (t);
	      else
		tem = build2 (COMPOUND_EXPR, TREE_TYPE (tem),
			      TREE_VALUE (t), tem);
	    }
	  t = TREE_CHAIN (t);
	}
      if (tem)
	first = build2 (COMPOUND_EXPR, TREE_TYPE (first), tem, first);
      /* For depend clauses only the address of the first element is
	 needed; no size computation is done.  */
      OMP_CLAUSE_DECL (c) = first;
    }
  else
    {
      unsigned int num = types.length (), i;
      tree t, side_effects = NULL_TREE, size = NULL_TREE;
      tree condition = NULL_TREE;
      if (int_size_in_bytes (TREE_TYPE (first)) <= 0)
	maybe_zero_len = true;
      if (processing_template_decl && maybe_zero_len)
	return false;
      /* Walk the subscript list from the innermost dimension outwards
	 (i counts down), accumulating the section size in bytes.  */
      for (i = num, t = OMP_CLAUSE_DECL (c); i > 0;
	   t = TREE_CHAIN (t))
	{
	  tree low_bound = TREE_PURPOSE (t);
	  tree length = TREE_VALUE (t);
	  i--;
	  if (low_bound
	      && TREE_CODE (low_bound) == INTEGER_CST
	      && TYPE_PRECISION (TREE_TYPE (low_bound))
		 > TYPE_PRECISION (sizetype))
	    low_bound = fold_convert (sizetype, low_bound);
	  if (length
	      && TREE_CODE (length) == INTEGER_CST
	      && TYPE_PRECISION (TREE_TYPE (length))
		 > TYPE_PRECISION (sizetype))
	    length = fold_convert (sizetype, length);
	  if (low_bound == NULL_TREE)
	    low_bound = integer_zero_node;
	  if (!maybe_zero_len && i > first_non_one)
	    {
	      /* Dimensions past FIRST_NON_ONE must cover the whole array
		 extent with a zero low bound, or the section would not
		 be contiguous in memory.  */
	      if (integer_nonzerop (low_bound))
		goto do_warn_noncontiguous;
	      if (length != NULL_TREE
		  && TREE_CODE (length) == INTEGER_CST
		  && TYPE_DOMAIN (types[i])
		  && TYPE_MAX_VALUE (TYPE_DOMAIN (types[i]))
		  && TREE_CODE (TYPE_MAX_VALUE (TYPE_DOMAIN (types[i])))
		     == INTEGER_CST)
		{
		  tree size;
		  size = size_binop (PLUS_EXPR,
				     TYPE_MAX_VALUE (TYPE_DOMAIN (types[i])),
				     size_one_node);
		  if (!tree_int_cst_equal (length, size))
		    {
		    do_warn_noncontiguous:
		      error_at (OMP_CLAUSE_LOCATION (c),
				"array section is not contiguous in %qs "
				"clause",
				omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
		      return true;
		    }
		}
	      /* Length expressions with side effects must still be
		 evaluated even though the value is statically known.  */
	      if (!processing_template_decl
		  && length != NULL_TREE
		  && TREE_SIDE_EFFECTS (length))
		{
		  if (side_effects == NULL_TREE)
		    side_effects = length;
		  else
		    side_effects = build2 (COMPOUND_EXPR,
					   TREE_TYPE (side_effects),
					   length, side_effects);
		}
	    }
	  else if (processing_template_decl)
	    continue;
	  else
	    {
	      tree l;
	      if (i > first_non_one
		  && ((length && integer_nonzerop (length))
		      || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)) 
		continue;
	      if (length)
		l = fold_convert (sizetype, length);
	      else
		{
		  /* No explicit length: use the full extent minus the
		     low bound.  */
		  l = size_binop (PLUS_EXPR,
				  TYPE_MAX_VALUE (TYPE_DOMAIN (types[i])),
				  size_one_node);
		  l = size_binop (MINUS_EXPR, l,
				  fold_convert (sizetype, low_bound));
		}
	      if (i > first_non_one)
		{
		  /* Possibly-zero-length outer dimensions contribute a
		     runtime nonzero check instead of a factor.  */
		  l = fold_build2 (NE_EXPR, boolean_type_node, l,
				   size_zero_node);
		  if (condition == NULL_TREE)
		    condition = l;
		  else
		    condition = fold_build2 (BIT_AND_EXPR, boolean_type_node,
					     l, condition);
		}
	      else if (size == NULL_TREE)
		{
		  size = size_in_bytes (TREE_TYPE (types[i]));
		  tree eltype = TREE_TYPE (types[num - 1]);
		  while (TREE_CODE (eltype) == ARRAY_TYPE)
		    eltype = TREE_TYPE (eltype);
		  /* For reductions SIZE counts elements, not bytes.  */
		  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
		    size = size_binop (EXACT_DIV_EXPR, size,
				       size_in_bytes (eltype));
		  size = size_binop (MULT_EXPR, size, l);
		  if (condition)
		    size = fold_build3 (COND_EXPR, sizetype, condition,
					size, size_zero_node);
		}
	      else
		size = size_binop (MULT_EXPR, size, l);
	    }
	}
      if (!processing_template_decl)
	{
	  if (side_effects)
	    size = build2 (COMPOUND_EXPR, sizetype, side_effects, size);
	  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
	    {
	      /* Rewrite the reduction decl as a MEM_REF of an array type
		 covering the whole section, with the byte offset of the
		 first element folded in when it is a constant.  */
	      size = size_binop (MINUS_EXPR, size, size_one_node);
	      tree index_type = build_index_type (size);
	      tree eltype = TREE_TYPE (first);
	      while (TREE_CODE (eltype) == ARRAY_TYPE)
		eltype = TREE_TYPE (eltype);
	      tree type = build_array_type (eltype, index_type);
	      tree ptype = build_pointer_type (eltype);
	      if (TREE_CODE (TREE_TYPE (t)) == REFERENCE_TYPE
		  && POINTER_TYPE_P (TREE_TYPE (TREE_TYPE (t))))
		t = convert_from_reference (t);
	      else if (TREE_CODE (TREE_TYPE (t)) == ARRAY_TYPE)
		t = build_fold_addr_expr (t);
	      tree t2 = build_fold_addr_expr (first);
	      t2 = fold_convert_loc (OMP_CLAUSE_LOCATION (c),
				     ptrdiff_type_node, t2);
	      t2 = fold_build2_loc (OMP_CLAUSE_LOCATION (c), MINUS_EXPR,
				    ptrdiff_type_node, t2,
				    fold_convert_loc (OMP_CLAUSE_LOCATION (c),
						      ptrdiff_type_node, t));
	      if (tree_fits_shwi_p (t2))
		t = build2 (MEM_REF, type, t,
			    build_int_cst (ptype, tree_to_shwi (t2)));
	      else
		{
		  t2 = fold_convert_loc (OMP_CLAUSE_LOCATION (c),
					 sizetype, t2);
		  t = build2_loc (OMP_CLAUSE_LOCATION (c), POINTER_PLUS_EXPR,
				  TREE_TYPE (t), t, t2);
		  t = build2 (MEM_REF, type, t, build_int_cst (ptype, 0));
		}
	      OMP_CLAUSE_DECL (c) = t;
	      return false;
	    }
	  OMP_CLAUSE_DECL (c) = first;
	  OMP_CLAUSE_SIZE (c) = size;
	  if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_MAP
	      || (TREE_CODE (t) == COMPONENT_REF
		  && TREE_CODE (TREE_TYPE (t)) == ARRAY_TYPE))
	    return false;
	  /* Data-movement map kinds tolerate zero-length sections; mark
	     the clause so the runtime skips the mapping in that case.  */
	  if (ort == C_ORT_OMP || ort == C_ORT_ACC)
	    switch (OMP_CLAUSE_MAP_KIND (c))
	      {
	      case GOMP_MAP_ALLOC:
	      case GOMP_MAP_TO:
	      case GOMP_MAP_FROM:
	      case GOMP_MAP_TOFROM:
	      case GOMP_MAP_ALWAYS_TO:
	      case GOMP_MAP_ALWAYS_FROM:
	      case GOMP_MAP_ALWAYS_TOFROM:
	      case GOMP_MAP_RELEASE:
	      case GOMP_MAP_DELETE:
	      case GOMP_MAP_FORCE_TO:
	      case GOMP_MAP_FORCE_FROM:
	      case GOMP_MAP_FORCE_TOFROM:
	      case GOMP_MAP_FORCE_PRESENT:
		OMP_CLAUSE_MAP_MAYBE_ZERO_LENGTH_ARRAY_SECTION (c) = 1;
		break;
	      default:
		break;
	      }
	  /* Chain a second map clause after C describing the base pointer
	     itself; its "size" is the bias from the pointer to the start
	     of the section.  */
	  tree c2 = build_omp_clause (OMP_CLAUSE_LOCATION (c),
				      OMP_CLAUSE_MAP);
	  if ((ort & C_ORT_OMP_DECLARE_SIMD) != C_ORT_OMP && ort != C_ORT_ACC)
	    OMP_CLAUSE_SET_MAP_KIND (c2, GOMP_MAP_POINTER);
	  else if (TREE_CODE (t) == COMPONENT_REF)
	    OMP_CLAUSE_SET_MAP_KIND (c2, GOMP_MAP_ALWAYS_POINTER);
	  else if (REFERENCE_REF_P (t)
		   && TREE_CODE (TREE_OPERAND (t, 0)) == COMPONENT_REF)
	    {
	      t = TREE_OPERAND (t, 0);
	      OMP_CLAUSE_SET_MAP_KIND (c2, GOMP_MAP_ALWAYS_POINTER);
	    }
	  else
	    OMP_CLAUSE_SET_MAP_KIND (c2, GOMP_MAP_FIRSTPRIVATE_POINTER);
	  if (OMP_CLAUSE_MAP_KIND (c2) != GOMP_MAP_FIRSTPRIVATE_POINTER
	      && !cxx_mark_addressable (t))
	    return false;
	  OMP_CLAUSE_DECL (c2) = t;
	  t = build_fold_addr_expr (first);
	  t = fold_convert_loc (OMP_CLAUSE_LOCATION (c),
				ptrdiff_type_node, t);
	  tree ptr = OMP_CLAUSE_DECL (c2);
	  ptr = convert_from_reference (ptr);
	  if (!POINTER_TYPE_P (TREE_TYPE (ptr)))
	    ptr = build_fold_addr_expr (ptr);
	  t = fold_build2_loc (OMP_CLAUSE_LOCATION (c), MINUS_EXPR,
			       ptrdiff_type_node, t,
			       fold_convert_loc (OMP_CLAUSE_LOCATION (c),
						 ptrdiff_type_node, ptr));
	  OMP_CLAUSE_SIZE (c2) = t;
	  OMP_CLAUSE_CHAIN (c2) = OMP_CLAUSE_CHAIN (c);
	  OMP_CLAUSE_CHAIN (c) = c2;
	  ptr = OMP_CLAUSE_DECL (c2);
	  /* A reference to pointer needs a third clause mapping the
	     reference itself (size zero).  */
	  if (OMP_CLAUSE_MAP_KIND (c2) != GOMP_MAP_FIRSTPRIVATE_POINTER
	      && TREE_CODE (TREE_TYPE (ptr)) == REFERENCE_TYPE
	      && POINTER_TYPE_P (TREE_TYPE (TREE_TYPE (ptr))))
	    {
	      tree c3 = build_omp_clause (OMP_CLAUSE_LOCATION (c),
					  OMP_CLAUSE_MAP);
	      OMP_CLAUSE_SET_MAP_KIND (c3, OMP_CLAUSE_MAP_KIND (c2));
	      OMP_CLAUSE_DECL (c3) = ptr;
	      if (OMP_CLAUSE_MAP_KIND (c2) == GOMP_MAP_ALWAYS_POINTER)
		OMP_CLAUSE_DECL (c2) = build_simple_mem_ref (ptr);
	      else
		OMP_CLAUSE_DECL (c2) = convert_from_reference (ptr);
	      OMP_CLAUSE_SIZE (c3) = size_zero_node;
	      OMP_CLAUSE_CHAIN (c3) = OMP_CLAUSE_CHAIN (c2);
	      OMP_CLAUSE_CHAIN (c2) = c3;
	    }
	}
    }
  return false;
}
/* Return identifier to look up for omp declare reduction.
   REDUCTION_CODE is the operator (PLUS_EXPR etc. for the predefined
   reductions), REDUCTION_ID the user-supplied identifier for UDRs, and
   TYPE the reduction type (or NULL_TREE).  The returned identifier has
   the form "omp declare reduction <op>" or, with a type,
   "omp declare reduction <op>~<mangled-type>".  */
tree
omp_reduction_id (enum tree_code reduction_code, tree reduction_id, tree type)
{
  const char *p = NULL;
  const char *m = NULL;
  switch (reduction_code)
    {
    case PLUS_EXPR:
    case MULT_EXPR:
    case MINUS_EXPR:
    case BIT_AND_EXPR:
    case BIT_XOR_EXPR:
    case BIT_IOR_EXPR:
    case TRUTH_ANDIF_EXPR:
    case TRUTH_ORIF_EXPR:
      /* Operator reductions reuse the C++ operator identifier.  */
      reduction_id = cp_operator_id (reduction_code);
      break;
    case MIN_EXPR:
      p = "min";
      break;
    case MAX_EXPR:
      p = "max";
      break;
    default:
      break;
    }
  if (p == NULL)
    {
      if (TREE_CODE (reduction_id) != IDENTIFIER_NODE)
	return error_mark_node;
      p = IDENTIFIER_POINTER (reduction_id);
    }
  if (type != NULL_TREE)
    m = mangle_type_string (TYPE_MAIN_VARIANT (type));
  const char prefix[] = "omp declare reduction ";
  size_t lenp = sizeof (prefix);
  /* If P already carries the prefix, don't prepend it again:
     LENP == 1 makes the copies below skip the prefix entirely.  */
  if (strncmp (p, prefix, lenp - 1) == 0)
    lenp = 1;
  size_t len = strlen (p);
  size_t lenm = m ? strlen (m) + 1 : 0;
  char *name = XALLOCAVEC (char, lenp + len + lenm);
  if (lenp > 1)
    memcpy (name, prefix, lenp - 1);
  /* Copies P including its terminating NUL; the '~' below may then
     overwrite that NUL when a mangled type is appended.  */
  memcpy (name + lenp - 1, p, len + 1);
  if (m)
    {
      name[lenp + len - 1] = '~';
      memcpy (name + lenp + len, m, lenm);
    }
  return get_identifier (name);
}
/* Lookup OpenMP UDR ID for TYPE, return the corresponding artificial
   FUNCTION_DECL or NULL_TREE if not found.  Performs unqualified (plus
   Koenig) or qualified lookup depending on the form of ID, then scans
   overloads for one whose first argument type matches TYPE exactly.
   If nothing is found for a class type, bases are searched recursively;
   an ambiguous result across bases is diagnosed here unless AMBIGUOUSP
   was supplied by a recursive caller.  *BASELINKP (or the local
   BASELINK) records the BASELINK for a later access check.  */
static tree
omp_reduction_lookup (location_t loc, tree id, tree type, tree *baselinkp,
		      vec<tree> *ambiguousp)
{
  tree orig_id = id;
  tree baselink = NULL_TREE;
  if (identifier_p (id))
    {
      cp_id_kind idk;
      bool nonint_cst_expression_p;
      const char *error_msg;
      id = omp_reduction_id (ERROR_MARK, id, type);
      tree decl = lookup_name (id);
      if (decl == NULL_TREE)
	decl = error_mark_node;
      id = finish_id_expression (id, decl, NULL_TREE, &idk, false, true,
				 &nonint_cst_expression_p, false, true, false,
				 false, &error_msg, loc);
      /* Unqualified lookup failed: try argument-dependent lookup with
	 a reference to TYPE as the argument.  */
      if (idk == CP_ID_KIND_UNQUALIFIED
	  && identifier_p (id))
	{
	  vec<tree, va_gc> *args = NULL;
	  vec_safe_push (args, build_reference_type (type));
	  id = perform_koenig_lookup (id, args, tf_none);
	}
    }
  else if (TREE_CODE (id) == SCOPE_REF)
    id = lookup_qualified_name (TREE_OPERAND (id, 0),
				omp_reduction_id (ERROR_MARK,
						  TREE_OPERAND (id, 1),
						  type),
				false, false);
  tree fns = id;
  if (id && is_overloaded_fn (id))
    id = get_fns (id);
  /* Pick the overload whose first parameter refers to exactly TYPE.  */
  for (; id; id = OVL_NEXT (id))
    {
      tree fndecl = OVL_CURRENT (id);
      if (TREE_CODE (fndecl) == FUNCTION_DECL)
	{
	  tree argtype = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
	  if (same_type_p (TREE_TYPE (argtype), type))
	    break;
	}
    }
  if (id && BASELINK_P (fns))
    {
      if (baselinkp)
	*baselinkp = fns;
      else
	baselink = fns;
    }
  /* Not found directly: search base classes, collecting multiple hits
     so an ambiguity can be diagnosed.  */
  if (id == NULL_TREE && CLASS_TYPE_P (type) && TYPE_BINFO (type))
    {
      vec<tree> ambiguous = vNULL;
      tree binfo = TYPE_BINFO (type), base_binfo, ret = NULL_TREE;
      unsigned int ix;
      if (ambiguousp == NULL)
	ambiguousp = &ambiguous;
      for (ix = 0; BINFO_BASE_ITERATE (binfo, ix, base_binfo); ix++)
	{
	  id = omp_reduction_lookup (loc, orig_id, BINFO_TYPE (base_binfo),
				     baselinkp ? baselinkp : &baselink,
				     ambiguousp);
	  if (id == NULL_TREE)
	    continue;
	  if (!ambiguousp->is_empty ())
	    ambiguousp->safe_push (id);
	  else if (ret != NULL_TREE)
	    {
	      /* Second distinct hit: start recording the ambiguity.  */
	      ambiguousp->safe_push (ret);
	      ambiguousp->safe_push (id);
	      ret = NULL_TREE;
	    }
	  else
	    ret = id;
	}
      /* Recursive invocations leave diagnosis to the outermost call.  */
      if (ambiguousp != &ambiguous)
	return ret;
      if (!ambiguous.is_empty ())
	{
	  const char *str = _("candidates are:");
	  unsigned int idx;
	  tree udr;
	  error_at (loc, "user defined reduction lookup is ambiguous");
	  FOR_EACH_VEC_ELT (ambiguous, idx, udr)
	    {
	      inform (DECL_SOURCE_LOCATION (udr), "%s %#D", str, udr);
	      if (idx == 0)
		str = get_spaces (str);
	    }
	  ambiguous.release ();
	  ret = error_mark_node;
	  baselink = NULL_TREE;
	}
      id = ret;
    }
  if (id && baselink)
    perform_or_defer_access_check (BASELINK_BINFO (baselink),
				   id, id, tf_warning_or_error);
  return id;
}
/* Helper function for cp_parser_omp_declare_reduction_exprs and
   tsubst_omp_udr, used as a cp_walk_tree callback.  DATA is the
   omp_priv VAR_DECL.  Strip any CLEANUP_STMT guarding omp_priv, and
   rewrite its DECL_EXPR so that DECL_INITIAL is emitted as a separate
   INIT_EXPR immediately after the DECL_EXPR.  Always returns NULL_TREE
   so the walk continues.  */
tree
cp_remove_omp_priv_cleanup_stmt (tree *tp, int *walk_subtrees, void *data)
{
  tree node = *tp;
  tree omp_priv = (tree) data;

  if (TYPE_P (node))
    {
      /* Nothing interesting inside types; prune the walk.  */
      *walk_subtrees = 0;
      return NULL_TREE;
    }

  if (TREE_CODE (node) == CLEANUP_STMT && CLEANUP_DECL (node) == omp_priv)
    {
      /* Drop the cleanup wrapper, keeping only its body.  */
      *tp = CLEANUP_BODY (node);
      return NULL_TREE;
    }

  if (TREE_CODE (node) == DECL_EXPR)
    {
      tree decl = DECL_EXPR_DECL (node);
      if (decl == omp_priv
	  && !processing_template_decl
	  && DECL_INITIAL (decl)
	  && DECL_INITIAL (decl) != error_mark_node)
	{
	  /* Replace "T omp_priv = init;" with the declaration followed
	     by an explicit INIT_EXPR.  */
	  tree stmts = NULL_TREE;
	  append_to_statement_list_force (node, &stmts);
	  tree init = build2 (INIT_EXPR, void_type_node, decl,
			      DECL_INITIAL (decl));
	  DECL_INITIAL (decl) = NULL_TREE;
	  append_to_statement_list_force (init, &stmts);
	  *tp = stmts;
	}
    }

  return NULL_TREE;
}
/* Data passed from cp_check_omp_declare_reduction to
   cp_check_omp_declare_reduction_r.  */
struct cp_check_omp_declare_reduction_data
{
  location_t loc;	/* Location of the UDR, for diagnostics.  */
  tree stmts[7];	/* The UDR body's top-level statements: decls for
			   omp_out/omp_in, the combiner, then (optionally)
			   decls for omp_priv/omp_orig and the initializer.  */
  bool combiner_p;	/* True while checking the combiner, false while
			   checking the initializer.  */
};
/* Helper function for cp_check_omp_declare_reduction, called via
   cp_walk_tree.  Diagnose any reference to a non-artificial variable
   other than the two allowed special variables (omp_out/omp_in in the
   combiner, omp_priv/omp_orig in the initializer).  Returns the
   offending tree to stop the walk, or NULL_TREE to continue.  */
static tree
cp_check_omp_declare_reduction_r (tree *tp, int *, void *data)
{
  struct cp_check_omp_declare_reduction_data *udr_data
    = (struct cp_check_omp_declare_reduction_data *) data;
  /* stmts[0]/stmts[1] declare omp_out/omp_in, stmts[3]/stmts[4]
     declare omp_priv/omp_orig; anything else is an error.  */
  if (SSA_VAR_P (*tp)
      && !DECL_ARTIFICIAL (*tp)
      && *tp != DECL_EXPR_DECL (udr_data->stmts[udr_data->combiner_p ? 0 : 3])
      && *tp != DECL_EXPR_DECL (udr_data->stmts[udr_data->combiner_p ? 1 : 4]))
    {
      location_t loc = udr_data->loc;
      if (udr_data->combiner_p)
	error_at (loc, "%<#pragma omp declare reduction%> combiner refers to "
		       "variable %qD which is not %<omp_out%> nor %<omp_in%>",
		  *tp);
      else
	error_at (loc, "%<#pragma omp declare reduction%> initializer refers "
		       "to variable %qD which is not %<omp_priv%> nor "
		       "%<omp_orig%>",
		  *tp);
      return *tp;
    }
  return NULL_TREE;
}
/* Diagnose violation of OpenMP #pragma omp declare reduction restrictions.
   UDR is the artificial FUNCTION_DECL holding the reduction; its first
   parameter's referenced type is the reduction type.  Rejects predefined
   arithmetic reductions, function/array/reference types and qualified
   types, then walks the combiner and initializer for references to
   disallowed variables.  */
void
cp_check_omp_declare_reduction (tree udr)
{
  tree type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (udr)));
  gcc_assert (TREE_CODE (type) == REFERENCE_TYPE);
  type = TREE_TYPE (type);
  int i;
  location_t loc = DECL_SOURCE_LOCATION (udr);
  if (type == error_mark_node)
    return;
  if (ARITHMETIC_TYPE_P (type))
    {
      /* Check whether the UDR name redeclares one of the eight
	 operator reductions predefined for arithmetic types...  */
      static enum tree_code predef_codes[]
	= { PLUS_EXPR, MULT_EXPR, MINUS_EXPR, BIT_AND_EXPR, BIT_XOR_EXPR,
	    BIT_IOR_EXPR, TRUTH_ANDIF_EXPR, TRUTH_ORIF_EXPR };
      for (i = 0; i < 8; i++)
	{
	  tree id = omp_reduction_id (predef_codes[i], NULL_TREE, NULL_TREE);
	  const char *n1 = IDENTIFIER_POINTER (DECL_NAME (udr));
	  const char *n2 = IDENTIFIER_POINTER (id);
	  /* The UDR name may carry a '~' plus mangled type suffix.  */
	  if (strncmp (n1, n2, IDENTIFIER_LENGTH (id)) == 0
	      && (n1[IDENTIFIER_LENGTH (id)] == '~'
		  || n1[IDENTIFIER_LENGTH (id)] == '\0'))
	    break;
	}
      /* ... or one of min/max, which are predefined too (but not for
	 complex types).  */
      if (i == 8
	  && TREE_CODE (type) != COMPLEX_EXPR)
	{
	  const char prefix_minmax[] = "omp declare reduction m";
	  size_t prefix_size = sizeof (prefix_minmax) - 1;
	  const char *n = IDENTIFIER_POINTER (DECL_NAME (udr));
	  if (strncmp (IDENTIFIER_POINTER (DECL_NAME (udr)),
		       prefix_minmax, prefix_size) == 0
	      && ((n[prefix_size] == 'i' && n[prefix_size + 1] == 'n')
		  || (n[prefix_size] == 'a' && n[prefix_size + 1] == 'x'))
	      && (n[prefix_size + 2] == '~' || n[prefix_size + 2] == '\0'))
	    i = 0;
	}
      if (i < 8)
	{
	  error_at (loc, "predeclared arithmetic type %qT in "
			 "%<#pragma omp declare reduction%>", type);
	  return;
	}
    }
  else if (TREE_CODE (type) == FUNCTION_TYPE
	   || TREE_CODE (type) == METHOD_TYPE
	   || TREE_CODE (type) == ARRAY_TYPE)
    {
      error_at (loc, "function or array type %qT in "
		     "%<#pragma omp declare reduction%>", type);
      return;
    }
  else if (TREE_CODE (type) == REFERENCE_TYPE)
    {
      error_at (loc, "reference type %qT in %<#pragma omp declare reduction%>",
		type);
      return;
    }
  else if (TYPE_QUALS_NO_ADDR_SPACE (type))
    {
      error_at (loc, "const, volatile or __restrict qualified type %qT in "
		     "%<#pragma omp declare reduction%>", type);
      return;
    }
  tree body = DECL_SAVED_TREE (udr);
  if (body == NULL_TREE || TREE_CODE (body) != STATEMENT_LIST)
    return;
  tree_stmt_iterator tsi;
  struct cp_check_omp_declare_reduction_data data;
  memset (data.stmts, 0, sizeof data.stmts);
  /* The UDR body has a fixed shape: omp_out decl, omp_in decl, combiner,
     and optionally omp_priv decl, omp_orig decl, initializer, cleanup.  */
  for (i = 0, tsi = tsi_start (body);
       i < 7 && !tsi_end_p (tsi);
       i++, tsi_next (&tsi))
    data.stmts[i] = tsi_stmt (tsi);
  data.loc = loc;
  gcc_assert (tsi_end_p (tsi));
  if (i >= 3)
    {
      gcc_assert (TREE_CODE (data.stmts[0]) == DECL_EXPR
		  && TREE_CODE (data.stmts[1]) == DECL_EXPR);
      /* TREE_NO_WARNING on omp_out's decl marks a UDR already
	 diagnosed; don't repeat the errors.  */
      if (TREE_NO_WARNING (DECL_EXPR_DECL (data.stmts[0])))
	return;
      data.combiner_p = true;
      if (cp_walk_tree (&data.stmts[2], cp_check_omp_declare_reduction_r,
			&data, NULL))
	TREE_NO_WARNING (DECL_EXPR_DECL (data.stmts[0])) = 1;
    }
  if (i >= 6)
    {
      gcc_assert (TREE_CODE (data.stmts[3]) == DECL_EXPR
		  && TREE_CODE (data.stmts[4]) == DECL_EXPR);
      data.combiner_p = false;
      if (cp_walk_tree (&data.stmts[5], cp_check_omp_declare_reduction_r,
			&data, NULL)
	  || cp_walk_tree (&DECL_INITIAL (DECL_EXPR_DECL (data.stmts[3])),
			   cp_check_omp_declare_reduction_r, &data, NULL))
	TREE_NO_WARNING (DECL_EXPR_DECL (data.stmts[0])) = 1;
      if (i == 7)
	gcc_assert (TREE_CODE (data.stmts[6]) == DECL_EXPR);
    }
}
/* Helper function of finish_omp_clauses.  Clone STMT as if we were making
   an inline call.  But, remap
   the OMP_DECL1 VAR_DECL (omp_out resp. omp_orig) to PLACEHOLDER
   and OMP_DECL2 VAR_DECL (omp_in resp. omp_priv) to DECL.
   Returns the (possibly rewritten) copy of STMT.  */
static tree
clone_omp_udr (tree stmt, tree omp_decl1, tree omp_decl2,
	       tree decl, tree placeholder)
{
  copy_body_data id;
  hash_map<tree, tree> decl_map;
  /* Seed the remap table so the inliner machinery substitutes the
     special UDR variables with the caller-provided trees.  */
  decl_map.put (omp_decl1, placeholder);
  decl_map.put (omp_decl2, decl);
  memset (&id, 0, sizeof (id));
  id.src_fn = DECL_CONTEXT (omp_decl1);
  id.dst_fn = current_function_decl;
  id.src_cfun = DECL_STRUCT_FUNCTION (id.src_fn);
  id.decl_map = &decl_map;
  id.copy_decl = copy_decl_no_change;
  id.transform_call_graph_edges = CB_CGE_DUPLICATE;
  id.transform_new_cfg = true;
  /* The UDR body is inlined as statements, not as a call, so returns
     are left alone and no EH landing pad is involved.  */
  id.transform_return_to_modify = false;
  id.transform_lang_insert_block = NULL;
  id.eh_lp_nr = 0;
  walk_tree (&stmt, copy_tree_body_r, &id, NULL);
  return stmt;
}
/* Helper function of finish_omp_clauses, called via cp_walk_tree.
   DATA is the OMP_CLAUSE_PLACEHOLDER tree being searched for; return
   it (stopping the walk) if *TP is that tree, NULL_TREE otherwise.  */
static tree
find_omp_placeholder_r (tree *tp, int *, void *data)
{
  tree placeholder = (tree) data;
  return *tp == placeholder ? placeholder : NULL_TREE;
}
/* Helper function of finish_omp_clauses.  Handle OMP_CLAUSE_REDUCTION C.
   Return true if there is some error and the clause should be removed.
   On success, predefined reductions get a NULL placeholder, while
   user-defined reductions get their combiner and initializer cloned
   into OMP_CLAUSE_REDUCTION_MERGE/INIT.  *NEED_DEFAULT_CTOR and
   *NEED_DTOR are set when the element type needs construction or
   destruction by the caller.  */
static bool
finish_omp_reduction_clause (tree c, bool *need_default_ctor, bool *need_dtor)
{
  tree t = OMP_CLAUSE_DECL (c);
  bool predefined = false;
  if (TREE_CODE (t) == TREE_LIST)
    {
      /* Array sections remain unprocessed TREE_LISTs in templates.  */
      gcc_assert (processing_template_decl);
      return false;
    }
  tree type = TREE_TYPE (t);
  if (TREE_CODE (t) == MEM_REF)
    type = TREE_TYPE (type);
  if (TREE_CODE (type) == REFERENCE_TYPE)
    type = TREE_TYPE (type);
  if (TREE_CODE (type) == ARRAY_TYPE)
    {
      /* Whole-array reduction: rewrite the decl as a MEM_REF over the
	 flattened element array.  */
      tree oatype = type;
      gcc_assert (TREE_CODE (t) != MEM_REF);
      while (TREE_CODE (type) == ARRAY_TYPE)
	type = TREE_TYPE (type);
      if (!processing_template_decl)
	{
	  t = require_complete_type (t);
	  if (t == error_mark_node)
	    return true;
	  tree size = size_binop (EXACT_DIV_EXPR, TYPE_SIZE_UNIT (oatype),
				  TYPE_SIZE_UNIT (type));
	  if (integer_zerop (size))
	    {
	      error ("%qE in %<reduction%> clause is a zero size array",
		     omp_clause_printable_decl (t));
	      return true;
	    }
	  size = size_binop (MINUS_EXPR, size, size_one_node);
	  tree index_type = build_index_type (size);
	  tree atype = build_array_type (type, index_type);
	  tree ptype = build_pointer_type (type);
	  if (TREE_CODE (TREE_TYPE (t)) == ARRAY_TYPE)
	    t = build_fold_addr_expr (t);
	  t = build2 (MEM_REF, atype, t, build_int_cst (ptype, 0));
	  OMP_CLAUSE_DECL (c) = t;
	}
    }
  if (type == error_mark_node)
    return true;
  else if (ARITHMETIC_TYPE_P (type))
    /* Decide whether the operator is one of OpenMP's predefined
       reductions for this arithmetic type.  */
    switch (OMP_CLAUSE_REDUCTION_CODE (c))
      {
      case PLUS_EXPR:
      case MULT_EXPR:
      case MINUS_EXPR:
	predefined = true;
	break;
      case MIN_EXPR:
      case MAX_EXPR:
	if (TREE_CODE (type) == COMPLEX_TYPE)
	  break;
	predefined = true;
	break;
      case BIT_AND_EXPR:
      case BIT_IOR_EXPR:
      case BIT_XOR_EXPR:
	if (FLOAT_TYPE_P (type) || TREE_CODE (type) == COMPLEX_TYPE)
	  break;
	predefined = true;
	break;
      case TRUTH_ANDIF_EXPR:
      case TRUTH_ORIF_EXPR:
	if (FLOAT_TYPE_P (type))
	  break;
	predefined = true;
	break;
      default:
	break;
      }
  else if (TYPE_READONLY (type))
    {
      error ("%qE has const type for %<reduction%>",
	     omp_clause_printable_decl (t));
      return true;
    }
  else if (!processing_template_decl)
    {
      t = require_complete_type (t);
      if (t == error_mark_node)
	return true;
      OMP_CLAUSE_DECL (c) = t;
    }
  if (predefined)
    {
      OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL_TREE;
      return false;
    }
  else if (processing_template_decl)
    return false;
  /* Not predefined: look up a user-defined reduction for TYPE.  */
  tree id = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
  type = TYPE_MAIN_VARIANT (type);
  OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL_TREE;
  if (id == NULL_TREE)
    id = omp_reduction_id (OMP_CLAUSE_REDUCTION_CODE (c),
			   NULL_TREE, NULL_TREE);
  id = omp_reduction_lookup (OMP_CLAUSE_LOCATION (c), id, type, NULL, NULL);
  if (id)
    {
      if (id == error_mark_node)
	return true;
      id = OVL_CURRENT (id);
      mark_used (id);
      tree body = DECL_SAVED_TREE (id);
      if (!body)
	return true;
      if (TREE_CODE (body) == STATEMENT_LIST)
	{
	  tree_stmt_iterator tsi;
	  tree placeholder = NULL_TREE, decl_placeholder = NULL_TREE;
	  int i;
	  tree stmts[7];
	  tree atype = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (id)));
	  atype = TREE_TYPE (atype);
	  /* If the UDR was found on a base class, casts between TYPE
	     and the base type ATYPE are needed around the clones.  */
	  bool need_static_cast = !same_type_p (type, atype);
	  memset (stmts, 0, sizeof stmts);
	  for (i = 0, tsi = tsi_start (body);
	       i < 7 && !tsi_end_p (tsi);
	       i++, tsi_next (&tsi))
	    stmts[i] = tsi_stmt (tsi);
	  gcc_assert (tsi_end_p (tsi));
	  if (i >= 3)
	    {
	      /* stmts[0]/stmts[1] declare omp_out/omp_in, stmts[2] is
		 the combiner; clone it with the placeholders plugged in.  */
	      gcc_assert (TREE_CODE (stmts[0]) == DECL_EXPR
			  && TREE_CODE (stmts[1]) == DECL_EXPR);
	      placeholder = build_lang_decl (VAR_DECL, NULL_TREE, type);
	      DECL_ARTIFICIAL (placeholder) = 1;
	      DECL_IGNORED_P (placeholder) = 1;
	      OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = placeholder;
	      if (TREE_CODE (t) == MEM_REF)
		{
		  decl_placeholder = build_lang_decl (VAR_DECL, NULL_TREE,
						      type);
		  DECL_ARTIFICIAL (decl_placeholder) = 1;
		  DECL_IGNORED_P (decl_placeholder) = 1;
		  OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c) = decl_placeholder;
		}
	      if (TREE_ADDRESSABLE (DECL_EXPR_DECL (stmts[0])))
		cxx_mark_addressable (placeholder);
	      if (TREE_ADDRESSABLE (DECL_EXPR_DECL (stmts[1]))
		  && TREE_CODE (TREE_TYPE (OMP_CLAUSE_DECL (c)))
		     != REFERENCE_TYPE)
		cxx_mark_addressable (decl_placeholder ? decl_placeholder
				      : OMP_CLAUSE_DECL (c));
	      tree omp_out = placeholder;
	      tree omp_in = decl_placeholder ? decl_placeholder
			    : convert_from_reference (OMP_CLAUSE_DECL (c));
	      if (need_static_cast)
		{
		  tree rtype = build_reference_type (atype);
		  omp_out = build_static_cast (rtype, omp_out,
					       tf_warning_or_error);
		  omp_in = build_static_cast (rtype, omp_in,
					      tf_warning_or_error);
		  if (omp_out == error_mark_node || omp_in == error_mark_node)
		    return true;
		  omp_out = convert_from_reference (omp_out);
		  omp_in = convert_from_reference (omp_in);
		}
	      OMP_CLAUSE_REDUCTION_MERGE (c)
		= clone_omp_udr (stmts[2], DECL_EXPR_DECL (stmts[0]),
				 DECL_EXPR_DECL (stmts[1]), omp_in, omp_out);
	    }
	  if (i >= 6)
	    {
	      /* stmts[3]/stmts[4] declare omp_priv/omp_orig, stmts[5]
		 is the UDR's explicit initializer; clone it too.  */
	      gcc_assert (TREE_CODE (stmts[3]) == DECL_EXPR
			  && TREE_CODE (stmts[4]) == DECL_EXPR);
	      if (TREE_ADDRESSABLE (DECL_EXPR_DECL (stmts[3])))
		cxx_mark_addressable (decl_placeholder ? decl_placeholder
				      : OMP_CLAUSE_DECL (c));
	      if (TREE_ADDRESSABLE (DECL_EXPR_DECL (stmts[4])))
		cxx_mark_addressable (placeholder);
	      tree omp_priv = decl_placeholder ? decl_placeholder
			      : convert_from_reference (OMP_CLAUSE_DECL (c));
	      tree omp_orig = placeholder;
	      if (need_static_cast)
		{
		  if (i == 7)
		    {
		      error_at (OMP_CLAUSE_LOCATION (c),
				"user defined reduction with constructor "
				"initializer for base class %qT", atype);
		      return true;
		    }
		  tree rtype = build_reference_type (atype);
		  omp_priv = build_static_cast (rtype, omp_priv,
						tf_warning_or_error);
		  omp_orig = build_static_cast (rtype, omp_orig,
						tf_warning_or_error);
		  if (omp_priv == error_mark_node
		      || omp_orig == error_mark_node)
		    return true;
		  omp_priv = convert_from_reference (omp_priv);
		  omp_orig = convert_from_reference (omp_orig);
		}
	      if (i == 6)
		*need_default_ctor = true;
	      OMP_CLAUSE_REDUCTION_INIT (c)
		= clone_omp_udr (stmts[5], DECL_EXPR_DECL (stmts[4]),
				 DECL_EXPR_DECL (stmts[3]),
				 omp_priv, omp_orig);
	      /* Note whether the initializer actually reads omp_orig.  */
	      if (cp_walk_tree (&OMP_CLAUSE_REDUCTION_INIT (c),
				find_omp_placeholder_r, placeholder, NULL))
		OMP_CLAUSE_REDUCTION_OMP_ORIG_REF (c) = 1;
	    }
	  else if (i >= 3)
	    {
	      /* No explicit initializer clause: default-construct class
		 types, zero-initialize everything else.  */
	      if (CLASS_TYPE_P (type) && !pod_type_p (type))
		*need_default_ctor = true;
	      else
		{
		  tree init;
		  tree v = decl_placeholder ? decl_placeholder
			   : convert_from_reference (t);
		  if (AGGREGATE_TYPE_P (TREE_TYPE (v)))
		    init = build_constructor (TREE_TYPE (v), NULL);
		  else
		    init = fold_convert (TREE_TYPE (v), integer_zero_node);
		  OMP_CLAUSE_REDUCTION_INIT (c)
		    = build2 (INIT_EXPR, TREE_TYPE (v), v, init);
		}
	    }
	}
    }
  if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
    *need_dtor = true;
  else
    {
      error ("user defined reduction not found for %qE",
	     omp_clause_printable_decl (t));
      return true;
    }
  if (TREE_CODE (OMP_CLAUSE_DECL (c)) == MEM_REF)
    gcc_assert (TYPE_SIZE_UNIT (type)
		&& TREE_CODE (TYPE_SIZE_UNIT (type)) == INTEGER_CST);
  return false;
}
/* Called from finish_struct_1.  linear(this) or linear(this:step)
   clauses might not be finalized yet because the class has been incomplete
   when parsing #pragma omp declare simd methods.  Fix those up now.
   T is the now-complete class type; the step of each pending linear
   clause is scaled by the class size so it becomes a byte step.  */
void
finish_omp_declare_simd_methods (tree t)
{
  if (processing_template_decl)
    return;
  for (tree x = TYPE_METHODS (t); x; x = DECL_CHAIN (x))
    {
      if (TREE_CODE (TREE_TYPE (x)) != METHOD_TYPE)
	continue;
      tree ods = lookup_attribute ("omp declare simd", DECL_ATTRIBUTES (x));
      if (!ods || !TREE_VALUE (ods))
	continue;
      for (tree c = TREE_VALUE (TREE_VALUE (ods)); c; c = OMP_CLAUSE_CHAIN (c))
	/* linear(this) is recognizable by a zero decl and a
	   pointer-typed step that is still unscaled.  */
	if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
	    && integer_zerop (OMP_CLAUSE_DECL (c))
	    && OMP_CLAUSE_LINEAR_STEP (c)
	    && TREE_CODE (TREE_TYPE (OMP_CLAUSE_LINEAR_STEP (c)))
	       == POINTER_TYPE)
	  {
	    tree s = OMP_CLAUSE_LINEAR_STEP (c);
	    s = fold_convert_loc (OMP_CLAUSE_LOCATION (c), sizetype, s);
	    s = fold_build2_loc (OMP_CLAUSE_LOCATION (c), MULT_EXPR,
				 sizetype, s, TYPE_SIZE_UNIT (t));
	    OMP_CLAUSE_LINEAR_STEP (c) = s;
	  }
    }
}
/* Adjust sink depend clause to take into account pointer offsets.
   Return TRUE if there was a problem processing the offset, and the
   whole clause should be removed.  For each pointer-typed sink item,
   the element offset in TREE_PURPOSE is converted to a byte offset
   via pointer arithmetic on the decl.  */
static bool
cp_finish_omp_clause_depend_sink (tree sink_clause)
{
  tree t = OMP_CLAUSE_DECL (sink_clause);
  gcc_assert (TREE_CODE (t) == TREE_LIST);
  /* Make sure we don't adjust things twice for templates.  */
  if (processing_template_decl)
    return false;
  for (; t; t = TREE_CHAIN (t))
    {
      tree decl = TREE_VALUE (t);
      if (TREE_CODE (TREE_TYPE (decl)) == POINTER_TYPE)
	{
	  tree offset = TREE_PURPOSE (t);
	  /* pointer_int_sum takes an unsigned magnitude; remember the
	     sign separately and pick PLUS or MINUS below.  */
	  bool neg = wi::neg_p ((wide_int) offset);
	  offset = fold_unary (ABS_EXPR, TREE_TYPE (offset), offset);
	  decl = mark_rvalue_use (decl);
	  decl = convert_from_reference (decl);
	  tree t2 = pointer_int_sum (OMP_CLAUSE_LOCATION (sink_clause),
				     neg ? MINUS_EXPR : PLUS_EXPR,
				     decl, offset);
	  /* (decl +/- offset) - decl yields the scaled byte offset.  */
	  t2 = fold_build2_loc (OMP_CLAUSE_LOCATION (sink_clause),
				MINUS_EXPR, sizetype,
				fold_convert (sizetype, t2),
				fold_convert (sizetype, decl));
	  if (t2 == error_mark_node)
	    return true;
	  TREE_PURPOSE (t) = t2;
	}
    }
  return false;
}
/* For all elements of CLAUSES, validate them vs OpenMP constraints.
Remove any elements from the list that are invalid. */
tree
finish_omp_clauses (tree clauses, enum c_omp_region_type ort)
{
bitmap_head generic_head, firstprivate_head, lastprivate_head;
bitmap_head aligned_head, map_head, map_field_head, oacc_reduction_head;
tree c, t, *pc;
tree safelen = NULL_TREE;
bool branch_seen = false;
bool copyprivate_seen = false;
bool ordered_seen = false;
bool oacc_async = false;
bitmap_obstack_initialize (NULL);
bitmap_initialize (&generic_head, &bitmap_default_obstack);
bitmap_initialize (&firstprivate_head, &bitmap_default_obstack);
bitmap_initialize (&lastprivate_head, &bitmap_default_obstack);
bitmap_initialize (&aligned_head, &bitmap_default_obstack);
bitmap_initialize (&map_head, &bitmap_default_obstack);
bitmap_initialize (&map_field_head, &bitmap_default_obstack);
bitmap_initialize (&oacc_reduction_head, &bitmap_default_obstack);
if (ort & C_ORT_ACC)
for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_ASYNC)
{
oacc_async = true;
break;
}
for (pc = &clauses, c = clauses; c ; c = *pc)
{
bool remove = false;
bool field_ok = false;
switch (OMP_CLAUSE_CODE (c))
{
case OMP_CLAUSE_SHARED:
field_ok = ((ort & C_ORT_OMP_DECLARE_SIMD) == C_ORT_OMP);
goto check_dup_generic;
case OMP_CLAUSE_PRIVATE:
field_ok = ((ort & C_ORT_OMP_DECLARE_SIMD) == C_ORT_OMP);
goto check_dup_generic;
case OMP_CLAUSE_REDUCTION:
field_ok = ((ort & C_ORT_OMP_DECLARE_SIMD) == C_ORT_OMP);
t = OMP_CLAUSE_DECL (c);
if (TREE_CODE (t) == TREE_LIST)
{
if (handle_omp_array_sections (c, ort))
{
remove = true;
break;
}
if (TREE_CODE (t) == TREE_LIST)
{
while (TREE_CODE (t) == TREE_LIST)
t = TREE_CHAIN (t);
}
else
{
gcc_assert (TREE_CODE (t) == MEM_REF);
t = TREE_OPERAND (t, 0);
if (TREE_CODE (t) == POINTER_PLUS_EXPR)
t = TREE_OPERAND (t, 0);
if (TREE_CODE (t) == ADDR_EXPR
|| TREE_CODE (t) == INDIRECT_REF)
t = TREE_OPERAND (t, 0);
}
tree n = omp_clause_decl_field (t);
if (n)
t = n;
goto check_dup_generic_t;
}
if (oacc_async)
cxx_mark_addressable (t);
goto check_dup_generic;
case OMP_CLAUSE_COPYPRIVATE:
copyprivate_seen = true;
field_ok = ((ort & C_ORT_OMP_DECLARE_SIMD) == C_ORT_OMP);
goto check_dup_generic;
case OMP_CLAUSE_COPYIN:
goto check_dup_generic;
case OMP_CLAUSE_LINEAR:
field_ok = ((ort & C_ORT_OMP_DECLARE_SIMD) == C_ORT_OMP);
t = OMP_CLAUSE_DECL (c);
if (ort != C_ORT_OMP_DECLARE_SIMD
&& OMP_CLAUSE_LINEAR_KIND (c) != OMP_CLAUSE_LINEAR_DEFAULT)
{
error_at (OMP_CLAUSE_LOCATION (c),
"modifier should not be specified in %<linear%> "
"clause on %<simd%> or %<for%> constructs");
OMP_CLAUSE_LINEAR_KIND (c) = OMP_CLAUSE_LINEAR_DEFAULT;
}
if ((VAR_P (t) || TREE_CODE (t) == PARM_DECL)
&& !type_dependent_expression_p (t))
{
tree type = TREE_TYPE (t);
if ((OMP_CLAUSE_LINEAR_KIND (c) == OMP_CLAUSE_LINEAR_REF
|| OMP_CLAUSE_LINEAR_KIND (c) == OMP_CLAUSE_LINEAR_UVAL)
&& TREE_CODE (type) != REFERENCE_TYPE)
{
error ("linear clause with %qs modifier applied to "
"non-reference variable with %qT type",
OMP_CLAUSE_LINEAR_KIND (c) == OMP_CLAUSE_LINEAR_REF
? "ref" : "uval", TREE_TYPE (t));
remove = true;
break;
}
if (TREE_CODE (type) == REFERENCE_TYPE)
type = TREE_TYPE (type);
if (ort == C_ORT_CILK)
{
if (!INTEGRAL_TYPE_P (type)
&& !SCALAR_FLOAT_TYPE_P (type)
&& TREE_CODE (type) != POINTER_TYPE)
{
error ("linear clause applied to non-integral, "
"non-floating, non-pointer variable with %qT type",
TREE_TYPE (t));
remove = true;
break;
}
}
else if (OMP_CLAUSE_LINEAR_KIND (c) != OMP_CLAUSE_LINEAR_REF)
{
if (!INTEGRAL_TYPE_P (type)
&& TREE_CODE (type) != POINTER_TYPE)
{
error ("linear clause applied to non-integral non-pointer"
" variable with %qT type", TREE_TYPE (t));
remove = true;
break;
}
}
}
t = OMP_CLAUSE_LINEAR_STEP (c);
if (t == NULL_TREE)
t = integer_one_node;
if (t == error_mark_node)
{
remove = true;
break;
}
else if (!type_dependent_expression_p (t)
&& !INTEGRAL_TYPE_P (TREE_TYPE (t))
&& (ort != C_ORT_OMP_DECLARE_SIMD
|| TREE_CODE (t) != PARM_DECL
|| TREE_CODE (TREE_TYPE (t)) != REFERENCE_TYPE
|| !INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (t)))))
{
error ("linear step expression must be integral");
remove = true;
break;
}
else
{
t = mark_rvalue_use (t);
if (ort == C_ORT_OMP_DECLARE_SIMD && TREE_CODE (t) == PARM_DECL)
{
OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (c) = 1;
goto check_dup_generic;
}
if (!processing_template_decl
&& (VAR_P (OMP_CLAUSE_DECL (c))
|| TREE_CODE (OMP_CLAUSE_DECL (c)) == PARM_DECL))
{
if (ort == C_ORT_OMP_DECLARE_SIMD)
{
t = maybe_constant_value (t);
if (TREE_CODE (t) != INTEGER_CST)
{
error_at (OMP_CLAUSE_LOCATION (c),
"%<linear%> clause step %qE is neither "
"constant nor a parameter", t);
remove = true;
break;
}
}
t = fold_build_cleanup_point_expr (TREE_TYPE (t), t);
tree type = TREE_TYPE (OMP_CLAUSE_DECL (c));
if (TREE_CODE (type) == REFERENCE_TYPE)
type = TREE_TYPE (type);
if (OMP_CLAUSE_LINEAR_KIND (c) == OMP_CLAUSE_LINEAR_REF)
{
type = build_pointer_type (type);
tree d = fold_convert (type, OMP_CLAUSE_DECL (c));
t = pointer_int_sum (OMP_CLAUSE_LOCATION (c), PLUS_EXPR,
d, t);
t = fold_build2_loc (OMP_CLAUSE_LOCATION (c),
MINUS_EXPR, sizetype,
fold_convert (sizetype, t),
fold_convert (sizetype, d));
if (t == error_mark_node)
{
remove = true;
break;
}
}
else if (TREE_CODE (type) == POINTER_TYPE
/* Can't multiply the step yet if *this
is still incomplete type. */
&& (ort != C_ORT_OMP_DECLARE_SIMD
|| TREE_CODE (OMP_CLAUSE_DECL (c)) != PARM_DECL
|| !DECL_ARTIFICIAL (OMP_CLAUSE_DECL (c))
|| DECL_NAME (OMP_CLAUSE_DECL (c))
!= this_identifier
|| !TYPE_BEING_DEFINED (TREE_TYPE (type))))
{
tree d = convert_from_reference (OMP_CLAUSE_DECL (c));
t = pointer_int_sum (OMP_CLAUSE_LOCATION (c), PLUS_EXPR,
d, t);
t = fold_build2_loc (OMP_CLAUSE_LOCATION (c),
MINUS_EXPR, sizetype,
fold_convert (sizetype, t),
fold_convert (sizetype, d));
if (t == error_mark_node)
{
remove = true;
break;
}
}
else
t = fold_convert (type, t);
}
OMP_CLAUSE_LINEAR_STEP (c) = t;
}
goto check_dup_generic;
check_dup_generic:
t = omp_clause_decl_field (OMP_CLAUSE_DECL (c));
if (t)
{
if (!remove && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_SHARED)
omp_note_field_privatization (t, OMP_CLAUSE_DECL (c));
}
else
t = OMP_CLAUSE_DECL (c);
check_dup_generic_t:
if (t == current_class_ptr
&& (ort != C_ORT_OMP_DECLARE_SIMD
|| (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_LINEAR
&& OMP_CLAUSE_CODE (c) != OMP_CLAUSE_UNIFORM)))
{
error ("%<this%> allowed in OpenMP only in %<declare simd%>"
" clauses");
remove = true;
break;
}
if (!VAR_P (t) && TREE_CODE (t) != PARM_DECL
&& (!field_ok || TREE_CODE (t) != FIELD_DECL))
{
if (processing_template_decl && TREE_CODE (t) != OVERLOAD)
break;
if (DECL_P (t))
error ("%qD is not a variable in clause %qs", t,
omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
else
error ("%qE is not a variable in clause %qs", t,
omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
remove = true;
}
else if (ort == C_ORT_ACC
&& OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
{
if (bitmap_bit_p (&oacc_reduction_head, DECL_UID (t)))
{
error ("%qD appears more than once in reduction clauses", t);
remove = true;
}
else
bitmap_set_bit (&oacc_reduction_head, DECL_UID (t));
}
else if (bitmap_bit_p (&generic_head, DECL_UID (t))
|| bitmap_bit_p (&firstprivate_head, DECL_UID (t))
|| bitmap_bit_p (&lastprivate_head, DECL_UID (t)))
{
error ("%qD appears more than once in data clauses", t);
remove = true;
}
else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
&& bitmap_bit_p (&map_head, DECL_UID (t)))
{
if (ort == C_ORT_ACC)
error ("%qD appears more than once in data clauses", t);
else
error ("%qD appears both in data and map clauses", t);
remove = true;
}
else
bitmap_set_bit (&generic_head, DECL_UID (t));
if (!field_ok)
break;
handle_field_decl:
if (!remove
&& TREE_CODE (t) == FIELD_DECL
&& t == OMP_CLAUSE_DECL (c)
&& ort != C_ORT_ACC)
{
OMP_CLAUSE_DECL (c)
= omp_privatize_field (t, (OMP_CLAUSE_CODE (c)
== OMP_CLAUSE_SHARED));
if (OMP_CLAUSE_DECL (c) == error_mark_node)
remove = true;
}
break;
case OMP_CLAUSE_FIRSTPRIVATE:
t = omp_clause_decl_field (OMP_CLAUSE_DECL (c));
if (t)
omp_note_field_privatization (t, OMP_CLAUSE_DECL (c));
else
t = OMP_CLAUSE_DECL (c);
if (ort != C_ORT_ACC && t == current_class_ptr)
{
error ("%<this%> allowed in OpenMP only in %<declare simd%>"
" clauses");
remove = true;
break;
}
if (!VAR_P (t) && TREE_CODE (t) != PARM_DECL
&& ((ort & C_ORT_OMP_DECLARE_SIMD) != C_ORT_OMP
|| TREE_CODE (t) != FIELD_DECL))
{
if (processing_template_decl && TREE_CODE (t) != OVERLOAD)
break;
if (DECL_P (t))
error ("%qD is not a variable in clause %<firstprivate%>", t);
else
error ("%qE is not a variable in clause %<firstprivate%>", t);
remove = true;
}
else if (bitmap_bit_p (&generic_head, DECL_UID (t))
|| bitmap_bit_p (&firstprivate_head, DECL_UID (t)))
{
error ("%qD appears more than once in data clauses", t);
remove = true;
}
else if (bitmap_bit_p (&map_head, DECL_UID (t)))
{
if (ort == C_ORT_ACC)
error ("%qD appears more than once in data clauses", t);
else
error ("%qD appears both in data and map clauses", t);
remove = true;
}
else
bitmap_set_bit (&firstprivate_head, DECL_UID (t));
goto handle_field_decl;
case OMP_CLAUSE_LASTPRIVATE:
t = omp_clause_decl_field (OMP_CLAUSE_DECL (c));
if (t)
omp_note_field_privatization (t, OMP_CLAUSE_DECL (c));
else
t = OMP_CLAUSE_DECL (c);
if (t == current_class_ptr)
{
error ("%<this%> allowed in OpenMP only in %<declare simd%>"
" clauses");
remove = true;
break;
}
if (!VAR_P (t) && TREE_CODE (t) != PARM_DECL
&& ((ort & C_ORT_OMP_DECLARE_SIMD) != C_ORT_OMP
|| TREE_CODE (t) != FIELD_DECL))
{
if (processing_template_decl && TREE_CODE (t) != OVERLOAD)
break;
if (DECL_P (t))
error ("%qD is not a variable in clause %<lastprivate%>", t);
else
error ("%qE is not a variable in clause %<lastprivate%>", t);
remove = true;
}
else if (bitmap_bit_p (&generic_head, DECL_UID (t))
|| bitmap_bit_p (&lastprivate_head, DECL_UID (t)))
{
error ("%qD appears more than once in data clauses", t);
remove = true;
}
else
bitmap_set_bit (&lastprivate_head, DECL_UID (t));
goto handle_field_decl;
case OMP_CLAUSE_IF:
t = OMP_CLAUSE_IF_EXPR (c);
t = maybe_convert_cond (t);
if (t == error_mark_node)
remove = true;
else if (!processing_template_decl)
t = fold_build_cleanup_point_expr (TREE_TYPE (t), t);
OMP_CLAUSE_IF_EXPR (c) = t;
break;
case OMP_CLAUSE_FINAL:
t = OMP_CLAUSE_FINAL_EXPR (c);
t = maybe_convert_cond (t);
if (t == error_mark_node)
remove = true;
else if (!processing_template_decl)
t = fold_build_cleanup_point_expr (TREE_TYPE (t), t);
OMP_CLAUSE_FINAL_EXPR (c) = t;
break;
case OMP_CLAUSE_GANG:
/* Operand 1 is the gang static: argument. */
t = OMP_CLAUSE_OPERAND (c, 1);
if (t != NULL_TREE)
{
if (t == error_mark_node)
remove = true;
else if (!type_dependent_expression_p (t)
&& !INTEGRAL_TYPE_P (TREE_TYPE (t)))
{
error ("%<gang%> static expression must be integral");
remove = true;
}
else
{
t = mark_rvalue_use (t);
if (!processing_template_decl)
{
t = maybe_constant_value (t);
if (TREE_CODE (t) == INTEGER_CST
&& tree_int_cst_sgn (t) != 1
&& t != integer_minus_one_node)
{
warning_at (OMP_CLAUSE_LOCATION (c), 0,
"%<gang%> static value must be "
"positive");
t = integer_one_node;
}
}
t = fold_build_cleanup_point_expr (TREE_TYPE (t), t);
}
OMP_CLAUSE_OPERAND (c, 1) = t;
}
/* Check operand 0, the num argument. */
/* FALLTHRU */
case OMP_CLAUSE_WORKER:
case OMP_CLAUSE_VECTOR:
if (OMP_CLAUSE_OPERAND (c, 0) == NULL_TREE)
break;
/* FALLTHRU */
case OMP_CLAUSE_NUM_TASKS:
case OMP_CLAUSE_NUM_TEAMS:
case OMP_CLAUSE_NUM_THREADS:
case OMP_CLAUSE_NUM_GANGS:
case OMP_CLAUSE_NUM_WORKERS:
case OMP_CLAUSE_VECTOR_LENGTH:
t = OMP_CLAUSE_OPERAND (c, 0);
if (t == error_mark_node)
remove = true;
else if (!type_dependent_expression_p (t)
&& !INTEGRAL_TYPE_P (TREE_TYPE (t)))
{
switch (OMP_CLAUSE_CODE (c))
{
case OMP_CLAUSE_GANG:
error_at (OMP_CLAUSE_LOCATION (c),
"%<gang%> num expression must be integral"); break;
case OMP_CLAUSE_VECTOR:
error_at (OMP_CLAUSE_LOCATION (c),
"%<vector%> length expression must be integral");
break;
case OMP_CLAUSE_WORKER:
error_at (OMP_CLAUSE_LOCATION (c),
"%<worker%> num expression must be integral");
break;
default:
error_at (OMP_CLAUSE_LOCATION (c),
"%qs expression must be integral",
omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
}
remove = true;
}
else
{
t = mark_rvalue_use (t);
if (!processing_template_decl)
{
t = maybe_constant_value (t);
if (TREE_CODE (t) == INTEGER_CST
&& tree_int_cst_sgn (t) != 1)
{
switch (OMP_CLAUSE_CODE (c))
{
case OMP_CLAUSE_GANG:
warning_at (OMP_CLAUSE_LOCATION (c), 0,
"%<gang%> num value must be positive");
break;
case OMP_CLAUSE_VECTOR:
warning_at (OMP_CLAUSE_LOCATION (c), 0,
"%<vector%> length value must be "
"positive");
break;
case OMP_CLAUSE_WORKER:
warning_at (OMP_CLAUSE_LOCATION (c), 0,
"%<worker%> num value must be "
"positive");
break;
default:
warning_at (OMP_CLAUSE_LOCATION (c), 0,
"%qs value must be positive",
omp_clause_code_name
[OMP_CLAUSE_CODE (c)]);
}
t = integer_one_node;
}
t = fold_build_cleanup_point_expr (TREE_TYPE (t), t);
}
OMP_CLAUSE_OPERAND (c, 0) = t;
}
break;
case OMP_CLAUSE_SCHEDULE:
if (OMP_CLAUSE_SCHEDULE_KIND (c) & OMP_CLAUSE_SCHEDULE_NONMONOTONIC)
{
const char *p = NULL;
switch (OMP_CLAUSE_SCHEDULE_KIND (c) & OMP_CLAUSE_SCHEDULE_MASK)
{
case OMP_CLAUSE_SCHEDULE_STATIC: p = "static"; break;
case OMP_CLAUSE_SCHEDULE_DYNAMIC: break;
case OMP_CLAUSE_SCHEDULE_GUIDED: break;
case OMP_CLAUSE_SCHEDULE_AUTO: p = "auto"; break;
case OMP_CLAUSE_SCHEDULE_RUNTIME: p = "runtime"; break;
default: gcc_unreachable ();
}
if (p)
{
error_at (OMP_CLAUSE_LOCATION (c),
"%<nonmonotonic%> modifier specified for %qs "
"schedule kind", p);
OMP_CLAUSE_SCHEDULE_KIND (c)
= (enum omp_clause_schedule_kind)
(OMP_CLAUSE_SCHEDULE_KIND (c)
& ~OMP_CLAUSE_SCHEDULE_NONMONOTONIC);
}
}
t = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (c);
if (t == NULL)
;
else if (t == error_mark_node)
remove = true;
else if (!type_dependent_expression_p (t)
&& (OMP_CLAUSE_SCHEDULE_KIND (c)
!= OMP_CLAUSE_SCHEDULE_CILKFOR)
&& !INTEGRAL_TYPE_P (TREE_TYPE (t)))
{
error ("schedule chunk size expression must be integral");
remove = true;
}
else
{
t = mark_rvalue_use (t);
if (!processing_template_decl)
{
if (OMP_CLAUSE_SCHEDULE_KIND (c)
== OMP_CLAUSE_SCHEDULE_CILKFOR)
{
t = convert_to_integer (long_integer_type_node, t);
if (t == error_mark_node)
{
remove = true;
break;
}
}
else
{
t = maybe_constant_value (t);
if (TREE_CODE (t) == INTEGER_CST
&& tree_int_cst_sgn (t) != 1)
{
warning_at (OMP_CLAUSE_LOCATION (c), 0,
"chunk size value must be positive");
t = integer_one_node;
}
}
t = fold_build_cleanup_point_expr (TREE_TYPE (t), t);
}
OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (c) = t;
}
break;
case OMP_CLAUSE_SIMDLEN:
case OMP_CLAUSE_SAFELEN:
t = OMP_CLAUSE_OPERAND (c, 0);
if (t == error_mark_node)
remove = true;
else if (!type_dependent_expression_p (t)
&& !INTEGRAL_TYPE_P (TREE_TYPE (t)))
{
error ("%qs length expression must be integral",
omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
remove = true;
}
else
{
t = mark_rvalue_use (t);
if (!processing_template_decl)
{
t = maybe_constant_value (t);
if (TREE_CODE (t) != INTEGER_CST
|| tree_int_cst_sgn (t) != 1)
{
error ("%qs length expression must be positive constant"
" integer expression",
omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
remove = true;
}
}
OMP_CLAUSE_OPERAND (c, 0) = t;
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SAFELEN)
safelen = c;
}
break;
case OMP_CLAUSE_ASYNC:
t = OMP_CLAUSE_ASYNC_EXPR (c);
if (t == error_mark_node)
remove = true;
else if (!type_dependent_expression_p (t)
&& !INTEGRAL_TYPE_P (TREE_TYPE (t)))
{
error ("%<async%> expression must be integral");
remove = true;
}
else
{
t = mark_rvalue_use (t);
if (!processing_template_decl)
t = fold_build_cleanup_point_expr (TREE_TYPE (t), t);
OMP_CLAUSE_ASYNC_EXPR (c) = t;
}
break;
case OMP_CLAUSE_WAIT:
t = OMP_CLAUSE_WAIT_EXPR (c);
if (t == error_mark_node)
remove = true;
else if (!processing_template_decl)
t = fold_build_cleanup_point_expr (TREE_TYPE (t), t);
OMP_CLAUSE_WAIT_EXPR (c) = t;
break;
case OMP_CLAUSE_THREAD_LIMIT:
t = OMP_CLAUSE_THREAD_LIMIT_EXPR (c);
if (t == error_mark_node)
remove = true;
else if (!type_dependent_expression_p (t)
&& !INTEGRAL_TYPE_P (TREE_TYPE (t)))
{
error ("%<thread_limit%> expression must be integral");
remove = true;
}
else
{
t = mark_rvalue_use (t);
if (!processing_template_decl)
{
t = maybe_constant_value (t);
if (TREE_CODE (t) == INTEGER_CST
&& tree_int_cst_sgn (t) != 1)
{
warning_at (OMP_CLAUSE_LOCATION (c), 0,
"%<thread_limit%> value must be positive");
t = integer_one_node;
}
t = fold_build_cleanup_point_expr (TREE_TYPE (t), t);
}
OMP_CLAUSE_THREAD_LIMIT_EXPR (c) = t;
}
break;
case OMP_CLAUSE_DEVICE:
t = OMP_CLAUSE_DEVICE_ID (c);
if (t == error_mark_node)
remove = true;
else if (!type_dependent_expression_p (t)
&& !INTEGRAL_TYPE_P (TREE_TYPE (t)))
{
error ("%<device%> id must be integral");
remove = true;
}
else
{
t = mark_rvalue_use (t);
if (!processing_template_decl)
t = fold_build_cleanup_point_expr (TREE_TYPE (t), t);
OMP_CLAUSE_DEVICE_ID (c) = t;
}
break;
case OMP_CLAUSE_DIST_SCHEDULE:
t = OMP_CLAUSE_DIST_SCHEDULE_CHUNK_EXPR (c);
if (t == NULL)
;
else if (t == error_mark_node)
remove = true;
else if (!type_dependent_expression_p (t)
&& !INTEGRAL_TYPE_P (TREE_TYPE (t)))
{
error ("%<dist_schedule%> chunk size expression must be "
"integral");
remove = true;
}
else
{
t = mark_rvalue_use (t);
if (!processing_template_decl)
t = fold_build_cleanup_point_expr (TREE_TYPE (t), t);
OMP_CLAUSE_DIST_SCHEDULE_CHUNK_EXPR (c) = t;
}
break;
case OMP_CLAUSE_ALIGNED:
t = OMP_CLAUSE_DECL (c);
if (t == current_class_ptr && ort != C_ORT_OMP_DECLARE_SIMD)
{
error ("%<this%> allowed in OpenMP only in %<declare simd%>"
" clauses");
remove = true;
break;
}
if (!VAR_P (t) && TREE_CODE (t) != PARM_DECL)
{
if (processing_template_decl && TREE_CODE (t) != OVERLOAD)
break;
if (DECL_P (t))
error ("%qD is not a variable in %<aligned%> clause", t);
else
error ("%qE is not a variable in %<aligned%> clause", t);
remove = true;
}
else if (!type_dependent_expression_p (t)
&& TREE_CODE (TREE_TYPE (t)) != POINTER_TYPE
&& TREE_CODE (TREE_TYPE (t)) != ARRAY_TYPE
&& (TREE_CODE (TREE_TYPE (t)) != REFERENCE_TYPE
|| (!POINTER_TYPE_P (TREE_TYPE (TREE_TYPE (t)))
&& (TREE_CODE (TREE_TYPE (TREE_TYPE (t)))
!= ARRAY_TYPE))))
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qE in %<aligned%> clause is neither a pointer nor "
"an array nor a reference to pointer or array", t);
remove = true;
}
else if (bitmap_bit_p (&aligned_head, DECL_UID (t)))
{
error ("%qD appears more than once in %<aligned%> clauses", t);
remove = true;
}
else
bitmap_set_bit (&aligned_head, DECL_UID (t));
t = OMP_CLAUSE_ALIGNED_ALIGNMENT (c);
if (t == error_mark_node)
remove = true;
else if (t == NULL_TREE)
break;
else if (!type_dependent_expression_p (t)
&& !INTEGRAL_TYPE_P (TREE_TYPE (t)))
{
error ("%<aligned%> clause alignment expression must "
"be integral");
remove = true;
}
else
{
t = mark_rvalue_use (t);
if (!processing_template_decl)
{
t = maybe_constant_value (t);
if (TREE_CODE (t) != INTEGER_CST
|| tree_int_cst_sgn (t) != 1)
{
error ("%<aligned%> clause alignment expression must be "
"positive constant integer expression");
remove = true;
}
}
OMP_CLAUSE_ALIGNED_ALIGNMENT (c) = t;
}
break;
case OMP_CLAUSE_DEPEND:
t = OMP_CLAUSE_DECL (c);
if (t == NULL_TREE)
{
gcc_assert (OMP_CLAUSE_DEPEND_KIND (c)
== OMP_CLAUSE_DEPEND_SOURCE);
break;
}
if (OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_SINK)
{
if (cp_finish_omp_clause_depend_sink (c))
remove = true;
break;
}
if (TREE_CODE (t) == TREE_LIST)
{
if (handle_omp_array_sections (c, ort))
remove = true;
break;
}
if (t == error_mark_node)
remove = true;
else if (!VAR_P (t) && TREE_CODE (t) != PARM_DECL)
{
if (processing_template_decl && TREE_CODE (t) != OVERLOAD)
break;
if (DECL_P (t))
error ("%qD is not a variable in %<depend%> clause", t);
else
error ("%qE is not a variable in %<depend%> clause", t);
remove = true;
}
else if (t == current_class_ptr)
{
error ("%<this%> allowed in OpenMP only in %<declare simd%>"
" clauses");
remove = true;
}
else if (!processing_template_decl
&& !cxx_mark_addressable (t))
remove = true;
break;
case OMP_CLAUSE_MAP:
case OMP_CLAUSE_TO:
case OMP_CLAUSE_FROM:
case OMP_CLAUSE__CACHE_:
t = OMP_CLAUSE_DECL (c);
if (TREE_CODE (t) == TREE_LIST)
{
if (handle_omp_array_sections (c, ort))
remove = true;
else
{
t = OMP_CLAUSE_DECL (c);
if (TREE_CODE (t) != TREE_LIST
&& !type_dependent_expression_p (t)
&& !cp_omp_mappable_type (TREE_TYPE (t)))
{
error_at (OMP_CLAUSE_LOCATION (c),
"array section does not have mappable type "
"in %qs clause",
omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
remove = true;
}
while (TREE_CODE (t) == ARRAY_REF)
t = TREE_OPERAND (t, 0);
if (TREE_CODE (t) == COMPONENT_REF
&& TREE_CODE (TREE_TYPE (t)) == ARRAY_TYPE)
{
while (TREE_CODE (t) == COMPONENT_REF)
t = TREE_OPERAND (t, 0);
if (REFERENCE_REF_P (t))
t = TREE_OPERAND (t, 0);
if (bitmap_bit_p (&map_field_head, DECL_UID (t)))
break;
if (bitmap_bit_p (&map_head, DECL_UID (t)))
{
if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_MAP)
error ("%qD appears more than once in motion"
" clauses", t);
else if (ort == C_ORT_ACC)
error ("%qD appears more than once in data"
" clauses", t);
else
error ("%qD appears more than once in map"
" clauses", t);
remove = true;
}
else
{
bitmap_set_bit (&map_head, DECL_UID (t));
bitmap_set_bit (&map_field_head, DECL_UID (t));
}
}
}
break;
}
if (t == error_mark_node)
{
remove = true;
break;
}
if (REFERENCE_REF_P (t)
&& TREE_CODE (TREE_OPERAND (t, 0)) == COMPONENT_REF)
{
t = TREE_OPERAND (t, 0);
OMP_CLAUSE_DECL (c) = t;
}
if (TREE_CODE (t) == COMPONENT_REF
&& (ort & C_ORT_OMP_DECLARE_SIMD) == C_ORT_OMP
&& OMP_CLAUSE_CODE (c) != OMP_CLAUSE__CACHE_)
{
if (type_dependent_expression_p (t))
break;
if (TREE_CODE (TREE_OPERAND (t, 1)) == FIELD_DECL
&& DECL_BIT_FIELD (TREE_OPERAND (t, 1)))
{
error_at (OMP_CLAUSE_LOCATION (c),
"bit-field %qE in %qs clause",
t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
remove = true;
}
else if (!cp_omp_mappable_type (TREE_TYPE (t)))
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qE does not have a mappable type in %qs clause",
t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
remove = true;
}
while (TREE_CODE (t) == COMPONENT_REF)
{
if (TREE_TYPE (TREE_OPERAND (t, 0))
&& (TREE_CODE (TREE_TYPE (TREE_OPERAND (t, 0)))
== UNION_TYPE))
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qE is a member of a union", t);
remove = true;
break;
}
t = TREE_OPERAND (t, 0);
}
if (remove)
break;
if (REFERENCE_REF_P (t))
t = TREE_OPERAND (t, 0);
if (VAR_P (t) || TREE_CODE (t) == PARM_DECL)
{
if (bitmap_bit_p (&map_field_head, DECL_UID (t)))
goto handle_map_references;
}
}
if (!VAR_P (t) && TREE_CODE (t) != PARM_DECL)
{
if (processing_template_decl && TREE_CODE (t) != OVERLOAD)
break;
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
&& (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_POINTER
|| OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_ALWAYS_POINTER))
break;
if (DECL_P (t))
error ("%qD is not a variable in %qs clause", t,
omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
else
error ("%qE is not a variable in %qs clause", t,
omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
remove = true;
}
else if (VAR_P (t) && CP_DECL_THREAD_LOCAL_P (t))
{
error ("%qD is threadprivate variable in %qs clause", t,
omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
remove = true;
}
else if (ort != C_ORT_ACC && t == current_class_ptr)
{
error ("%<this%> allowed in OpenMP only in %<declare simd%>"
" clauses");
remove = true;
break;
}
else if (!processing_template_decl
&& TREE_CODE (TREE_TYPE (t)) != REFERENCE_TYPE
&& (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_MAP
|| (OMP_CLAUSE_MAP_KIND (c)
!= GOMP_MAP_FIRSTPRIVATE_POINTER))
&& !cxx_mark_addressable (t))
remove = true;
else if (!(OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
&& (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_POINTER
|| (OMP_CLAUSE_MAP_KIND (c)
== GOMP_MAP_FIRSTPRIVATE_POINTER)))
&& t == OMP_CLAUSE_DECL (c)
&& !type_dependent_expression_p (t)
&& !cp_omp_mappable_type ((TREE_CODE (TREE_TYPE (t))
== REFERENCE_TYPE)
? TREE_TYPE (TREE_TYPE (t))
: TREE_TYPE (t)))
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qD does not have a mappable type in %qs clause", t,
omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
remove = true;
}
else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
&& OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FORCE_DEVICEPTR
&& !type_dependent_expression_p (t)
&& !POINTER_TYPE_P (TREE_TYPE (t)))
{
error ("%qD is not a pointer variable", t);
remove = true;
}
else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
&& OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_POINTER)
{
if (bitmap_bit_p (&generic_head, DECL_UID (t))
|| bitmap_bit_p (&firstprivate_head, DECL_UID (t)))
{
error ("%qD appears more than once in data clauses", t);
remove = true;
}
else if (bitmap_bit_p (&map_head, DECL_UID (t)))
{
if (ort == C_ORT_ACC)
error ("%qD appears more than once in data clauses", t);
else
error ("%qD appears both in data and map clauses", t);
remove = true;
}
else
bitmap_set_bit (&generic_head, DECL_UID (t));
}
else if (bitmap_bit_p (&map_head, DECL_UID (t)))
{
if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_MAP)
error ("%qD appears more than once in motion clauses", t);
if (ort == C_ORT_ACC)
error ("%qD appears more than once in data clauses", t);
else
error ("%qD appears more than once in map clauses", t);
remove = true;
}
else if (bitmap_bit_p (&generic_head, DECL_UID (t))
|| bitmap_bit_p (&firstprivate_head, DECL_UID (t)))
{
if (ort == C_ORT_ACC)
error ("%qD appears more than once in data clauses", t);
else
error ("%qD appears both in data and map clauses", t);
remove = true;
}
else
{
bitmap_set_bit (&map_head, DECL_UID (t));
if (t != OMP_CLAUSE_DECL (c)
&& TREE_CODE (OMP_CLAUSE_DECL (c)) == COMPONENT_REF)
bitmap_set_bit (&map_field_head, DECL_UID (t));
}
handle_map_references:
if (!remove
&& !processing_template_decl
&& (ort & C_ORT_OMP_DECLARE_SIMD) == C_ORT_OMP
&& TREE_CODE (TREE_TYPE (OMP_CLAUSE_DECL (c))) == REFERENCE_TYPE)
{
t = OMP_CLAUSE_DECL (c);
if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_MAP)
{
OMP_CLAUSE_DECL (c) = build_simple_mem_ref (t);
if (OMP_CLAUSE_SIZE (c) == NULL_TREE)
OMP_CLAUSE_SIZE (c)
= TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (t)));
}
else if (OMP_CLAUSE_MAP_KIND (c)
!= GOMP_MAP_FIRSTPRIVATE_POINTER
&& (OMP_CLAUSE_MAP_KIND (c)
!= GOMP_MAP_FIRSTPRIVATE_REFERENCE)
&& (OMP_CLAUSE_MAP_KIND (c)
!= GOMP_MAP_ALWAYS_POINTER))
{
tree c2 = build_omp_clause (OMP_CLAUSE_LOCATION (c),
OMP_CLAUSE_MAP);
if (TREE_CODE (t) == COMPONENT_REF)
OMP_CLAUSE_SET_MAP_KIND (c2, GOMP_MAP_ALWAYS_POINTER);
else
OMP_CLAUSE_SET_MAP_KIND (c2,
GOMP_MAP_FIRSTPRIVATE_REFERENCE);
OMP_CLAUSE_DECL (c2) = t;
OMP_CLAUSE_SIZE (c2) = size_zero_node;
OMP_CLAUSE_CHAIN (c2) = OMP_CLAUSE_CHAIN (c);
OMP_CLAUSE_CHAIN (c) = c2;
OMP_CLAUSE_DECL (c) = build_simple_mem_ref (t);
if (OMP_CLAUSE_SIZE (c) == NULL_TREE)
OMP_CLAUSE_SIZE (c)
= TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (t)));
c = c2;
}
}
break;
case OMP_CLAUSE_TO_DECLARE:
case OMP_CLAUSE_LINK:
t = OMP_CLAUSE_DECL (c);
if (TREE_CODE (t) == FUNCTION_DECL
&& OMP_CLAUSE_CODE (c) == OMP_CLAUSE_TO_DECLARE)
;
else if (!VAR_P (t))
{
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_TO_DECLARE)
{
if (TREE_CODE (t) == OVERLOAD && OVL_CHAIN (t))
error_at (OMP_CLAUSE_LOCATION (c),
"overloaded function name %qE in clause %qs", t,
omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
else if (TREE_CODE (t) == TEMPLATE_ID_EXPR)
error_at (OMP_CLAUSE_LOCATION (c),
"template %qE in clause %qs", t,
omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
else
error_at (OMP_CLAUSE_LOCATION (c),
"%qE is neither a variable nor a function name "
"in clause %qs", t,
omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
}
else
error_at (OMP_CLAUSE_LOCATION (c),
"%qE is not a variable in clause %qs", t,
omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
remove = true;
}
else if (DECL_THREAD_LOCAL_P (t))
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qD is threadprivate variable in %qs clause", t,
omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
remove = true;
}
else if (!cp_omp_mappable_type (TREE_TYPE (t)))
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qD does not have a mappable type in %qs clause", t,
omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
remove = true;
}
if (remove)
break;
if (bitmap_bit_p (&generic_head, DECL_UID (t)))
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qE appears more than once on the same "
"%<declare target%> directive", t);
remove = true;
}
else
bitmap_set_bit (&generic_head, DECL_UID (t));
break;
case OMP_CLAUSE_UNIFORM:
t = OMP_CLAUSE_DECL (c);
if (TREE_CODE (t) != PARM_DECL)
{
if (processing_template_decl)
break;
if (DECL_P (t))
error ("%qD is not an argument in %<uniform%> clause", t);
else
error ("%qE is not an argument in %<uniform%> clause", t);
remove = true;
break;
}
/* map_head bitmap is used as uniform_head if declare_simd. */
bitmap_set_bit (&map_head, DECL_UID (t));
goto check_dup_generic;
case OMP_CLAUSE_GRAINSIZE:
t = OMP_CLAUSE_GRAINSIZE_EXPR (c);
if (t == error_mark_node)
remove = true;
else if (!type_dependent_expression_p (t)
&& !INTEGRAL_TYPE_P (TREE_TYPE (t)))
{
error ("%<grainsize%> expression must be integral");
remove = true;
}
else
{
t = mark_rvalue_use (t);
if (!processing_template_decl)
{
t = maybe_constant_value (t);
if (TREE_CODE (t) == INTEGER_CST
&& tree_int_cst_sgn (t) != 1)
{
warning_at (OMP_CLAUSE_LOCATION (c), 0,
"%<grainsize%> value must be positive");
t = integer_one_node;
}
t = fold_build_cleanup_point_expr (TREE_TYPE (t), t);
}
OMP_CLAUSE_GRAINSIZE_EXPR (c) = t;
}
break;
case OMP_CLAUSE_PRIORITY:
t = OMP_CLAUSE_PRIORITY_EXPR (c);
if (t == error_mark_node)
remove = true;
else if (!type_dependent_expression_p (t)
&& !INTEGRAL_TYPE_P (TREE_TYPE (t)))
{
error ("%<priority%> expression must be integral");
remove = true;
}
else
{
t = mark_rvalue_use (t);
if (!processing_template_decl)
{
t = maybe_constant_value (t);
if (TREE_CODE (t) == INTEGER_CST
&& tree_int_cst_sgn (t) == -1)
{
warning_at (OMP_CLAUSE_LOCATION (c), 0,
"%<priority%> value must be non-negative");
t = integer_one_node;
}
t = fold_build_cleanup_point_expr (TREE_TYPE (t), t);
}
OMP_CLAUSE_PRIORITY_EXPR (c) = t;
}
break;
case OMP_CLAUSE_HINT:
t = OMP_CLAUSE_HINT_EXPR (c);
if (t == error_mark_node)
remove = true;
else if (!type_dependent_expression_p (t)
&& !INTEGRAL_TYPE_P (TREE_TYPE (t)))
{
error ("%<num_tasks%> expression must be integral");
remove = true;
}
else
{
t = mark_rvalue_use (t);
if (!processing_template_decl)
{
t = maybe_constant_value (t);
t = fold_build_cleanup_point_expr (TREE_TYPE (t), t);
}
OMP_CLAUSE_HINT_EXPR (c) = t;
}
break;
case OMP_CLAUSE_IS_DEVICE_PTR:
case OMP_CLAUSE_USE_DEVICE_PTR:
field_ok = (ort & C_ORT_OMP_DECLARE_SIMD) == C_ORT_OMP;
t = OMP_CLAUSE_DECL (c);
if (!type_dependent_expression_p (t))
{
tree type = TREE_TYPE (t);
if (TREE_CODE (type) != POINTER_TYPE
&& TREE_CODE (type) != ARRAY_TYPE
&& (TREE_CODE (type) != REFERENCE_TYPE
|| (TREE_CODE (TREE_TYPE (type)) != POINTER_TYPE
&& TREE_CODE (TREE_TYPE (type)) != ARRAY_TYPE)))
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qs variable is neither a pointer, nor an array "
"nor reference to pointer or array",
omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
remove = true;
}
}
goto check_dup_generic;
case OMP_CLAUSE_NOWAIT:
case OMP_CLAUSE_DEFAULT:
case OMP_CLAUSE_UNTIED:
case OMP_CLAUSE_COLLAPSE:
case OMP_CLAUSE_MERGEABLE:
case OMP_CLAUSE_PARALLEL:
case OMP_CLAUSE_FOR:
case OMP_CLAUSE_SECTIONS:
case OMP_CLAUSE_TASKGROUP:
case OMP_CLAUSE_PROC_BIND:
case OMP_CLAUSE_NOGROUP:
case OMP_CLAUSE_THREADS:
case OMP_CLAUSE_SIMD:
case OMP_CLAUSE_DEFAULTMAP:
case OMP_CLAUSE__CILK_FOR_COUNT_:
case OMP_CLAUSE_AUTO:
case OMP_CLAUSE_INDEPENDENT:
case OMP_CLAUSE_SEQ:
break;
case OMP_CLAUSE_TILE:
for (tree list = OMP_CLAUSE_TILE_LIST (c); !remove && list;
list = TREE_CHAIN (list))
{
t = TREE_VALUE (list);
if (t == error_mark_node)
remove = true;
else if (!type_dependent_expression_p (t)
&& !INTEGRAL_TYPE_P (TREE_TYPE (t)))
{
error_at (OMP_CLAUSE_LOCATION (c),
"%<tile%> argument needs integral type");
remove = true;
}
else
{
t = mark_rvalue_use (t);
if (!processing_template_decl)
{
/* Zero is used to indicate '*', we permit you
to get there via an ICE of value zero. */
t = maybe_constant_value (t);
if (!tree_fits_shwi_p (t)
|| tree_to_shwi (t) < 0)
{
error_at (OMP_CLAUSE_LOCATION (c),
"%<tile%> argument needs positive "
"integral constant");
remove = true;
}
}
t = fold_build_cleanup_point_expr (TREE_TYPE (t), t);
}
/* Update list item. */
TREE_VALUE (list) = t;
}
break;
case OMP_CLAUSE_ORDERED:
ordered_seen = true;
break;
case OMP_CLAUSE_INBRANCH:
case OMP_CLAUSE_NOTINBRANCH:
if (branch_seen)
{
error ("%<inbranch%> clause is incompatible with "
"%<notinbranch%>");
remove = true;
}
branch_seen = true;
break;
default:
gcc_unreachable ();
}
if (remove)
*pc = OMP_CLAUSE_CHAIN (c);
else
pc = &OMP_CLAUSE_CHAIN (c);
}
for (pc = &clauses, c = clauses; c ; c = *pc)
{
enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
bool remove = false;
bool need_complete_type = false;
bool need_default_ctor = false;
bool need_copy_ctor = false;
bool need_copy_assignment = false;
bool need_implicitly_determined = false;
bool need_dtor = false;
tree type, inner_type;
switch (c_kind)
{
case OMP_CLAUSE_SHARED:
need_implicitly_determined = true;
break;
case OMP_CLAUSE_PRIVATE:
need_complete_type = true;
need_default_ctor = true;
need_dtor = true;
need_implicitly_determined = true;
break;
case OMP_CLAUSE_FIRSTPRIVATE:
need_complete_type = true;
need_copy_ctor = true;
need_dtor = true;
need_implicitly_determined = true;
break;
case OMP_CLAUSE_LASTPRIVATE:
need_complete_type = true;
need_copy_assignment = true;
need_implicitly_determined = true;
break;
case OMP_CLAUSE_REDUCTION:
need_implicitly_determined = true;
break;
case OMP_CLAUSE_LINEAR:
if (ort != C_ORT_OMP_DECLARE_SIMD)
need_implicitly_determined = true;
else if (OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (c)
&& !bitmap_bit_p (&map_head,
DECL_UID (OMP_CLAUSE_LINEAR_STEP (c))))
{
error_at (OMP_CLAUSE_LOCATION (c),
"%<linear%> clause step is a parameter %qD not "
"specified in %<uniform%> clause",
OMP_CLAUSE_LINEAR_STEP (c));
*pc = OMP_CLAUSE_CHAIN (c);
continue;
}
break;
case OMP_CLAUSE_COPYPRIVATE:
need_copy_assignment = true;
break;
case OMP_CLAUSE_COPYIN:
need_copy_assignment = true;
break;
case OMP_CLAUSE_SIMDLEN:
if (safelen
&& !processing_template_decl
&& tree_int_cst_lt (OMP_CLAUSE_SAFELEN_EXPR (safelen),
OMP_CLAUSE_SIMDLEN_EXPR (c)))
{
error_at (OMP_CLAUSE_LOCATION (c),
"%<simdlen%> clause value is bigger than "
"%<safelen%> clause value");
OMP_CLAUSE_SIMDLEN_EXPR (c)
= OMP_CLAUSE_SAFELEN_EXPR (safelen);
}
pc = &OMP_CLAUSE_CHAIN (c);
continue;
case OMP_CLAUSE_SCHEDULE:
if (ordered_seen
&& (OMP_CLAUSE_SCHEDULE_KIND (c)
& OMP_CLAUSE_SCHEDULE_NONMONOTONIC))
{
error_at (OMP_CLAUSE_LOCATION (c),
"%<nonmonotonic%> schedule modifier specified "
"together with %<ordered%> clause");
OMP_CLAUSE_SCHEDULE_KIND (c)
= (enum omp_clause_schedule_kind)
(OMP_CLAUSE_SCHEDULE_KIND (c)
& ~OMP_CLAUSE_SCHEDULE_NONMONOTONIC);
}
pc = &OMP_CLAUSE_CHAIN (c);
continue;
case OMP_CLAUSE_NOWAIT:
if (copyprivate_seen)
{
error_at (OMP_CLAUSE_LOCATION (c),
"%<nowait%> clause must not be used together "
"with %<copyprivate%>");
*pc = OMP_CLAUSE_CHAIN (c);
continue;
}
/* FALLTHRU */
default:
pc = &OMP_CLAUSE_CHAIN (c);
continue;
}
t = OMP_CLAUSE_DECL (c);
if (processing_template_decl
&& !VAR_P (t) && TREE_CODE (t) != PARM_DECL)
{
pc = &OMP_CLAUSE_CHAIN (c);
continue;
}
switch (c_kind)
{
case OMP_CLAUSE_LASTPRIVATE:
if (!bitmap_bit_p (&firstprivate_head, DECL_UID (t)))
{
need_default_ctor = true;
need_dtor = true;
}
break;
case OMP_CLAUSE_REDUCTION:
if (finish_omp_reduction_clause (c, &need_default_ctor,
&need_dtor))
remove = true;
else
t = OMP_CLAUSE_DECL (c);
break;
case OMP_CLAUSE_COPYIN:
if (!VAR_P (t) || !CP_DECL_THREAD_LOCAL_P (t))
{
error ("%qE must be %<threadprivate%> for %<copyin%>", t);
remove = true;
}
break;
default:
break;
}
if (need_complete_type || need_copy_assignment)
{
t = require_complete_type (t);
if (t == error_mark_node)
remove = true;
else if (TREE_CODE (TREE_TYPE (t)) == REFERENCE_TYPE
&& !complete_type_or_else (TREE_TYPE (TREE_TYPE (t)), t))
remove = true;
}
if (need_implicitly_determined)
{
const char *share_name = NULL;
if (VAR_P (t) && CP_DECL_THREAD_LOCAL_P (t))
share_name = "threadprivate";
else switch (cxx_omp_predetermined_sharing (t))
{
case OMP_CLAUSE_DEFAULT_UNSPECIFIED:
break;
case OMP_CLAUSE_DEFAULT_SHARED:
/* const vars may be specified in firstprivate clause. */
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE
&& cxx_omp_const_qual_no_mutable (t))
break;
share_name = "shared";
break;
case OMP_CLAUSE_DEFAULT_PRIVATE:
share_name = "private";
break;
default:
gcc_unreachable ();
}
if (share_name)
{
error ("%qE is predetermined %qs for %qs",
omp_clause_printable_decl (t), share_name,
omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
remove = true;
}
}
/* We're interested in the base element, not arrays. */
inner_type = type = TREE_TYPE (t);
if ((need_complete_type
|| need_copy_assignment
|| OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
&& TREE_CODE (inner_type) == REFERENCE_TYPE)
inner_type = TREE_TYPE (inner_type);
while (TREE_CODE (inner_type) == ARRAY_TYPE)
inner_type = TREE_TYPE (inner_type);
/* Check for special function availability by building a call to one.
Save the results, because later we won't be in the right context
for making these queries. */
if (CLASS_TYPE_P (inner_type)
&& COMPLETE_TYPE_P (inner_type)
&& (need_default_ctor || need_copy_ctor
|| need_copy_assignment || need_dtor)
&& !type_dependent_expression_p (t)
&& cxx_omp_create_clause_info (c, inner_type, need_default_ctor,
need_copy_ctor, need_copy_assignment,
need_dtor))
remove = true;
if (!remove
&& c_kind == OMP_CLAUSE_SHARED
&& processing_template_decl)
{
t = omp_clause_decl_field (OMP_CLAUSE_DECL (c));
if (t)
OMP_CLAUSE_DECL (c) = t;
}
if (remove)
*pc = OMP_CLAUSE_CHAIN (c);
else
pc = &OMP_CLAUSE_CHAIN (c);
}
bitmap_obstack_release (NULL);
return clauses;
}
/* Start processing OpenMP clauses that can include any
   privatization clauses for non-static data members.  */

tree
push_omp_privatization_clauses (bool ignore_next)
{
  /* Capture whether the previous construct asked us to skip this one,
     then record the request for the next construct.  */
  bool skip_this = omp_private_member_ignore_next;
  omp_private_member_ignore_next = ignore_next;
  if (skip_this)
    return NULL_TREE;
  /* If a member remapping is already active, push a boundary marker so
     the matching pop knows where this construct's entries begin.  */
  if (omp_private_member_map)
    omp_private_member_vec.safe_push (error_mark_node);
  return push_stmt_list ();
}
/* Revert remapping of any non-static data members since
   the last push_omp_privatization_clauses () call.

   STMT is the statement list that push_omp_privatization_clauses
   returned; NULL_TREE means that push was a no-op (ignore_next), so
   there is nothing to revert.  */

void
pop_omp_privatization_clauses (tree stmt)
{
  if (stmt == NULL_TREE)
    return;
  stmt = pop_stmt_list (stmt);
  if (omp_private_member_map)
    {
      /* Unwind this construct's entries from the remap vector.  Each
	 entry is a member tree, optionally topped by integer_zero_node
	 when no DECL_EXPR for the mapped decl should be emitted here;
	 error_mark_node marks the boundary with an enclosing
	 construct's entries.  */
      while (!omp_private_member_vec.is_empty ())
	{
	  tree t = omp_private_member_vec.pop ();
	  if (t == error_mark_node)
	    {
	      /* Boundary marker: remaining entries belong to an outer
		 construct — stop unwinding and keep the map alive.  */
	      add_stmt (stmt);
	      return;
	    }
	  /* integer_zero_node on top flags "don't add a DECL_EXPR".  */
	  bool no_decl_expr = t == integer_zero_node;
	  if (no_decl_expr)
	    t = omp_private_member_vec.pop ();
	  tree *v = omp_private_member_map->get (t);
	  gcc_assert (v);
	  if (!no_decl_expr)
	    add_decl_expr (*v);
	  omp_private_member_map->remove (t);
	}
      /* Vector fully drained: no enclosing construct still uses the
	 map, so tear it down.  */
      delete omp_private_member_map;
      omp_private_member_map = NULL;
    }
  add_stmt (stmt);
}
/* Remember OpenMP privatization clauses mapping and clear it.
   Used for lambdas.

   Flattens the global omp_private_member_vec / omp_private_member_map
   state into SAVE so restore_omp_privatization_clauses can rebuild it.
   Per remapped member we push: mapped decl, then the member, then (if
   present) the integer_zero_node marker; error_mark_node boundary
   markers are copied through unchanged, and a leading integer_one_node
   encodes the ignore-next flag.  */

void
save_omp_privatization_clauses (vec<tree> &save)
{
  save = vNULL;
  if (omp_private_member_ignore_next)
    save.safe_push (integer_one_node);
  omp_private_member_ignore_next = false;
  if (!omp_private_member_map)
    return;
  while (!omp_private_member_vec.is_empty ())
    {
      tree t = omp_private_member_vec.pop ();
      if (t == error_mark_node)
	{
	  /* Construct boundary marker — keep it verbatim.  */
	  save.safe_push (t);
	  continue;
	}
      /* N remembers whether an integer_zero_node marker topped T.  */
      tree n = t;
      if (t == integer_zero_node)
	t = omp_private_member_vec.pop ();
      tree *v = omp_private_member_map->get (t);
      gcc_assert (v);
      save.safe_push (*v);
      save.safe_push (t);
      if (n != t)
	save.safe_push (n);
    }
  delete omp_private_member_map;
  omp_private_member_map = NULL;
}
/* Restore OpenMP privatization clauses mapping saved by the
   above function.

   SAVE is consumed (released) by this call.  The encoding mirrors
   save_omp_privatization_clauses: popping yields, per entry, the
   member (possibly preceded by an integer_zero_node marker) and the
   mapped decl; error_mark_node boundaries are pushed back verbatim,
   and a bottom-most integer_one_node re-sets the ignore-next flag.  */

void
restore_omp_privatization_clauses (vec<tree> &save)
{
  gcc_assert (omp_private_member_vec.is_empty ());
  omp_private_member_ignore_next = false;
  if (save.is_empty ())
    return;
  /* Fast path: only the ignore-next flag was saved.  */
  if (save.length () == 1 && save[0] == integer_one_node)
    {
      omp_private_member_ignore_next = true;
      save.release ();
      return;
    }
  omp_private_member_map = new hash_map <tree, tree>;
  while (!save.is_empty ())
    {
      tree t = save.pop ();
      tree n = t;
      if (t != error_mark_node)
	{
	  if (t == integer_one_node)
	    {
	      /* The ignore-next flag, if present, is always the
		 bottom-most saved entry.  */
	      omp_private_member_ignore_next = true;
	      gcc_assert (save.is_empty ());
	      break;
	    }
	  if (t == integer_zero_node)
	    t = save.pop ();
	  tree &v = omp_private_member_map->get_or_insert (t);
	  v = save.pop ();
	}
      omp_private_member_vec.safe_push (t);
      if (n != t)
	omp_private_member_vec.safe_push (n);
    }
  save.release ();
}
/* For all variables in the tree_list VARS, mark them as thread local.

   Implements the OpenMP 'threadprivate' directive: each eligible
   variable gets TLS storage and CP_DECL_THREADPRIVATE_P set.  The
   diagnostics below are checked in order; only a variable passing all
   of them is actually marked.  */

void
finish_omp_threadprivate (tree vars)
{
  tree t;

  /* Mark every variable in VARS to be assigned thread local storage.  */
  for (t = vars; t; t = TREE_CHAIN (t))
    {
      tree v = TREE_PURPOSE (t);

      if (error_operand_p (v))
	;  /* Already diagnosed elsewhere — skip silently.  */
      else if (!VAR_P (v))
	error ("%<threadprivate%> %qD is not file, namespace "
	       "or block scope variable", v);
      /* If V had already been marked threadprivate, it doesn't matter
	 whether it had been used prior to this point.  */
      else if (TREE_USED (v)
	       && (DECL_LANG_SPECIFIC (v) == NULL
		   || !CP_DECL_THREADPRIVATE_P (v)))
	error ("%qE declared %<threadprivate%> after first use", v);
      else if (! TREE_STATIC (v) && ! DECL_EXTERNAL (v))
	error ("automatic variable %qE cannot be %<threadprivate%>", v);
      else if (! COMPLETE_TYPE_P (complete_type (TREE_TYPE (v))))
	error ("%<threadprivate%> %qE has incomplete type", v);
      else if (TREE_STATIC (v) && TYPE_P (CP_DECL_CONTEXT (v))
	       && CP_DECL_CONTEXT (v) != current_class_type)
	error ("%<threadprivate%> %qE directive not "
	       "in %qT definition", v, CP_DECL_CONTEXT (v));
      else
	{
	  /* Allocate a LANG_SPECIFIC structure for V, if needed.  */
	  if (DECL_LANG_SPECIFIC (v) == NULL)
	    {
	      retrofit_lang_decl (v);

	      /* Make sure that DECL_DISCRIMINATOR_P continues to be true
		 after the allocation of the lang_decl structure.  */
	      if (DECL_DISCRIMINATOR_P (v))
		DECL_LANG_SPECIFIC (v)->u.base.u2sel = 1;
	    }

	  if (! CP_DECL_THREAD_LOCAL_P (v))
	    {
	      CP_DECL_THREAD_LOCAL_P (v) = true;
	      set_decl_tls_model (v, decl_default_tls_model (v));
	      /* If rtl has been already set for this var, call
		 make_decl_rtl once again, so that encode_section_info
		 has a chance to look at the new decl flags.  */
	      if (DECL_RTL_SET_P (v))
		make_decl_rtl (v);
	    }
	  CP_DECL_THREADPRIVATE_P (v) = 1;
	}
    }
}
/* Build an OpenMP structured block.  */

tree
begin_omp_structured_block (void)
{
  /* An OpenMP structured block is simply a new binding level of
     kind sk_omp.  */
  tree scope = do_pushlevel (sk_omp);
  return scope;
}
/* Close the sk_omp binding level opened by begin_omp_structured_block
   and return the resulting statement tree.  */

tree
finish_omp_structured_block (tree block)
{
  tree stmt = do_poplevel (block);
  return stmt;
}
/* Similarly, except force the retention of the BLOCK.  */

tree
begin_omp_parallel (void)
{
  /* Keep the BLOCK node even if it would otherwise be discarded, then
     open the sk_omp binding level directly (the body of
     begin_omp_structured_block, inlined).  */
  keep_next_level (true);
  return do_pushlevel (sk_omp);
}
/* Generate OACC_DATA, with CLAUSES and BLOCK as its compound
   statement.  */

tree
finish_oacc_data (tree clauses, tree block)
{
  /* Close the structured block first, then wrap it in an OACC_DATA
     node and append that to the current statement list.  */
  tree body = finish_omp_structured_block (block);
  tree stmt = make_node (OACC_DATA);
  TREE_TYPE (stmt) = void_type_node;
  OACC_DATA_BODY (stmt) = body;
  OACC_DATA_CLAUSES (stmt) = clauses;
  return add_stmt (stmt);
}
/* Generate OACC_HOST_DATA, with CLAUSES and BLOCK as its compound
   statement.  */

tree
finish_oacc_host_data (tree clauses, tree block)
{
  /* Close the structured block, build the OACC_HOST_DATA node around
     it and add the node to the current statement list.  */
  tree body = finish_omp_structured_block (block);
  tree stmt = make_node (OACC_HOST_DATA);
  TREE_TYPE (stmt) = void_type_node;
  OACC_HOST_DATA_BODY (stmt) = body;
  OACC_HOST_DATA_CLAUSES (stmt) = clauses;
  return add_stmt (stmt);
}
/* Generate OMP construct CODE, with BODY and CLAUSES as its compound
   statement.  */

tree
finish_omp_construct (enum tree_code code, tree body, tree clauses)
{
  /* Generic builder: close the structured block, then attach body and
     clauses to a fresh node of the requested CODE.  */
  body = finish_omp_structured_block (body);

  tree stmt = make_node (code);
  TREE_TYPE (stmt) = void_type_node;
  OMP_CLAUSES (stmt) = clauses;
  OMP_BODY (stmt) = body;

  return add_stmt (stmt);
}
/* Wrap the finished structured block BODY and CLAUSES in an
   OMP_PARALLEL statement and add it to the current statement list.  */

tree
finish_omp_parallel (tree clauses, tree body)
{
  tree compound = finish_omp_structured_block (body);
  tree stmt = make_node (OMP_PARALLEL);
  TREE_TYPE (stmt) = void_type_node;
  OMP_PARALLEL_BODY (stmt) = compound;
  OMP_PARALLEL_CLAUSES (stmt) = clauses;
  return add_stmt (stmt);
}
/* Begin an OpenMP task construct, forcing retention of the BLOCK
   (like begin_omp_parallel).  */

tree
begin_omp_task (void)
{
  keep_next_level (true);
  /* Open the sk_omp binding level (body of begin_omp_structured_block,
     inlined).  */
  return do_pushlevel (sk_omp);
}
/* Wrap the finished structured block BODY and CLAUSES in an OMP_TASK
   statement and add it to the current statement list.  */

tree
finish_omp_task (tree clauses, tree body)
{
  tree compound = finish_omp_structured_block (body);
  tree stmt = make_node (OMP_TASK);
  TREE_TYPE (stmt) = void_type_node;
  OMP_TASK_BODY (stmt) = compound;
  OMP_TASK_CLAUSES (stmt) = clauses;
  return add_stmt (stmt);
}
/* Helper function for finish_omp_for.  Convert Ith random access iterator
   into integral iterator.  Return FALSE if successful.

   The class-typed loop variable ITER is replaced by an integral
   counter DECL counting from 0 up to (end - begin); the pre-body is
   extended to initialize ITER and compute the trip count, and the body
   is prefixed with code advancing ITER to match DECL on entry.  On any
   error a diagnostic is emitted and TRUE is returned.  */

static bool
handle_omp_for_class_iterator (int i, location_t locus, enum tree_code code,
			       tree declv, tree orig_declv, tree initv,
			       tree condv, tree incrv, tree *body,
			       tree *pre_body, tree &clauses, tree *lastp,
			       int collapse, int ordered)
{
  tree diff, iter_init, iter_incr = NULL, last;
  tree incr_var = NULL, orig_pre_body, orig_body, c;
  tree decl = TREE_VEC_ELT (declv, i);
  tree init = TREE_VEC_ELT (initv, i);
  tree cond = TREE_VEC_ELT (condv, i);
  tree incr = TREE_VEC_ELT (incrv, i);
  tree iter = decl;
  location_t elocus = locus;

  if (init && EXPR_HAS_LOCATION (init))
    elocus = EXPR_LOCATION (init);

  /* Validate the controlling predicate: it must be a comparison with
     ITER on one side; normalize so ITER is the left operand.  */
  cond = cp_fully_fold (cond);
  switch (TREE_CODE (cond))
    {
    case GT_EXPR:
    case GE_EXPR:
    case LT_EXPR:
    case LE_EXPR:
    case NE_EXPR:
      if (TREE_OPERAND (cond, 1) == iter)
	cond = build2 (swap_tree_comparison (TREE_CODE (cond)),
		       TREE_TYPE (cond), iter, TREE_OPERAND (cond, 0));
      if (TREE_OPERAND (cond, 0) != iter)
	cond = error_mark_node;
      else
	{
	  /* Verify the comparison is actually buildable for ITER's
	     class type (e.g. operator< exists).  */
	  tree tem = build_x_binary_op (EXPR_LOCATION (cond),
					TREE_CODE (cond),
					iter, ERROR_MARK,
					TREE_OPERAND (cond, 1), ERROR_MARK,
					NULL, tf_warning_or_error);
	  if (error_operand_p (tem))
	    return true;
	}
      break;
    default:
      cond = error_mark_node;
      break;
    }
  if (cond == error_mark_node)
    {
      error_at (elocus, "invalid controlling predicate");
      return true;
    }

  /* Trip count: (end - iter), which must have integer type for a
     random access iterator.  */
  diff = build_x_binary_op (elocus, MINUS_EXPR, TREE_OPERAND (cond, 1),
			    ERROR_MARK, iter, ERROR_MARK, NULL,
			    tf_warning_or_error);
  diff = cp_fully_fold (diff);
  if (error_operand_p (diff))
    return true;
  if (TREE_CODE (TREE_TYPE (diff)) != INTEGER_TYPE)
    {
      error_at (elocus, "difference between %qE and %qD does not have integer type",
		TREE_OPERAND (cond, 1), iter);
      return true;
    }
  if (!c_omp_check_loop_iv_exprs (locus, orig_declv,
				  TREE_VEC_ELT (declv, i), NULL_TREE,
				  cond, cp_walk_subtrees))
    return true;

  /* Validate and canonicalize the increment.  INCR becomes the step as
     an integer expression; ITER_INCR, if non-NULL, is the expression
     that actually advances the class iterator.  */
  switch (TREE_CODE (incr))
    {
    case PREINCREMENT_EXPR:
    case PREDECREMENT_EXPR:
    case POSTINCREMENT_EXPR:
    case POSTDECREMENT_EXPR:
      if (TREE_OPERAND (incr, 0) != iter)
	{
	  incr = error_mark_node;
	  break;
	}
      iter_incr = build_x_unary_op (EXPR_LOCATION (incr),
				    TREE_CODE (incr), iter,
				    tf_warning_or_error);
      if (error_operand_p (iter_incr))
	return true;
      else if (TREE_CODE (incr) == PREINCREMENT_EXPR
	       || TREE_CODE (incr) == POSTINCREMENT_EXPR)
	incr = integer_one_node;
      else
	incr = integer_minus_one_node;
      break;
    case MODIFY_EXPR:
      if (TREE_OPERAND (incr, 0) != iter)
	incr = error_mark_node;
      else if (TREE_CODE (TREE_OPERAND (incr, 1)) == PLUS_EXPR
	       || TREE_CODE (TREE_OPERAND (incr, 1)) == MINUS_EXPR)
	{
	  tree rhs = TREE_OPERAND (incr, 1);
	  if (TREE_OPERAND (rhs, 0) == iter)
	    {
	      /* Form: iter = iter +/- step.  */
	      if (TREE_CODE (TREE_TYPE (TREE_OPERAND (rhs, 1)))
		  != INTEGER_TYPE)
		incr = error_mark_node;
	      else
		{
		  iter_incr = build_x_modify_expr (EXPR_LOCATION (rhs),
						   iter, TREE_CODE (rhs),
						   TREE_OPERAND (rhs, 1),
						   tf_warning_or_error);
		  if (error_operand_p (iter_incr))
		    return true;
		  incr = TREE_OPERAND (rhs, 1);
		  incr = cp_convert (TREE_TYPE (diff), incr,
				     tf_warning_or_error);
		  if (TREE_CODE (rhs) == MINUS_EXPR)
		    {
		      incr = build1 (NEGATE_EXPR, TREE_TYPE (diff), incr);
		      incr = fold_simple (incr);
		    }
		  /* Only keep ITER_INCR when the step folds to a
		     constant; otherwise it is rebuilt below.  */
		  if (TREE_CODE (incr) != INTEGER_CST
		      && (TREE_CODE (incr) != NOP_EXPR
			  || (TREE_CODE (TREE_OPERAND (incr, 0))
			      != INTEGER_CST)))
		    iter_incr = NULL;
		}
	    }
	  else if (TREE_OPERAND (rhs, 1) == iter)
	    {
	      /* Form: iter = step + iter (PLUS only).  */
	      if (TREE_CODE (TREE_TYPE (TREE_OPERAND (rhs, 0))) != INTEGER_TYPE
		  || TREE_CODE (rhs) != PLUS_EXPR)
		incr = error_mark_node;
	      else
		{
		  iter_incr = build_x_binary_op (EXPR_LOCATION (rhs),
						 PLUS_EXPR,
						 TREE_OPERAND (rhs, 0),
						 ERROR_MARK, iter,
						 ERROR_MARK, NULL,
						 tf_warning_or_error);
		  if (error_operand_p (iter_incr))
		    return true;
		  iter_incr = build_x_modify_expr (EXPR_LOCATION (rhs),
						   iter, NOP_EXPR,
						   iter_incr,
						   tf_warning_or_error);
		  if (error_operand_p (iter_incr))
		    return true;
		  incr = TREE_OPERAND (rhs, 0);
		  iter_incr = NULL;
		}
	    }
	  else
	    incr = error_mark_node;
	}
      else
	incr = error_mark_node;
      break;
    default:
      incr = error_mark_node;
      break;
    }

  if (incr == error_mark_node)
    {
      error_at (elocus, "invalid increment expression");
      return true;
    }

  incr = cp_convert (TREE_TYPE (diff), incr, tf_warning_or_error);

  /* For taskloop, note whether the iterator already appears in a
     lastprivate or private clause, so we can mark it (or add a
     firstprivate clause for it below).  C ends up pointing at the
     lastprivate clause for ITER, if any.  */
  bool taskloop_iv_seen = false;
  for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
    if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
	&& OMP_CLAUSE_DECL (c) == iter)
      {
	if (code == OMP_TASKLOOP)
	  {
	    taskloop_iv_seen = true;
	    OMP_CLAUSE_LASTPRIVATE_TASKLOOP_IV (c) = 1;
	  }
	break;
      }
    else if (code == OMP_TASKLOOP
	     && OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
	     && OMP_CLAUSE_DECL (c) == iter)
      {
	taskloop_iv_seen = true;
	OMP_CLAUSE_PRIVATE_TASKLOOP_IV (c) = 1;
      }

  /* DECL is the new integral loop counter; LAST tracks the counter
     value ITER currently corresponds to.  */
  decl = create_temporary_var (TREE_TYPE (diff));
  pushdecl (decl);
  add_decl_expr (decl);
  last = create_temporary_var (TREE_TYPE (diff));
  pushdecl (last);
  add_decl_expr (last);
  if (c && iter_incr == NULL && TREE_CODE (incr) != INTEGER_CST
      && (!ordered || (i < collapse && collapse > 1)))
    {
      /* Non-constant step needed later for the lastprivate statement:
	 capture it in a temporary evaluated in the pre-body.  */
      incr_var = create_temporary_var (TREE_TYPE (diff));
      pushdecl (incr_var);
      add_decl_expr (incr_var);
    }
  gcc_assert (stmts_are_full_exprs_p ());
  tree diffvar = NULL_TREE;
  if (code == OMP_TASKLOOP)
    {
      if (!taskloop_iv_seen)
	{
	  /* The class iterator itself must be firstprivate on the
	     taskloop so each task sees its starting value.  */
	  tree ivc = build_omp_clause (locus, OMP_CLAUSE_FIRSTPRIVATE);
	  OMP_CLAUSE_DECL (ivc) = iter;
	  cxx_omp_finish_clause (ivc, NULL);
	  OMP_CLAUSE_CHAIN (ivc) = clauses;
	  clauses = ivc;
	}
      tree lvc = build_omp_clause (locus, OMP_CLAUSE_FIRSTPRIVATE);
      OMP_CLAUSE_DECL (lvc) = last;
      OMP_CLAUSE_CHAIN (lvc) = clauses;
      clauses = lvc;
      diffvar = create_temporary_var (TREE_TYPE (diff));
      pushdecl (diffvar);
      add_decl_expr (diffvar);
    }

  /* Build the pre-body: initialize ITER, LAST (and INCR_VAR/DIFFVAR
     when used) ahead of the loop.  */
  orig_pre_body = *pre_body;
  *pre_body = push_stmt_list ();
  if (orig_pre_body)
    add_stmt (orig_pre_body);
  if (init != NULL)
    finish_expr_stmt (build_x_modify_expr (elocus,
					   iter, NOP_EXPR, init,
					   tf_warning_or_error));
  /* The integral counter always starts at zero.  */
  init = build_int_cst (TREE_TYPE (diff), 0);
  if (c && iter_incr == NULL
      && (!ordered || (i < collapse && collapse > 1)))
    {
      if (incr_var)
	{
	  finish_expr_stmt (build_x_modify_expr (elocus,
						 incr_var, NOP_EXPR,
						 incr, tf_warning_or_error));
	  incr = incr_var;
	}
      iter_incr = build_x_modify_expr (elocus,
				       iter, PLUS_EXPR, incr,
				       tf_warning_or_error);
    }
  if (c && ordered && i < collapse && collapse > 1)
    iter_incr = incr;
  finish_expr_stmt (build_x_modify_expr (elocus,
					 last, NOP_EXPR, init,
					 tf_warning_or_error));
  if (diffvar)
    {
      finish_expr_stmt (build_x_modify_expr (elocus,
					     diffvar, NOP_EXPR,
					     diff, tf_warning_or_error));
      diff = diffvar;
    }
  *pre_body = pop_stmt_list (*pre_body);

  /* The rewritten loop runs DECL over [0, DIFF) with the given step.  */
  cond = cp_build_binary_op (elocus,
			     TREE_CODE (cond), decl, diff,
			     tf_warning_or_error);
  incr = build_modify_expr (elocus, decl, NULL_TREE, PLUS_EXPR,
			    elocus, incr, NULL_TREE);

  /* Prefix the body with iter += (decl - last); last = decl; so ITER
     tracks the counter across iterations (and chunk boundaries).  */
  orig_body = *body;
  *body = push_stmt_list ();
  iter_init = build2 (MINUS_EXPR, TREE_TYPE (diff), decl, last);
  iter_init = build_x_modify_expr (elocus,
				   iter, PLUS_EXPR, iter_init,
				   tf_warning_or_error);
  if (iter_init != error_mark_node)
    iter_init = build1 (NOP_EXPR, void_type_node, iter_init);
  finish_expr_stmt (iter_init);
  finish_expr_stmt (build_x_modify_expr (elocus,
					 last, NOP_EXPR, decl,
					 tf_warning_or_error));
  add_stmt (orig_body);
  *body = pop_stmt_list (*body);

  if (c)
    {
      /* Lastprivate: advance ITER to the value matching the final
	 counter value, to be run after the last iteration.  */
      OMP_CLAUSE_LASTPRIVATE_STMT (c) = push_stmt_list ();
      if (!ordered)
	finish_expr_stmt (iter_incr);
      else
	{
	  iter_init = decl;
	  if (i < collapse && collapse > 1 && !error_operand_p (iter_incr))
	    iter_init = build2 (PLUS_EXPR, TREE_TYPE (diff),
				iter_init, iter_incr);
	  iter_init = build2 (MINUS_EXPR, TREE_TYPE (diff), iter_init, last);
	  iter_init = build_x_modify_expr (elocus,
					   iter, PLUS_EXPR, iter_init,
					   tf_warning_or_error);
	  if (iter_init != error_mark_node)
	    iter_init = build1 (NOP_EXPR, void_type_node, iter_init);
	  finish_expr_stmt (iter_init);
	}
      OMP_CLAUSE_LASTPRIVATE_STMT (c)
	= pop_stmt_list (OMP_CLAUSE_LASTPRIVATE_STMT (c));
    }

  /* Publish the rewritten loop parts back into the vectors.  */
  TREE_VEC_ELT (declv, i) = decl;
  TREE_VEC_ELT (initv, i) = init;
  TREE_VEC_ELT (condv, i) = cond;
  TREE_VEC_ELT (incrv, i) = incr;
  *lastp = last;

  return false;
}
/* Build and validate an OMP_FOR statement.  CLAUSES, BODY, COND, INCR
   are directly for their associated operands in the statement.  DECL
   and INIT are a combo; if DECL is NULL then INIT ought to be a
   MODIFY_EXPR, and the DECL should be extracted.  PRE_BODY are
   optional statements that need to go before the loop into its
   sk_omp scope.

   Returns the built statement, or NULL on error.  For CILK_FOR
   outside templates the result is an enclosing OMP_PARALLEL.  */

tree
finish_omp_for (location_t locus, enum tree_code code, tree declv,
		tree orig_declv, tree initv, tree condv, tree incrv,
		tree body, tree pre_body, vec<tree> *orig_inits, tree clauses)
{
  tree omp_for = NULL, orig_incr = NULL;
  tree decl = NULL, init, cond, incr, orig_decl = NULL_TREE, block = NULL_TREE;
  tree last = NULL_TREE;
  location_t elocus;
  int i;
  int collapse = 1;
  int ordered = 0;

  gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (initv));
  gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (condv));
  gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (incrv));
  /* Determine COLLAPSE (from tile/collapse clauses) and ORDERED (set
     when more loops are associated than are collapsed).  */
  if (TREE_VEC_LENGTH (declv) > 1)
    {
      tree c;

      c = omp_find_clause (clauses, OMP_CLAUSE_TILE);
      if (c)
	collapse = list_length (OMP_CLAUSE_TILE_LIST (c));
      else
	{
	  c = omp_find_clause (clauses, OMP_CLAUSE_COLLAPSE);
	  if (c)
	    collapse = tree_to_shwi (OMP_CLAUSE_COLLAPSE_EXPR (c));
	  if (collapse != TREE_VEC_LENGTH (declv))
	    ordered = TREE_VEC_LENGTH (declv);
	}
    }
  /* First pass: extract DECL from INIT where the parser combined
     them, and require a condition and increment for every loop.  */
  for (i = 0; i < TREE_VEC_LENGTH (declv); i++)
    {
      decl = TREE_VEC_ELT (declv, i);
      init = TREE_VEC_ELT (initv, i);
      cond = TREE_VEC_ELT (condv, i);
      incr = TREE_VEC_ELT (incrv, i);
      elocus = locus;

      if (decl == NULL)
	{
	  if (init != NULL)
	    switch (TREE_CODE (init))
	      {
	      case MODIFY_EXPR:
		decl = TREE_OPERAND (init, 0);
		init = TREE_OPERAND (init, 1);
		break;
	      case MODOP_EXPR:
		if (TREE_CODE (TREE_OPERAND (init, 1)) == NOP_EXPR)
		  {
		    decl = TREE_OPERAND (init, 0);
		    init = TREE_OPERAND (init, 2);
		  }
		break;
	      default:
		break;
	      }

	  if (decl == NULL)
	    {
	      error_at (locus,
			"expected iteration declaration or initialization");
	      return NULL;
	    }
	}

      if (init && EXPR_HAS_LOCATION (init))
	elocus = EXPR_LOCATION (init);

      if (cond == NULL)
	{
	  error_at (elocus, "missing controlling predicate");
	  return NULL;
	}

      if (incr == NULL)
	{
	  error_at (elocus, "missing increment expression");
	  return NULL;
	}

      TREE_VEC_ELT (declv, i) = decl;
      TREE_VEC_ELT (initv, i) = init;
    }

  /* Reject init expressions that reference the iteration variables.  */
  if (orig_inits)
    {
      bool fail = false;
      tree orig_init;
      FOR_EACH_VEC_ELT (*orig_inits, i, orig_init)
	if (orig_init
	    && !c_omp_check_loop_iv_exprs (locus, declv,
					   TREE_VEC_ELT (declv, i), orig_init,
					   NULL_TREE, cp_walk_subtrees))
	  fail = true;
      if (fail)
	return NULL;
    }

  /* Inside a template with dependent expressions, build a bare node
     and defer all checking until instantiation.  */
  if (dependent_omp_for_p (declv, initv, condv, incrv))
    {
      tree stmt;

      stmt = make_node (code);

      for (i = 0; i < TREE_VEC_LENGTH (declv); i++)
	{
	  /* This is really just a place-holder.  We'll be decomposing this
	     again and going through the cp_build_modify_expr path below when
	     we instantiate the thing.  */
	  TREE_VEC_ELT (initv, i)
	    = build2 (MODIFY_EXPR, void_type_node, TREE_VEC_ELT (declv, i),
		      TREE_VEC_ELT (initv, i));
	}

      TREE_TYPE (stmt) = void_type_node;
      OMP_FOR_INIT (stmt) = initv;
      OMP_FOR_COND (stmt) = condv;
      OMP_FOR_INCR (stmt) = incrv;
      OMP_FOR_BODY (stmt) = body;
      OMP_FOR_PRE_BODY (stmt) = pre_body;
      OMP_FOR_CLAUSES (stmt) = clauses;

      SET_EXPR_LOCATION (stmt, locus);
      return add_stmt (stmt);
    }

  if (!orig_declv)
    orig_declv = copy_node (declv);

  if (processing_template_decl)
    orig_incr = make_tree_vec (TREE_VEC_LENGTH (incrv));

  /* Second pass: validate each loop; class iterators are rewritten
     (which re-visits the same index, hence the manual increment).  */
  for (i = 0; i < TREE_VEC_LENGTH (declv); )
    {
      decl = TREE_VEC_ELT (declv, i);
      init = TREE_VEC_ELT (initv, i);
      cond = TREE_VEC_ELT (condv, i);
      incr = TREE_VEC_ELT (incrv, i);
      if (orig_incr)
	TREE_VEC_ELT (orig_incr, i) = incr;
      elocus = locus;

      if (init && EXPR_HAS_LOCATION (init))
	elocus = EXPR_LOCATION (init);

      if (!DECL_P (decl))
	{
	  error_at (elocus, "expected iteration declaration or initialization");
	  return NULL;
	}

      if (incr && TREE_CODE (incr) == MODOP_EXPR)
	{
	  if (orig_incr)
	    TREE_VEC_ELT (orig_incr, i) = incr;
	  incr = cp_build_modify_expr (elocus, TREE_OPERAND (incr, 0),
				       TREE_CODE (TREE_OPERAND (incr, 1)),
				       TREE_OPERAND (incr, 2),
				       tf_warning_or_error);
	}

      if (CLASS_TYPE_P (TREE_TYPE (decl)))
	{
	  if (code == OMP_SIMD)
	    {
	      error_at (elocus, "%<#pragma omp simd%> used with class "
			"iteration variable %qE", decl);
	      return NULL;
	    }
	  if (code == CILK_FOR && i == 0)
	    orig_decl = decl;
	  /* Rewrite the class iterator into an integral one; on
	     success the current index now holds the rewritten loop and
	     is processed again (continue without i++).  */
	  if (handle_omp_for_class_iterator (i, locus, code, declv, orig_declv,
					     initv, condv, incrv, &body,
					     &pre_body, clauses, &last,
					     collapse, ordered))
	    return NULL;
	  continue;
	}

      if (!INTEGRAL_TYPE_P (TREE_TYPE (decl))
	  && !TYPE_PTR_P (TREE_TYPE (decl)))
	{
	  error_at (elocus, "invalid type for iteration variable %qE", decl);
	  return NULL;
	}

      if (!processing_template_decl)
	{
	  init = fold_build_cleanup_point_expr (TREE_TYPE (init), init);
	  init = cp_build_modify_expr (elocus, decl, NOP_EXPR, init,
				       tf_warning_or_error);
	}
      else
	init = build2 (MODIFY_EXPR, void_type_node, decl, init);
      /* Wrap side-effecting condition operands (other than the decl
	 itself) in cleanup points so temporaries are destroyed.  */
      if (cond
	  && TREE_SIDE_EFFECTS (cond)
	  && COMPARISON_CLASS_P (cond)
	  && !processing_template_decl)
	{
	  tree t = TREE_OPERAND (cond, 0);
	  if (TREE_SIDE_EFFECTS (t)
	      && t != decl
	      && (TREE_CODE (t) != NOP_EXPR
		  || TREE_OPERAND (t, 0) != decl))
	    TREE_OPERAND (cond, 0)
	      = fold_build_cleanup_point_expr (TREE_TYPE (t), t);

	  t = TREE_OPERAND (cond, 1);
	  if (TREE_SIDE_EFFECTS (t)
	      && t != decl
	      && (TREE_CODE (t) != NOP_EXPR
		  || TREE_OPERAND (t, 0) != decl))
	    TREE_OPERAND (cond, 1)
	      = fold_build_cleanup_point_expr (TREE_TYPE (t), t);
	}
      if (decl == error_mark_node || init == error_mark_node)
	return NULL;

      TREE_VEC_ELT (declv, i) = decl;
      TREE_VEC_ELT (initv, i) = init;
      TREE_VEC_ELT (condv, i) = cond;
      TREE_VEC_ELT (incrv, i) = incr;
      i++;
    }

  if (IS_EMPTY_STMT (pre_body))
    pre_body = NULL;

  /* CILK_FOR gets wrapped in an OMP_PARALLEL below; collect the
     statements into BLOCK for its bind expression.  */
  if (code == CILK_FOR && !processing_template_decl)
    block = push_stmt_list ();

  omp_for = c_finish_omp_for (locus, code, declv, orig_declv, initv, condv,
			      incrv, body, pre_body);

  /* Check for iterators appearing in lb, b or incr expressions.  */
  if (omp_for && !c_omp_check_loop_iv (omp_for, orig_declv, cp_walk_subtrees))
    omp_for = NULL_TREE;

  if (omp_for == NULL)
    {
      if (block)
	pop_stmt_list (block);
      return NULL;
    }

  add_stmt (omp_for);

  /* Post-process increments: wrap side-effecting step operands in
     cleanup points and restore the original (MODOP) form inside
     templates.  */
  for (i = 0; i < TREE_VEC_LENGTH (OMP_FOR_INCR (omp_for)); i++)
    {
      decl = TREE_OPERAND (TREE_VEC_ELT (OMP_FOR_INIT (omp_for), i), 0);
      incr = TREE_VEC_ELT (OMP_FOR_INCR (omp_for), i);

      if (TREE_CODE (incr) != MODIFY_EXPR)
	continue;

      if (TREE_SIDE_EFFECTS (TREE_OPERAND (incr, 1))
	  && BINARY_CLASS_P (TREE_OPERAND (incr, 1))
	  && !processing_template_decl)
	{
	  tree t = TREE_OPERAND (TREE_OPERAND (incr, 1), 0);
	  if (TREE_SIDE_EFFECTS (t)
	      && t != decl
	      && (TREE_CODE (t) != NOP_EXPR
		  || TREE_OPERAND (t, 0) != decl))
	    TREE_OPERAND (TREE_OPERAND (incr, 1), 0)
	      = fold_build_cleanup_point_expr (TREE_TYPE (t), t);

	  t = TREE_OPERAND (TREE_OPERAND (incr, 1), 1);
	  if (TREE_SIDE_EFFECTS (t)
	      && t != decl
	      && (TREE_CODE (t) != NOP_EXPR
		  || TREE_OPERAND (t, 0) != decl))
	    TREE_OPERAND (TREE_OPERAND (incr, 1), 1)
	      = fold_build_cleanup_point_expr (TREE_TYPE (t), t);
	}

      if (orig_incr)
	TREE_VEC_ELT (OMP_FOR_INCR (omp_for), i) = TREE_VEC_ELT (orig_incr, i);
    }
  OMP_FOR_CLAUSES (omp_for) = clauses;

  /* For simd loops with non-static data member iterators, we could have added
     OMP_CLAUSE_LINEAR clauses without OMP_CLAUSE_LINEAR_STEP.  As we know the
     step at this point, fill it in.  */
  if (code == OMP_SIMD && !processing_template_decl
      && TREE_VEC_LENGTH (OMP_FOR_INCR (omp_for)) == 1)
    for (tree c = omp_find_clause (clauses, OMP_CLAUSE_LINEAR); c;
	 c = omp_find_clause (OMP_CLAUSE_CHAIN (c), OMP_CLAUSE_LINEAR))
      if (OMP_CLAUSE_LINEAR_STEP (c) == NULL_TREE)
	{
	  decl = TREE_OPERAND (TREE_VEC_ELT (OMP_FOR_INIT (omp_for), 0), 0);
	  gcc_assert (decl == OMP_CLAUSE_DECL (c));
	  incr = TREE_VEC_ELT (OMP_FOR_INCR (omp_for), 0);
	  tree step, stept;
	  switch (TREE_CODE (incr))
	    {
	    case PREINCREMENT_EXPR:
	    case POSTINCREMENT_EXPR:
	      /* c_omp_for_incr_canonicalize_ptr() should have been
		 called to massage things appropriately.  */
	      gcc_assert (!POINTER_TYPE_P (TREE_TYPE (decl)));
	      OMP_CLAUSE_LINEAR_STEP (c) = build_int_cst (TREE_TYPE (decl), 1);
	      break;
	    case PREDECREMENT_EXPR:
	    case POSTDECREMENT_EXPR:
	      /* c_omp_for_incr_canonicalize_ptr() should have been
		 called to massage things appropriately.  */
	      gcc_assert (!POINTER_TYPE_P (TREE_TYPE (decl)));
	      OMP_CLAUSE_LINEAR_STEP (c)
		= build_int_cst (TREE_TYPE (decl), -1);
	      break;
	    case MODIFY_EXPR:
	      gcc_assert (TREE_OPERAND (incr, 0) == decl);
	      incr = TREE_OPERAND (incr, 1);
	      switch (TREE_CODE (incr))
		{
		case PLUS_EXPR:
		  if (TREE_OPERAND (incr, 1) == decl)
		    step = TREE_OPERAND (incr, 0);
		  else
		    step = TREE_OPERAND (incr, 1);
		  break;
		case MINUS_EXPR:
		case POINTER_PLUS_EXPR:
		  gcc_assert (TREE_OPERAND (incr, 0) == decl);
		  step = TREE_OPERAND (incr, 1);
		  break;
		default:
		  gcc_unreachable ();
		}
	      stept = TREE_TYPE (decl);
	      if (POINTER_TYPE_P (stept))
		stept = sizetype;
	      step = fold_convert (stept, step);
	      if (TREE_CODE (incr) == MINUS_EXPR)
		step = fold_build1 (NEGATE_EXPR, stept, step);
	      OMP_CLAUSE_LINEAR_STEP (c) = step;
	      break;
	    default:
	      gcc_unreachable ();
	    }
	}

  /* Non-template CILK_FOR: wrap the loop in an OMP_PARALLEL, hoist
     the schedule clause onto the loop, capture non-constant bound /
     step / start values in firstprivate temporaries, and add the
     _Cilk_for iteration count clause.  */
  if (block)
    {
      tree omp_par = make_node (OMP_PARALLEL);
      TREE_TYPE (omp_par) = void_type_node;
      OMP_PARALLEL_CLAUSES (omp_par) = NULL_TREE;
      tree bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
      TREE_SIDE_EFFECTS (bind) = 1;
      BIND_EXPR_BODY (bind) = pop_stmt_list (block);
      OMP_PARALLEL_BODY (omp_par) = bind;
      if (OMP_FOR_PRE_BODY (omp_for))
	{
	  add_stmt (OMP_FOR_PRE_BODY (omp_for));
	  OMP_FOR_PRE_BODY (omp_for) = NULL_TREE;
	}
      init = TREE_VEC_ELT (OMP_FOR_INIT (omp_for), 0);
      decl = TREE_OPERAND (init, 0);
      cond = TREE_VEC_ELT (OMP_FOR_COND (omp_for), 0);
      incr = TREE_VEC_ELT (OMP_FOR_INCR (omp_for), 0);
      tree t = TREE_OPERAND (cond, 1), c, clauses, *pc;
      clauses = OMP_FOR_CLAUSES (omp_for);
      OMP_FOR_CLAUSES (omp_for) = NULL_TREE;
      /* Only the schedule clause stays on the loop; everything else
	 (expected to be firstprivate) moves to the parallel.  */
      for (pc = &clauses; *pc; )
	if (OMP_CLAUSE_CODE (*pc) == OMP_CLAUSE_SCHEDULE)
	  {
	    gcc_assert (OMP_FOR_CLAUSES (omp_for) == NULL_TREE);
	    OMP_FOR_CLAUSES (omp_for) = *pc;
	    *pc = OMP_CLAUSE_CHAIN (*pc);
	    OMP_CLAUSE_CHAIN (OMP_FOR_CLAUSES (omp_for)) = NULL_TREE;
	  }
	else
	  {
	    gcc_assert (OMP_CLAUSE_CODE (*pc) == OMP_CLAUSE_FIRSTPRIVATE);
	    pc = &OMP_CLAUSE_CHAIN (*pc);
	  }
      if (TREE_CODE (t) != INTEGER_CST)
	{
	  TREE_OPERAND (cond, 1) = get_temp_regvar (TREE_TYPE (t), t);
	  c = build_omp_clause (input_location, OMP_CLAUSE_FIRSTPRIVATE);
	  OMP_CLAUSE_DECL (c) = TREE_OPERAND (cond, 1);
	  OMP_CLAUSE_CHAIN (c) = clauses;
	  clauses = c;
	}
      if (TREE_CODE (incr) == MODIFY_EXPR)
	{
	  t = TREE_OPERAND (TREE_OPERAND (incr, 1), 1);
	  if (TREE_CODE (t) != INTEGER_CST)
	    {
	      TREE_OPERAND (TREE_OPERAND (incr, 1), 1)
		= get_temp_regvar (TREE_TYPE (t), t);
	      c = build_omp_clause (input_location, OMP_CLAUSE_FIRSTPRIVATE);
	      OMP_CLAUSE_DECL (c) = TREE_OPERAND (TREE_OPERAND (incr, 1), 1);
	      OMP_CLAUSE_CHAIN (c) = clauses;
	      clauses = c;
	    }
	}
      t = TREE_OPERAND (init, 1);
      if (TREE_CODE (t) != INTEGER_CST)
	{
	  TREE_OPERAND (init, 1) = get_temp_regvar (TREE_TYPE (t), t);
	  c = build_omp_clause (input_location, OMP_CLAUSE_FIRSTPRIVATE);
	  OMP_CLAUSE_DECL (c) = TREE_OPERAND (init, 1);
	  OMP_CLAUSE_CHAIN (c) = clauses;
	  clauses = c;
	}
      /* ORIG_DECL/LAST come from class-iterator rewriting and must be
	 visible inside the parallel as well.  */
      if (orig_decl && orig_decl != decl)
	{
	  c = build_omp_clause (input_location, OMP_CLAUSE_FIRSTPRIVATE);
	  OMP_CLAUSE_DECL (c) = orig_decl;
	  OMP_CLAUSE_CHAIN (c) = clauses;
	  clauses = c;
	}
      if (last)
	{
	  c = build_omp_clause (input_location, OMP_CLAUSE_FIRSTPRIVATE);
	  OMP_CLAUSE_DECL (c) = last;
	  OMP_CLAUSE_CHAIN (c) = clauses;
	  clauses = c;
	}
      c = build_omp_clause (input_location, OMP_CLAUSE_PRIVATE);
      OMP_CLAUSE_DECL (c) = decl;
      OMP_CLAUSE_CHAIN (c) = clauses;
      clauses = c;
      c = build_omp_clause (input_location, OMP_CLAUSE__CILK_FOR_COUNT_);
      OMP_CLAUSE_OPERAND (c, 0)
	= cilk_for_number_of_iterations (omp_for);
      OMP_CLAUSE_CHAIN (c) = clauses;
      OMP_PARALLEL_CLAUSES (omp_par) = finish_omp_clauses (c, C_ORT_CILK);
      add_stmt (omp_par);
      return omp_par;
    }
  else if (code == CILK_FOR && processing_template_decl)
    {
      /* Template CILK_FOR: just record the extra firstprivate clauses
	 for instantiation time.  */
      tree c, clauses = OMP_FOR_CLAUSES (omp_for);
      if (orig_decl && orig_decl != decl)
	{
	  c = build_omp_clause (input_location, OMP_CLAUSE_FIRSTPRIVATE);
	  OMP_CLAUSE_DECL (c) = orig_decl;
	  OMP_CLAUSE_CHAIN (c) = clauses;
	  clauses = c;
	}
      if (last)
	{
	  c = build_omp_clause (input_location, OMP_CLAUSE_FIRSTPRIVATE);
	  OMP_CLAUSE_DECL (c) = last;
	  OMP_CLAUSE_CHAIN (c) = clauses;
	  clauses = c;
	}
      OMP_FOR_CLAUSES (omp_for) = clauses;
    }
  return omp_for;
}
/* Finish an OpenMP atomic statement of kind CODE (OMP_ATOMIC,
   OMP_ATOMIC_READ or an atomic capture code) applying OPCODE to LHS
   and RHS.  V is the capture variable, LHS1/RHS1 the duplicate memory
   references of the capture/update forms, SEQ_CST the seq_cst clause.
   Emits the statement (or a diagnostic) as a side effect.  */

void
finish_omp_atomic (enum tree_code code, enum tree_code opcode, tree lhs,
		   tree rhs, tree v, tree lhs1, tree rhs1, bool seq_cst)
{
  tree orig_lhs;
  tree orig_rhs;
  tree orig_v;
  tree orig_lhs1;
  tree orig_rhs1;
  bool dependent_p;
  tree stmt;

  /* Keep the unprocessed operands for rebuilding inside templates.  */
  orig_lhs = lhs;
  orig_rhs = rhs;
  orig_v = v;
  orig_lhs1 = lhs1;
  orig_rhs1 = rhs1;
  dependent_p = false;
  stmt = NULL_TREE;

  /* Even in a template, we can detect invalid uses of the atomic
     pragma if neither LHS nor RHS is type-dependent.  */
  if (processing_template_decl)
    {
      dependent_p = (type_dependent_expression_p (lhs)
		     || (rhs && type_dependent_expression_p (rhs))
		     || (v && type_dependent_expression_p (v))
		     || (lhs1 && type_dependent_expression_p (lhs1))
		     || (rhs1 && type_dependent_expression_p (rhs1)));
      if (!dependent_p)
	{
	  lhs = build_non_dependent_expr (lhs);
	  if (rhs)
	    rhs = build_non_dependent_expr (rhs);
	  if (v)
	    v = build_non_dependent_expr (v);
	  if (lhs1)
	    lhs1 = build_non_dependent_expr (lhs1);
	  if (rhs1)
	    rhs1 = build_non_dependent_expr (rhs1);
	}
    }
  if (!dependent_p)
    {
      /* If RHS names the same memory as LHS, the operand order was
	 x = expr OP x: swap and remember whether OP commutes.  */
      bool swapped = false;
      if (rhs1 && cp_tree_equal (lhs, rhs))
	{
	  std::swap (rhs, rhs1);
	  swapped = !commutative_tree_code (opcode);
	}
      /* All memory references in an update/capture must name the same
	 location.  */
      if (rhs1 && !cp_tree_equal (lhs, rhs1))
	{
	  if (code == OMP_ATOMIC)
	    error ("%<#pragma omp atomic update%> uses two different "
		   "expressions for memory");
	  else
	    error ("%<#pragma omp atomic capture%> uses two different "
		   "expressions for memory");
	  return;
	}
      if (lhs1 && !cp_tree_equal (lhs, lhs1))
	{
	  if (code == OMP_ATOMIC)
	    error ("%<#pragma omp atomic update%> uses two different "
		   "expressions for memory");
	  else
	    error ("%<#pragma omp atomic capture%> uses two different "
		   "expressions for memory");
	  return;
	}
      stmt = c_finish_omp_atomic (input_location, code, opcode, lhs, rhs,
				  v, lhs1, rhs1, swapped, seq_cst,
				  processing_template_decl != 0);
      if (stmt == error_mark_node)
	return;
    }
  if (processing_template_decl)
    {
      /* Rebuild a template-friendly form from the ORIG_* operands,
	 wrapped in an outer OMP_ATOMIC marker node.  */
      if (code == OMP_ATOMIC_READ)
	{
	  stmt = build_min_nt_loc (EXPR_LOCATION (orig_lhs),
				   OMP_ATOMIC_READ, orig_lhs);
	  OMP_ATOMIC_SEQ_CST (stmt) = seq_cst;
	  stmt = build2 (MODIFY_EXPR, void_type_node, orig_v, stmt);
	}
      else
	{
	  if (opcode == NOP_EXPR)
	    stmt = build2 (MODIFY_EXPR, void_type_node, orig_lhs, orig_rhs);
	  else
	    stmt = build2 (opcode, void_type_node, orig_lhs, orig_rhs);
	  if (orig_rhs1)
	    stmt = build_min_nt_loc (EXPR_LOCATION (orig_rhs1),
				     COMPOUND_EXPR, orig_rhs1, stmt);
	  if (code != OMP_ATOMIC)
	    {
	      stmt = build_min_nt_loc (EXPR_LOCATION (orig_lhs1),
				       code, orig_lhs1, stmt);
	      OMP_ATOMIC_SEQ_CST (stmt) = seq_cst;
	      stmt = build2 (MODIFY_EXPR, void_type_node, orig_v, stmt);
	    }
	}
      stmt = build2 (OMP_ATOMIC, void_type_node, integer_zero_node, stmt);
      OMP_ATOMIC_SEQ_CST (stmt) = seq_cst;
    }
  finish_expr_stmt (stmt);
}
void
finish_omp_barrier (void)
{
tree fn = builtin_decl_explicit (BUILT_IN_GOMP_BARRIER);
vec<tree, va_gc> *vec = make_tree_vector ();
tree stmt = finish_call_expr (fn, &vec, false, false, tf_warning_or_error);
release_tree_vector (vec);
finish_expr_stmt (stmt);
}
void
finish_omp_flush (void)
{
tree fn = builtin_decl_explicit (BUILT_IN_SYNC_SYNCHRONIZE);
vec<tree, va_gc> *vec = make_tree_vector ();
tree stmt = finish_call_expr (fn, &vec, false, false, tf_warning_or_error);
release_tree_vector (vec);
finish_expr_stmt (stmt);
}
void
finish_omp_taskwait (void)
{
tree fn = builtin_decl_explicit (BUILT_IN_GOMP_TASKWAIT);
vec<tree, va_gc> *vec = make_tree_vector ();
tree stmt = finish_call_expr (fn, &vec, false, false, tf_warning_or_error);
release_tree_vector (vec);
finish_expr_stmt (stmt);
}
void
finish_omp_taskyield (void)
{
tree fn = builtin_decl_explicit (BUILT_IN_GOMP_TASKYIELD);
vec<tree, va_gc> *vec = make_tree_vector ();
tree stmt = finish_call_expr (fn, &vec, false, false, tf_warning_or_error);
release_tree_vector (vec);
finish_expr_stmt (stmt);
}
/* Emit '#pragma omp cancel' with the construct-kind CLAUSES as a call
   to GOMP_cancel (mask, if-condition).  */

void
finish_omp_cancel (tree clauses)
{
  tree fn = builtin_decl_explicit (BUILT_IN_GOMP_CANCEL);

  /* Map the construct clause onto the mask GOMP_cancel expects;
     the first matching clause wins.  */
  int mask;
  if (omp_find_clause (clauses, OMP_CLAUSE_PARALLEL))
    mask = 1;
  else if (omp_find_clause (clauses, OMP_CLAUSE_FOR))
    mask = 2;
  else if (omp_find_clause (clauses, OMP_CLAUSE_SECTIONS))
    mask = 4;
  else if (omp_find_clause (clauses, OMP_CLAUSE_TASKGROUP))
    mask = 8;
  else
    {
      error ("%<#pragma omp cancel%> must specify one of "
	     "%<parallel%>, %<for%>, %<sections%> or %<taskgroup%> clauses");
      return;
    }

  /* The second argument is the 'if' clause expression converted to a
     boolean, or true when no 'if' clause was given.  */
  tree ifc = omp_find_clause (clauses, OMP_CLAUSE_IF);
  if (ifc == NULL_TREE)
    ifc = boolean_true_node;
  else
    {
      tree expr = OMP_CLAUSE_IF_EXPR (ifc);
      ifc = fold_build2_loc (OMP_CLAUSE_LOCATION (ifc), NE_EXPR,
			     boolean_type_node, expr,
			     build_zero_cst (TREE_TYPE (expr)));
    }

  vec<tree, va_gc> *args = make_tree_vector ();
  args->quick_push (build_int_cst (integer_type_node, mask));
  args->quick_push (ifc);
  tree call = finish_call_expr (fn, &args, false, false,
				tf_warning_or_error);
  release_tree_vector (args);
  finish_expr_stmt (call);
}
/* Implement #pragma omp cancellation point by emitting a call to
   GOMP_cancellation_point.  CLAUSES must name exactly one construct
   kind, encoded as the same mask bits as for finish_omp_cancel.  */
void
finish_omp_cancellation_point (tree clauses)
{
  tree fn = builtin_decl_explicit (BUILT_IN_GOMP_CANCELLATION_POINT);
  int mask = 0;
  if (omp_find_clause (clauses, OMP_CLAUSE_PARALLEL))
    mask = 1;
  else if (omp_find_clause (clauses, OMP_CLAUSE_FOR))
    mask = 2;
  else if (omp_find_clause (clauses, OMP_CLAUSE_SECTIONS))
    mask = 4;
  else if (omp_find_clause (clauses, OMP_CLAUSE_TASKGROUP))
    mask = 8;
  else
    {
      error ("%<#pragma omp cancellation point%> must specify one of "
	     "%<parallel%>, %<for%>, %<sections%> or %<taskgroup%> clauses");
      return;
    }
  vec<tree, va_gc> *vec
    = make_tree_vector_single (build_int_cst (integer_type_node, mask));
  tree stmt = finish_call_expr (fn, &vec, false, false, tf_warning_or_error);
  release_tree_vector (vec);
  finish_expr_stmt (stmt);
}
/* Begin a __transaction_atomic or __transaction_relaxed statement.
If PCOMPOUND is non-null, this is for a function-transaction-block, and we
should create an extra compound stmt. */
tree
begin_transaction_stmt (location_t loc, tree *pcompound, int flags)
{
  tree r;
  if (pcompound)
    *pcompound = begin_compound_stmt (0);
  r = build_stmt (loc, TRANSACTION_EXPR, NULL_TREE);
  /* Only add the statement to the function if support enabled. */
  if (flag_tm)
    add_stmt (r);
  else
    error_at (loc, ((flags & TM_STMT_ATTR_RELAXED) != 0
		    ? G_("%<__transaction_relaxed%> without "
			 "transactional memory support enabled")
		    : G_("%<__transaction_atomic%> without "
			 "transactional memory support enabled")));
  /* Open a statement list for the transaction body; it stays pushed
     until finish_transaction_stmt pops it.  */
  TRANSACTION_EXPR_BODY (r) = push_stmt_list ();
  TREE_SIDE_EFFECTS (r) = 1;
  return r;
}
/* End a __transaction_atomic or __transaction_relaxed statement.
If COMPOUND_STMT is non-null, this is for a function-transaction-block,
and we should end the compound. If NOEX is non-NULL, we wrap the body in
a MUST_NOT_THROW_EXPR with NOEX as condition. */
void
finish_transaction_stmt (tree stmt, tree compound_stmt, int flags, tree noex)
{
  /* Pop the statement list opened by begin_transaction_stmt.  */
  TRANSACTION_EXPR_BODY (stmt) = pop_stmt_list (TRANSACTION_EXPR_BODY (stmt));
  TRANSACTION_EXPR_OUTER (stmt) = (flags & TM_STMT_ATTR_OUTER) != 0;
  TRANSACTION_EXPR_RELAXED (stmt) = (flags & TM_STMT_ATTR_RELAXED) != 0;
  TRANSACTION_EXPR_IS_STMT (stmt) = 1;
  /* noexcept specifications are not allowed for function transactions. */
  gcc_assert (!(noex && compound_stmt));
  if (noex)
    {
      /* Wrap the body so a throw when NOEX holds calls std::terminate.  */
      tree body = build_must_not_throw_expr (TRANSACTION_EXPR_BODY (stmt),
					     noex);
      protected_set_expr_location
	(body, EXPR_LOCATION (TRANSACTION_EXPR_BODY (stmt)));
      TREE_SIDE_EFFECTS (body) = 1;
      TRANSACTION_EXPR_BODY (stmt) = body;
    }
  if (compound_stmt)
    finish_compound_stmt (compound_stmt);
}
/* Build a __transaction_atomic or __transaction_relaxed expression. If
NOEX is non-NULL, we wrap the body in a MUST_NOT_THROW_EXPR with NOEX as
condition. */
tree
build_transaction_expr (location_t loc, tree expr, int flags, tree noex)
{
  tree ret;
  if (noex)
    {
      /* Guard the expression with a MUST_NOT_THROW_EXPR first.  */
      expr = build_must_not_throw_expr (expr, noex);
      protected_set_expr_location (expr, loc);
      TREE_SIDE_EFFECTS (expr) = 1;
    }
  ret = build1 (TRANSACTION_EXPR, TREE_TYPE (expr), expr);
  if (flags & TM_STMT_ATTR_RELAXED)
    TRANSACTION_EXPR_RELAXED (ret) = 1;
  TREE_SIDE_EFFECTS (ret) = 1;
  SET_EXPR_LOCATION (ret, loc);
  return ret;
}
/* One-time initialization hook for the C++ semantics module.
   Currently there is nothing to set up.  */
void
init_cp_semantics (void)
{
}
/* Build a STATIC_ASSERT for a static assertion with the condition
CONDITION and the message text MESSAGE. LOCATION is the location
of the static assertion in the source code. When MEMBER_P, this
static assertion is a member of a class. */
void
finish_static_assert (tree condition, tree message, location_t location,
		      bool member_p)
{
  /* Nothing to check if parsing of either operand already failed.  */
  if (message == NULL_TREE
      || message == error_mark_node
      || condition == NULL_TREE
      || condition == error_mark_node)
    return;
  if (check_for_bare_parameter_packs (condition))
    condition = error_mark_node;
  if (type_dependent_expression_p (condition)
      || value_dependent_expression_p (condition))
    {
      /* We're in a template; build a STATIC_ASSERT and put it in
	 the right place. */
      tree assertion;
      assertion = make_node (STATIC_ASSERT);
      STATIC_ASSERT_CONDITION (assertion) = condition;
      STATIC_ASSERT_MESSAGE (assertion) = message;
      STATIC_ASSERT_SOURCE_LOCATION (assertion) = location;
      if (member_p)
	maybe_add_class_template_decl_list (current_class_type,
					    assertion,
					    /*friend_p=*/0);
      else
	add_stmt (assertion);
      return;
    }
  /* Fold the expression and convert it to a boolean value. */
  condition = instantiate_non_dependent_expr (condition);
  condition = cp_convert (boolean_type_node, condition, tf_warning_or_error);
  condition = maybe_constant_value (condition);
  if (TREE_CODE (condition) == INTEGER_CST && !integer_zerop (condition))
    /* Do nothing; the condition is satisfied. */
    ;
  else
    {
      /* Point diagnostics at the assertion itself.  */
      location_t saved_loc = input_location;
      input_location = location;
      if (TREE_CODE (condition) == INTEGER_CST
	  && integer_zerop (condition))
	{
	  /* Length of the message in characters, excluding the
	     terminator; the string literal may use a wide char type,
	     hence the division by the element size.  */
	  int sz = TREE_INT_CST_LOW (TYPE_SIZE_UNIT
				     (TREE_TYPE (TREE_TYPE (message))));
	  int len = TREE_STRING_LENGTH (message) / sz - 1;
	  /* Report the error. */
	  if (len == 0)
	    error ("static assertion failed");
	  else
	    error ("static assertion failed: %s",
		   TREE_STRING_POINTER (message));
	}
      else if (condition && condition != error_mark_node)
	{
	  error ("non-constant condition for static assertion");
	  /* Explain *why* it is not constant, if we can.  */
	  if (require_potential_rvalue_constant_expression (condition))
	    cxx_constant_value (condition);
	}
      input_location = saved_loc;
    }
}
/* Implements the C++0x decltype keyword. Returns the type of EXPR,
suitable for use as a type-specifier.
ID_EXPRESSION_OR_MEMBER_ACCESS_P is true when EXPR was parsed as an
id-expression or a class member access, FALSE when it was parsed as
a full expression. */
tree
finish_decltype_type (tree expr, bool id_expression_or_member_access_p,
		      tsubst_flags_t complain)
{
  tree type = NULL_TREE;
  if (!expr || error_operand_p (expr))
    return error_mark_node;
  /* decltype takes an expression, never a type (a BIT_NOT_EXPR over a
     type is how ~T parses before overload resolution).  */
  if (TYPE_P (expr)
      || TREE_CODE (expr) == TYPE_DECL
      || (TREE_CODE (expr) == BIT_NOT_EXPR
	  && TYPE_P (TREE_OPERAND (expr, 0))))
    {
      if (complain & tf_error)
	error ("argument to decltype must be an expression");
      return error_mark_node;
    }
  /* Depending on the resolution of DR 1172, we may later need to distinguish
     instantiation-dependent but not type-dependent expressions so that, say,
     A<decltype(sizeof(T))>::U doesn't require 'typename'. */
  if (instantiation_dependent_uneval_expression_p (expr))
    {
      /* Defer: wrap the expression in a DECLTYPE_TYPE placeholder to be
	 resolved at instantiation time.  */
      type = cxx_make_type (DECLTYPE_TYPE);
      DECLTYPE_TYPE_EXPR (type) = expr;
      DECLTYPE_TYPE_ID_EXPR_OR_MEMBER_ACCESS_P (type)
	= id_expression_or_member_access_p;
      SET_TYPE_STRUCTURAL_EQUALITY (type);
      return type;
    }
  /* The type denoted by decltype(e) is defined as follows: */
  expr = resolve_nondeduced_context (expr, complain);
  if (invalid_nonstatic_memfn_p (input_location, expr, complain))
    return error_mark_node;
  if (type_unknown_p (expr))
    {
      if (complain & tf_error)
	error ("decltype cannot resolve address of overloaded function");
      return error_mark_node;
    }
  /* To get the size of a static data member declared as an array of
     unknown bound, we need to instantiate it. */
  if (VAR_P (expr)
      && VAR_HAD_UNKNOWN_BOUND (expr)
      && DECL_TEMPLATE_INSTANTIATION (expr))
    instantiate_decl (expr, /*defer_ok*/true, /*expl_inst_mem*/false);
  if (id_expression_or_member_access_p)
    {
      /* If e is an id-expression or a class member access (5.2.5
	 [expr.ref]), decltype(e) is defined as the type of the entity
	 named by e. If there is no such entity, or e names a set of
	 overloaded functions, the program is ill-formed. */
      if (identifier_p (expr))
	expr = lookup_name (expr);
      if (INDIRECT_REF_P (expr))
	/* This can happen when the expression is, e.g., "a.b". Just
	   look at the underlying operand. */
	expr = TREE_OPERAND (expr, 0);
      if (TREE_CODE (expr) == OFFSET_REF
	  || TREE_CODE (expr) == MEMBER_REF
	  || TREE_CODE (expr) == SCOPE_REF)
	/* We're only interested in the field itself. If it is a
	   BASELINK, we will need to see through it in the next
	   step. */
	expr = TREE_OPERAND (expr, 1);
      if (BASELINK_P (expr))
	/* See through BASELINK nodes to the underlying function. */
	expr = BASELINK_FUNCTIONS (expr);
      /* decltype of a decomposition name drops references in the tuple case
	 (unlike decltype of a normal variable) and keeps cv-qualifiers from
	 the containing object in the other cases (unlike decltype of a member
	 access expression). */
      if (DECL_DECOMPOSITION_P (expr))
	{
	  if (DECL_HAS_VALUE_EXPR_P (expr))
	    /* Expr is an array or struct subobject proxy, handle
	       bit-fields properly. */
	    return unlowered_expr_type (expr);
	  else
	    /* Expr is a reference variable for the tuple case. */
	    return lookup_decomp_type (expr);
	}
      switch (TREE_CODE (expr))
	{
	case FIELD_DECL:
	  if (DECL_BIT_FIELD_TYPE (expr))
	    {
	      /* A bit-field's declared type, not its lowered type.  */
	      type = DECL_BIT_FIELD_TYPE (expr);
	      break;
	    }
	  /* Fall through for fields that aren't bitfields. */
	  gcc_fallthrough ();
	case FUNCTION_DECL:
	case VAR_DECL:
	case CONST_DECL:
	case PARM_DECL:
	case RESULT_DECL:
	case TEMPLATE_PARM_INDEX:
	  expr = mark_type_use (expr);
	  type = TREE_TYPE (expr);
	  break;
	case ERROR_MARK:
	  type = error_mark_node;
	  break;
	case COMPONENT_REF:
	case COMPOUND_EXPR:
	  mark_type_use (expr);
	  type = is_bitfield_expr_with_lowered_type (expr);
	  if (!type)
	    type = TREE_TYPE (TREE_OPERAND (expr, 1));
	  break;
	case BIT_FIELD_REF:
	  gcc_unreachable ();
	case INTEGER_CST:
	case PTRMEM_CST:
	  /* We can get here when the id-expression refers to an
	     enumerator or non-type template parameter. */
	  type = TREE_TYPE (expr);
	  break;
	default:
	  /* Handle instantiated template non-type arguments. */
	  type = TREE_TYPE (expr);
	  break;
	}
    }
  else
    {
      /* Within a lambda-expression:
	 Every occurrence of decltype((x)) where x is a possibly
	 parenthesized id-expression that names an entity of
	 automatic storage duration is treated as if x were
	 transformed into an access to a corresponding data member
	 of the closure type that would have been declared if x
	 were a use of the denoted entity. */
      if (outer_automatic_var_p (expr)
	  && current_function_decl
	  && LAMBDA_FUNCTION_P (current_function_decl))
	type = capture_decltype (expr);
      else if (error_operand_p (expr))
	type = error_mark_node;
      else if (expr == current_class_ptr)
	/* If the expression is just "this", we want the
	   cv-unqualified pointer for the "this" type. */
	type = TYPE_MAIN_VARIANT (TREE_TYPE (expr));
      else
	{
	  /* Otherwise, where T is the type of e, if e is an lvalue,
	     decltype(e) is defined as T&; if an xvalue, T&&; otherwise, T. */
	  cp_lvalue_kind clk = lvalue_kind (expr);
	  type = unlowered_expr_type (expr);
	  gcc_assert (TREE_CODE (type) != REFERENCE_TYPE);
	  /* For vector types, pick a non-opaque variant. */
	  if (VECTOR_TYPE_P (type))
	    type = strip_typedefs (type);
	  if (clk != clk_none && !(clk & clk_class))
	    type = cp_build_reference_type (type, (clk & clk_rvalueref));
	}
    }
  return type;
}
/* Called from trait_expr_value to evaluate either __has_nothrow_assign or
__has_nothrow_copy, depending on assign_p. */
/* Return true iff every copy-assignment operator (ASSIGN_P) or copy
   constructor (!ASSIGN_P) of class TYPE is non-throwing.  */
static bool
classtype_has_nothrow_assign_or_copy_p (tree type, bool assign_p)
{
  tree fns;
  if (assign_p)
    {
      int ix;
      ix = lookup_fnfields_1 (type, cp_assignment_operator_id (NOP_EXPR));
      if (ix < 0)
	return false;
      fns = (*CLASSTYPE_METHOD_VEC (type))[ix];
    }
  else if (TYPE_HAS_COPY_CTOR (type))
    {
      /* If construction of the copy constructor was postponed, create
	 it now. */
      if (CLASSTYPE_LAZY_COPY_CTOR (type))
	lazily_declare_fn (sfk_copy_constructor, type);
      if (CLASSTYPE_LAZY_MOVE_CTOR (type))
	lazily_declare_fn (sfk_move_constructor, type);
      fns = CLASSTYPE_CONSTRUCTORS (type);
    }
  else
    return false;
  /* Walk the overload set; only copy functions are relevant.  */
  for (; fns; fns = OVL_NEXT (fns))
    {
      tree fn = OVL_CURRENT (fns);
      if (assign_p)
	{
	  if (copy_fn_p (fn) == 0)
	    continue;
	}
      else if (copy_fn_p (fn) <= 0)
	continue;
      /* A deferred noexcept spec must be resolved before we can test it.  */
      maybe_instantiate_noexcept (fn);
      if (!TYPE_NOTHROW_P (TREE_TYPE (fn)))
	return false;
    }
  return true;
}
/* Actually evaluates the trait. */
/* Evaluate trait KIND applied to TYPE1 (and, for binary traits, TYPE2),
   assuming both types are already complete enough to answer.  */
static bool
trait_expr_value (cp_trait_kind kind, tree type1, tree type2)
{
  enum tree_code type_code1;
  tree t;
  type_code1 = TREE_CODE (type1);
  switch (kind)
    {
    case CPTK_HAS_NOTHROW_ASSIGN:
      type1 = strip_array_types (type1);
      return (!CP_TYPE_CONST_P (type1) && type_code1 != REFERENCE_TYPE
	      && (trait_expr_value (CPTK_HAS_TRIVIAL_ASSIGN, type1, type2)
		  || (CLASS_TYPE_P (type1)
		      && classtype_has_nothrow_assign_or_copy_p (type1,
								 true))));
    case CPTK_HAS_TRIVIAL_ASSIGN:
      /* ??? The standard seems to be missing the "or array of such a class
	 type" wording for this trait. */
      type1 = strip_array_types (type1);
      return (!CP_TYPE_CONST_P (type1) && type_code1 != REFERENCE_TYPE
	      && (trivial_type_p (type1)
		  || (CLASS_TYPE_P (type1)
		      && TYPE_HAS_TRIVIAL_COPY_ASSIGN (type1))));
    case CPTK_HAS_NOTHROW_CONSTRUCTOR:
      type1 = strip_array_types (type1);
      return (trait_expr_value (CPTK_HAS_TRIVIAL_CONSTRUCTOR, type1, type2)
	      || (CLASS_TYPE_P (type1)
		  && (t = locate_ctor (type1))
		  && (maybe_instantiate_noexcept (t),
		      TYPE_NOTHROW_P (TREE_TYPE (t)))));
    case CPTK_HAS_TRIVIAL_CONSTRUCTOR:
      type1 = strip_array_types (type1);
      return (trivial_type_p (type1)
	      || (CLASS_TYPE_P (type1) && TYPE_HAS_TRIVIAL_DFLT (type1)));
    case CPTK_HAS_NOTHROW_COPY:
      type1 = strip_array_types (type1);
      return (trait_expr_value (CPTK_HAS_TRIVIAL_COPY, type1, type2)
	      || (CLASS_TYPE_P (type1)
		  && classtype_has_nothrow_assign_or_copy_p (type1, false)));
    case CPTK_HAS_TRIVIAL_COPY:
      /* ??? The standard seems to be missing the "or array of such a class
	 type" wording for this trait. */
      type1 = strip_array_types (type1);
      return (trivial_type_p (type1) || type_code1 == REFERENCE_TYPE
	      || (CLASS_TYPE_P (type1) && TYPE_HAS_TRIVIAL_COPY_CTOR (type1)));
    case CPTK_HAS_TRIVIAL_DESTRUCTOR:
      type1 = strip_array_types (type1);
      return (trivial_type_p (type1) || type_code1 == REFERENCE_TYPE
	      || (CLASS_TYPE_P (type1)
		  && TYPE_HAS_TRIVIAL_DESTRUCTOR (type1)));
    case CPTK_HAS_VIRTUAL_DESTRUCTOR:
      return type_has_virtual_destructor (type1);
    case CPTK_HAS_UNIQUE_OBJ_REPRESENTATIONS:
      return type_has_unique_obj_representations (type1);
    case CPTK_IS_ABSTRACT:
      return ABSTRACT_CLASS_TYPE_P (type1);
    case CPTK_IS_AGGREGATE:
      return CP_AGGREGATE_TYPE_P (type1);
    case CPTK_IS_BASE_OF:
      /* A class is considered its own base here.  */
      return (NON_UNION_CLASS_TYPE_P (type1) && NON_UNION_CLASS_TYPE_P (type2)
	      && (same_type_ignoring_top_level_qualifiers_p (type1, type2)
		  || DERIVED_FROM_P (type1, type2)));
    case CPTK_IS_CLASS:
      return NON_UNION_CLASS_TYPE_P (type1);
    case CPTK_IS_EMPTY:
      return NON_UNION_CLASS_TYPE_P (type1) && CLASSTYPE_EMPTY_P (type1);
    case CPTK_IS_ENUM:
      return type_code1 == ENUMERAL_TYPE;
    case CPTK_IS_FINAL:
      return CLASS_TYPE_P (type1) && CLASSTYPE_FINAL (type1);
    case CPTK_IS_LITERAL_TYPE:
      return literal_type_p (type1);
    case CPTK_IS_POD:
      return pod_type_p (type1);
    case CPTK_IS_POLYMORPHIC:
      return CLASS_TYPE_P (type1) && TYPE_POLYMORPHIC_P (type1);
    case CPTK_IS_SAME_AS:
      return same_type_p (type1, type2);
    case CPTK_IS_STD_LAYOUT:
      return std_layout_type_p (type1);
    case CPTK_IS_TRIVIAL:
      return trivial_type_p (type1);
    case CPTK_IS_TRIVIALLY_ASSIGNABLE:
      return is_trivially_xible (MODIFY_EXPR, type1, type2);
    case CPTK_IS_TRIVIALLY_CONSTRUCTIBLE:
      return is_trivially_xible (INIT_EXPR, type1, type2);
    case CPTK_IS_TRIVIALLY_COPYABLE:
      return trivially_copyable_p (type1);
    case CPTK_IS_UNION:
      return type_code1 == UNION_TYPE;
    default:
      gcc_unreachable ();
      return false;
    }
}
/* If TYPE is an array of unknown bound, or (possibly cv-qualified)
void, or a complete type, returns true, otherwise false. */
static bool
check_trait_type (tree type)
{
  if (type == NULL_TREE)
    return true;
  /* For binary traits the types arrive as a TREE_LIST; check each.  */
  if (TREE_CODE (type) == TREE_LIST)
    return (check_trait_type (TREE_VALUE (type))
	    && check_trait_type (TREE_CHAIN (type)));
  /* Array of unknown bound is OK if the element type is complete.  */
  if (TREE_CODE (type) == ARRAY_TYPE && !TYPE_DOMAIN (type)
      && COMPLETE_TYPE_P (TREE_TYPE (type)))
    return true;
  if (VOID_TYPE_P (type))
    return true;
  /* Otherwise the (element) type must be completable; this diagnoses
     incomplete types as a side effect.  */
  return !!complete_type_or_else (strip_array_types (type), NULL_TREE);
}
/* Process a trait expression. */
/* Process a trait expression: validate the operand types for KIND,
   defer with a TRAIT_EXPR inside templates, otherwise evaluate to a
   boolean constant.  */
tree
finish_trait_expr (cp_trait_kind kind, tree type1, tree type2)
{
  if (type1 == error_mark_node
      || type2 == error_mark_node)
    return error_mark_node;
  if (processing_template_decl)
    {
      /* Defer evaluation until instantiation.  */
      tree trait_expr = make_node (TRAIT_EXPR);
      TREE_TYPE (trait_expr) = boolean_type_node;
      TRAIT_EXPR_TYPE1 (trait_expr) = type1;
      TRAIT_EXPR_TYPE2 (trait_expr) = type2;
      TRAIT_EXPR_KIND (trait_expr) = kind;
      return trait_expr;
    }
  /* Which operands must be complete depends on the trait.  */
  switch (kind)
    {
    case CPTK_HAS_NOTHROW_ASSIGN:
    case CPTK_HAS_TRIVIAL_ASSIGN:
    case CPTK_HAS_NOTHROW_CONSTRUCTOR:
    case CPTK_HAS_TRIVIAL_CONSTRUCTOR:
    case CPTK_HAS_NOTHROW_COPY:
    case CPTK_HAS_TRIVIAL_COPY:
    case CPTK_HAS_TRIVIAL_DESTRUCTOR:
    case CPTK_HAS_UNIQUE_OBJ_REPRESENTATIONS:
    case CPTK_HAS_VIRTUAL_DESTRUCTOR:
    case CPTK_IS_ABSTRACT:
    case CPTK_IS_AGGREGATE:
    case CPTK_IS_EMPTY:
    case CPTK_IS_FINAL:
    case CPTK_IS_LITERAL_TYPE:
    case CPTK_IS_POD:
    case CPTK_IS_POLYMORPHIC:
    case CPTK_IS_STD_LAYOUT:
    case CPTK_IS_TRIVIAL:
    case CPTK_IS_TRIVIALLY_COPYABLE:
      if (!check_trait_type (type1))
	return error_mark_node;
      break;
    case CPTK_IS_TRIVIALLY_ASSIGNABLE:
    case CPTK_IS_TRIVIALLY_CONSTRUCTIBLE:
      if (!check_trait_type (type1)
	  || !check_trait_type (type2))
	return error_mark_node;
      break;
    case CPTK_IS_BASE_OF:
      /* Only the derived type (TYPE2) needs to be complete, and only
	 when the two classes differ.  */
      if (NON_UNION_CLASS_TYPE_P (type1) && NON_UNION_CLASS_TYPE_P (type2)
	  && !same_type_ignoring_top_level_qualifiers_p (type1, type2)
	  && !complete_type_or_else (type2, NULL_TREE))
	/* We already issued an error. */
	return error_mark_node;
      break;
    case CPTK_IS_CLASS:
    case CPTK_IS_ENUM:
    case CPTK_IS_UNION:
    case CPTK_IS_SAME_AS:
      break;
    default:
      gcc_unreachable ();
    }
  return (trait_expr_value (kind, type1, type2)
	  ? boolean_true_node : boolean_false_node);
}
/* Do-nothing variants of functions to handle pragma FLOAT_CONST_DECIMAL64,
which is ignored for C++. */
/* No-op for C++: pragma FLOAT_CONST_DECIMAL64 is a C-only feature.  */
void
set_float_const_decimal64 (void)
{
}
/* No-op for C++: pragma FLOAT_CONST_DECIMAL64 is a C-only feature.  */
void
clear_float_const_decimal64 (void)
{
}
/* Pragma FLOAT_CONST_DECIMAL64 is ignored for C++, so the "decimal64
   float constants" mode is never active.  */
bool
float_const_decimal64_p (void)
{
  return false;
}
/* Return true if T designates the implied `this' parameter. */
/* Return true if T designates the implied `this' parameter.  T must be
   a declaration named "this": either the real PARM_DECL, a lambda
   capture proxy for it, or (under an external binding oracle) a
   VAR_DECL standing in for it.  */
bool
is_this_parameter (tree t)
{
  if (!DECL_P (t) || DECL_NAME (t) != this_identifier)
    return false;
  gcc_assert (TREE_CODE (t) == PARM_DECL || is_capture_proxy (t)
	      || (cp_binding_oracle && TREE_CODE (t) == VAR_DECL));
  return true;
}
/* Insert the deduced return type for an auto function. */
/* Insert the deduced return type RETURN_TYPE into the auto function
   FCO, rebuilding its DECL_RESULT to match when needed.  */
void
apply_deduced_return_type (tree fco, tree return_type)
{
  tree result;
  if (return_type == error_mark_node)
    return;
  if (LAMBDA_FUNCTION_P (fco))
    {
      /* Record the deduced type on the lambda expression as well.  */
      tree lambda = CLASSTYPE_LAMBDA_EXPR (current_class_type);
      LAMBDA_EXPR_RETURN_TYPE (lambda) = return_type;
    }
  if (DECL_CONV_FN_P (fco))
    /* Conversion operators are mangled by their return type.  */
    DECL_NAME (fco) = mangle_conv_op_name_for_type (return_type);
  TREE_TYPE (fco) = change_return_type (return_type, TREE_TYPE (fco));
  result = DECL_RESULT (fco);
  if (result == NULL_TREE)
    return;
  if (TREE_TYPE (result) == return_type)
    return;
  if (!processing_template_decl && !VOID_TYPE_P (return_type)
      && !complete_type_or_else (return_type, NULL_TREE))
    return;
  /* We already have a DECL_RESULT from start_preparsed_function.
     Now we need to redo the work it and allocate_struct_function
     did to reflect the new type. */
  gcc_assert (current_function_decl == fco);
  result = build_decl (input_location, RESULT_DECL, NULL_TREE,
		       TYPE_MAIN_VARIANT (return_type));
  DECL_ARTIFICIAL (result) = 1;
  DECL_IGNORED_P (result) = 1;
  cp_apply_type_quals_to_decl (cp_type_quals (return_type),
			       result);
  DECL_RESULT (fco) = result;
  if (!processing_template_decl)
    {
      /* Recompute whether the value is returned in memory.  */
      bool aggr = aggregate_value_p (result, fco);
#ifdef PCC_STATIC_STRUCT_RETURN
      cfun->returns_pcc_struct = aggr;
#endif
      cfun->returns_struct = aggr;
    }
}
/* DECL is a local variable or parameter from the surrounding scope of a
lambda-expression. Returns the decltype for a use of the capture field
for DECL even if it hasn't been captured yet. */
static tree
capture_decltype (tree decl)
{
  tree lam = CLASSTYPE_LAMBDA_EXPR (DECL_CONTEXT (current_function_decl));
  /* FIXME do lookup instead of list walk? */
  tree cap = value_member (decl, LAMBDA_EXPR_CAPTURE_LIST (lam));
  tree type;
  if (cap)
    /* Already captured: use the capture field's type.  */
    type = TREE_TYPE (TREE_PURPOSE (cap));
  else
    /* Not yet captured: derive the type the capture WOULD have from
       the lambda's default capture mode.  */
    switch (LAMBDA_EXPR_DEFAULT_CAPTURE_MODE (lam))
      {
      case CPLD_NONE:
	error ("%qD is not captured", decl);
	return error_mark_node;
      case CPLD_COPY:
	type = TREE_TYPE (decl);
	if (TREE_CODE (type) == REFERENCE_TYPE
	    && TREE_CODE (TREE_TYPE (type)) != FUNCTION_TYPE)
	  type = TREE_TYPE (type);
	break;
      case CPLD_REFERENCE:
	type = TREE_TYPE (decl);
	if (TREE_CODE (type) != REFERENCE_TYPE)
	  type = build_reference_type (TREE_TYPE (decl));
	break;
      default:
	gcc_unreachable ();
      }
  if (TREE_CODE (type) != REFERENCE_TYPE)
    {
      /* Member access through a non-mutable lambda's 'this' yields a
	 const lvalue.  */
      if (!LAMBDA_EXPR_MUTABLE_P (lam))
	type = cp_build_qualified_type (type, (cp_type_quals (type)
					       |TYPE_QUAL_CONST));
      type = build_reference_type (type);
    }
  return type;
}
/* Build a unary fold expression of EXPR over OP. If IS_RIGHT is true,
this is a right unary fold. Otherwise it is a left unary fold. */
static tree
finish_unary_fold_expr (tree expr, int op, tree_code dir)
{
  // Build a pack expansion (assuming expr has pack type).
  if (!uses_parameter_packs (expr))
    {
      error_at (location_of (expr), "operand of fold expression has no "
		"unexpanded parameter packs");
      return error_mark_node;
    }
  tree pack = make_pack_expansion (expr);
  // Build the fold expression.  The operator is encoded as an integer
  // constant; a negative OP marks a compound-assignment fold (op=).
  tree code = build_int_cstu (integer_type_node, abs (op));
  tree fold = build_min_nt_loc (UNKNOWN_LOCATION, dir, code, pack);
  FOLD_EXPR_MODIFY_P (fold) = (op < 0);
  return fold;
}
/* Build a left unary fold expression: (... op pack).  */
tree
finish_left_unary_fold_expr (tree expr, int op)
{
  const tree_code direction = UNARY_LEFT_FOLD_EXPR;
  return finish_unary_fold_expr (expr, op, direction);
}
/* Build a right unary fold expression: (pack op ...).  */
tree
finish_right_unary_fold_expr (tree expr, int op)
{
  const tree_code direction = UNARY_RIGHT_FOLD_EXPR;
  return finish_unary_fold_expr (expr, op, direction);
}
/* Build a binary fold expression over EXPR1 and EXPR2. The
associativity of the fold is determined by EXPR1 and EXPR2 (whichever
has an unexpanded parameter pack). */
/* Helper: build a binary fold of direction DIR with pack operand PACK
   and non-pack operand INIT; a negative OP marks an op= fold.  */
tree
finish_binary_fold_expr (tree pack, tree init, int op, tree_code dir)
{
  pack = make_pack_expansion (pack);
  tree code = build_int_cstu (integer_type_node, abs (op));
  tree fold = build_min_nt_loc (UNKNOWN_LOCATION, dir, code, pack, init);
  FOLD_EXPR_MODIFY_P (fold) = (op < 0);
  return fold;
}
tree
finish_binary_fold_expr (tree expr1, tree expr2, int op)
{
  // Determine which expr has an unexpanded parameter pack and
  // set the pack and initial term.  Exactly one side must use a pack;
  // its position fixes the fold's associativity.
  bool pack1 = uses_parameter_packs (expr1);
  bool pack2 = uses_parameter_packs (expr2);
  if (pack1 && !pack2)
    return finish_binary_fold_expr (expr1, expr2, op, BINARY_RIGHT_FOLD_EXPR);
  else if (pack2 && !pack1)
    return finish_binary_fold_expr (expr2, expr1, op, BINARY_LEFT_FOLD_EXPR);
  else
    {
      if (pack1)
	error ("both arguments in binary fold have unexpanded parameter packs");
      else
	error ("no unexpanded parameter packs in binary fold");
    }
  return error_mark_node;
}
/* Finish __builtin_launder (arg). */
/* Finish __builtin_launder (arg).  Checks that ARG is (or may become,
   in a template) a pointer and lowers the call to IFN_LAUNDER.  */
tree
finish_builtin_launder (location_t loc, tree arg, tsubst_flags_t complain)
{
  tree orig_arg = arg;
  if (!type_dependent_expression_p (arg))
    arg = decay_conversion (arg, complain);
  if (error_operand_p (arg))
    return error_mark_node;
  if (!type_dependent_expression_p (arg)
      && TREE_CODE (TREE_TYPE (arg)) != POINTER_TYPE)
    {
      error_at (loc, "non-pointer argument to %<__builtin_launder%>");
      return error_mark_node;
    }
  /* In a template keep the original operand for later substitution.  */
  if (processing_template_decl)
    arg = orig_arg;
  return build_call_expr_internal_loc (loc, IFN_LAUNDER,
				       TREE_TYPE (arg), 1, arg);
}
#include "gt-cp-semantics.h"
|
tools.h | int log2i(int x){
  /* Integer floor(log2(x)): counts how many times x can be halved.
     NOTE(review): returns 0 for x <= 1; not meaningful for x <= 0.  */
  int r = 0;
  x >>= 1;
  while(x){
    r++;
    x >>= 1;
  }
  return r;
}
/* Return 2 raised to the power X (valid for 0 <= X < bit-width of int - 1). */
int pow2i(int x){
  int result = 1 << x;
  return result;
}
/* Print the N elements of A prefixed by MSG, but only for small arrays
   (n <= 32) so large runs don't flood the output.  */
void printarray(int *a,int n, const char* msg){
  if (n<=32) {
    printf("%s: ",msg);
    for (int i = 0; i <n; ++i){
      printf("%4i ",a[i]);
    }
    printf("\n");
  }
}
/* Fill A[0..N-1] with the multiples c, 2c, ..., n*c of C, in parallel. */
void initarray(int *a, int n, const int c){
#pragma omp parallel for
  for (int idx = 0; idx < n; ++idx){
    a[idx] = c * (idx + 1);
  }
}
/* Compare S element-wise against the reference result SGOLD; on the
   first mismatch print a diagnostic and terminate the process.  */
void validate(int *s, int *sgold, int n){
  for (int i = 0; i < n; ++i){
    if(s[i] != sgold[i]){
      printf("failed\n");
      printf("Error s[%i](%i) != sgold[%i](%i)\n", i, s[i], i, sgold[i]);
      exit(EXIT_FAILURE);
    }
  }
}
|
hello_cmake.c | // Reference: https://computing.llnl.gov/tutorials/openMP/samples/C/omp_hello.c
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
/* Each OpenMP thread prints a greeting with its id; thread 0 also
   reports the team size.  'nthreads' and 'tid' are private so every
   thread works on its own copies.  */
int main(int argc, char **argv)
{
  int nthreads, tid;
#pragma omp parallel private(nthreads, tid)
  {
    tid = omp_get_thread_num();
    printf("Hello Cmake from thread = %d\n", tid);
    /* Only the master thread (id 0) queries the team size.  */
    if(tid == 0)
    {
      nthreads = omp_get_num_threads();
      printf("Number of threads = %d\n", nthreads);
    }
  }
}
|
example_2D.c | /* To compile this program on Linux, try:
make CFLAGS='-std=c99 -Wall' example_2D
To run:
./example_2D; echo $?
It should print 0 if OK.
You can even compile it to run on multicore SMP for free with
make CFLAGS='-std=c99 -fopenmp -Wall' example_2D
To verify there are really some clone() system calls that create the threads:
strace -f ./example_2D ; echo $?
You can notice that the #pragma smecy are ignored (the project is
on-going :-) ) but that the program produces already correct results in
sequential execution and parallel OpenMP execution.
Enjoy!
Ronan.Keryell@hpc-project.com
for ARTEMIS SMECY European project.
*/
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
// Problem size
enum { WIDTH = 500, HEIGHT = 200 };
/** Initialize a 2D array with some progressive values
@param width is the size of the array in the second dimension
@param height is the size of the array in the first dimension
@param[out] array is the array to initialize
Note that we could also use this [out] Doxygen information to avoid
specifying it again in the #pragma...
*/
/** Initialize a 2D array with stripe-patterned values.
    @param width   size of the array's second dimension
    @param height  size of the array's first dimension
    @param[out] array  array to fill */
void init(int width, int height, int array[height][width]) {
  // Rows are independent, so they can be processed in parallel
#pragma omp parallel for
  for(int row = 0; row < height; row++)
    for(int col = 0; col < width; col++)
      // Stripe pattern: value shifted by a per-diagonal amount in [0,7]
      array[row][col] = (row + 3*col) >> ((row - col) & 7);
}
/** Write the content of an array to Portable Gray Map image format (PGM)
@param[in] filename is the name of the file to write into the image
@param n is the size of the array in the first dimension
@param m is the size of the array in the second dimension
@param[in] array is the array to use as image content. Note we could
infer the [in] information and communication directions directly from
"const" qualifier
*/
void write_pgm_image(const char filename[], int width, int height,
                     const unsigned char array[height][width]) {
  FILE * fp;
  char * comments = "# This is an image generated by the " __FILE__
    " program.\n"
    "# SMECY ARTEMIS European project.\n";
  // Open the image file for writing:
  if ((fp = fopen(filename, "w")) == NULL) {
    perror("Error opening file");
    exit(EXIT_FAILURE);
  }
  /* Write the PGM header which begins with, in ASCII decimal, the
     width, the height and the maximum gray value (255 here): */
  fprintf(fp,"P5\n%d %d\n%s%d\n", width, height, comments, UCHAR_MAX);
  // P5 payload: raw bytes in row-major order.
  for(int i = 0; i < height; i++)
    for(int j = 0; j < width; j++)
      // Write a pixel value:
      fputc(array[i][j], fp);
  // Close the file:
  fclose(fp);
}
/* Apply a vertical symmetry to a subsquare in an image
*/
/* Apply a vertical symmetry (reverse the row order) of the
   square_size x square_size subsquare of IMAGE whose top-left corner
   is at (x_offset, y_offset).
   Fix: the original mirrored row i onto row (square_size - i), which
   touches one row PAST the subsquare and never its last row; the
   correct mirror of row i is row (square_size - 1 - i).  */
void square_symmetry(int width, int height, int image[height][width],
                     int square_size, int x_offset, int y_offset) {
  // Rows pair up independently, so this can run in parallel
#pragma omp parallel for
  for(int i = 0; i < square_size/2; i++)
    for(int j = 0; j < square_size; j++) {
      int tmp = image[y_offset + i][x_offset + j];
      image[y_offset + i][x_offset + j] =
        image[y_offset + square_size - 1 - i][x_offset + j];
      image[y_offset + square_size - 1 - i][x_offset + j] = tmp;
    }
}
/** Normalize an array of integer values into an array of unsigned char
This is typically used to generate a gray image from arbitrary data.
*/
/** Normalize an array of integer values into an array of unsigned char
    in [0, UCHAR_MAX], e.g. to produce a gray image from arbitrary data.
    @param width   second-dimension size of both arrays
    @param height  first-dimension size of both arrays
    @param array   input values
    @param[out] output  normalized bytes */
void normalize_to_char(int width, int height, int array[height][width],
                       unsigned char output[height][width]) {
  /* First find the minimum and maximum values of array for
     later normalization: */
  // Initialize the minimum value to the biggest integer:
  int minimum = INT_MAX;
  // Initialize the maximum value to the smallest integer:
  int maximum = INT_MIN;
#pragma omp parallel for reduction(min:minimum) reduction(max:maximum)
  for(int i = 0; i < height; i++)
    for(int j = 0; j < width; j++) {
      int v = array[i][j];
      /* Fix: use two independent tests.  The original 'else if' never
         considered an element for the maximum once it had updated the
         minimum, so e.g. when the first element scanned was the global
         maximum, 'maximum' stayed at INT_MIN and the scale was wrong. */
      if (v < minimum) minimum = v;
      if (v > maximum) maximum = v;
    }
  // Now do the normalization
  float f = UCHAR_MAX/(float)(maximum - minimum);
#pragma omp parallel for
  for(int i = 0; i < height; i++)
    for(int j = 0; j < width; j++)
      output[i][j] = (array[i][j] - minimum)*f;
}
/* The main host program controlling and representing the whole
application */
/* Host program: build a striped test image, mirror three subsquares on
   (conceptually) different processors via OpenMP sections, then
   normalize the result and dump it as a PGM image.  */
int main(int argc, char* argv[]) {
  int image[HEIGHT][WIDTH];
  unsigned char output[HEIGHT][WIDTH];
  // Initialize with some values
  init(WIDTH, HEIGHT, image);
#pragma omp parallel sections
  {
    // On one processor
    // We rewrite a small part of image:
#pragma smecy map(PE, 0) arg(3, inout, [HEIGHT][WIDTH] \
                              /[HEIGHT/3:HEIGHT/3 + HEIGHT/2 - 1] \
                              [WIDTH/8:WIDTH/8 + HEIGHT/2 - 1])
    square_symmetry(WIDTH, HEIGHT, image, HEIGHT/2, WIDTH/8, HEIGHT/3);
    // On another processor
#pragma omp section
    // Here let the compiler to guess the array size
#pragma smecy map(PE, 1) arg(3, inout, /[HEIGHT/4:HEIGHT/4 + HEIGHT/2 - 1] \
                              [3*WIDTH/8:3*WIDTH/8 + HEIGHT/2 - 1])
    square_symmetry(WIDTH, HEIGHT, image, HEIGHT/2, 3*WIDTH/4, HEIGHT/4);
    // On another processor
#pragma omp section
    // Here let the compiler to guess the array size
#pragma smecy map(PE, 1) arg(3, inout, /[2*HEIGHT/5:2*HEIGHT/5 + HEIGHT/2 - 1] \
                              [WIDTH/2:WIDTH/2 + HEIGHT/2 - 1])
    square_symmetry(WIDTH, HEIGHT, image, HEIGHT/2, WIDTH/2, 2*HEIGHT/5);
  }
  // Here there is a synchronization because of the parallel part end
  // Since there
  normalize_to_char(WIDTH, HEIGHT, image, output);
  write_pgm_image("output.pgm", WIDTH, HEIGHT, output);
  return EXIT_SUCCESS;
}
|
blake2bp.c | /*
BLAKE2 reference source code package - optimized C implementations
Written in 2012 by Samuel Neves <sneves@dei.uc.pt>
To the extent possible under law, the author(s) have dedicated all copyright
and related and neighboring rights to this software to the public domain
worldwide. This software is distributed without any warranty.
You should have received a copy of the CC0 Public Domain Dedication along with
this software. If not, see <http://creativecommons.org/publicdomain/zero/1.0/>.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#if defined(_OPENMP)
#include <omp.h>
#endif
#include "blake2.h"
#include "blake2-impl.h"
#define PARALLELISM_DEGREE 4
/* Initialize leaf state S of the BLAKE2bp tree: node depth 0, fanout
   PARALLELISM_DEGREE, tree depth 2; OFFSET selects which of the
   parallel leaves this instance is.  */
static inline int blake2bp_init_leaf( blake2b_state *S, uint8_t outlen, uint8_t keylen, uint64_t offset )
{
  blake2b_param P[1];
  P->digest_length = outlen;
  P->key_length = keylen;
  P->fanout = PARALLELISM_DEGREE;
  P->depth = 2;
  P->leaf_length = 0;
  P->node_offset = offset;
  P->node_depth = 0;
  P->inner_length = BLAKE2B_OUTBYTES;
  memset( P->reserved, 0, sizeof( P->reserved ) );
  memset( P->salt, 0, sizeof( P->salt ) );
  memset( P->personal, 0, sizeof( P->personal ) );
  return blake2b_init_param( S, P );
}
/* Initialize the root state S of the BLAKE2bp tree: node depth 1,
   node offset 0; it will hash the concatenated leaf digests.  */
static inline int blake2bp_init_root( blake2b_state *S, uint8_t outlen, uint8_t keylen )
{
  blake2b_param P[1];
  P->digest_length = outlen;
  P->key_length = keylen;
  P->fanout = PARALLELISM_DEGREE;
  P->depth = 2;
  P->leaf_length = 0;
  P->node_offset = 0;
  P->node_depth = 1;
  P->inner_length = BLAKE2B_OUTBYTES;
  memset( P->reserved, 0, sizeof( P->reserved ) );
  memset( P->salt, 0, sizeof( P->salt ) );
  memset( P->personal, 0, sizeof( P->personal ) );
  return blake2b_init_param( S, P );
}
/* Initialize an unkeyed BLAKE2bp state producing OUTLEN-byte digests.
   Returns 0 on success, -1 on invalid parameters.  */
int blake2bp_init( blake2bp_state *S, const uint8_t outlen )
{
  if( !outlen || outlen > BLAKE2B_OUTBYTES ) return -1;
  memset( S->buf, 0, sizeof( S->buf ) );
  S->buflen = 0;
  if( blake2bp_init_root( S->R, outlen, 0 ) < 0 )
    return -1;
  for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
    if( blake2bp_init_leaf( S->S[i], outlen, 0, i ) < 0 ) return -1;
  /* Mark the last node at each tree level, as the tree mode requires.  */
  S->R->last_node = 1;
  S->S[PARALLELISM_DEGREE - 1]->last_node = 1;
  return 0;
}
/* Keyed initialization: set up root and leaves exactly as
   blake2bp_init does, then feed every leaf one full block containing
   the zero-padded key.  Returns 0 on success, -1 on bad arguments. */
int blake2bp_init_key( blake2bp_state *S, const uint8_t outlen, const void *key, const uint8_t keylen )
{
  uint8_t keyblock[BLAKE2B_BLOCKBYTES];
  size_t i;

  if( outlen == 0 || outlen > BLAKE2B_OUTBYTES ) return -1;
  if( key == NULL || keylen == 0 || keylen > BLAKE2B_KEYBYTES ) return -1;

  memset( S->buf, 0, sizeof( S->buf ) );
  S->buflen = 0;

  if( blake2bp_init_root( S->R, outlen, keylen ) < 0 ) return -1;

  for( i = 0; i < PARALLELISM_DEGREE; ++i )
  {
    if( blake2bp_init_leaf( S->S[i], outlen, keylen, i ) < 0 ) return -1;
  }

  S->R->last_node = 1;
  S->S[PARALLELISM_DEGREE - 1]->last_node = 1;

  /* Every leaf absorbs the key padded out to one full block. */
  memset( keyblock, 0, BLAKE2B_BLOCKBYTES );
  memcpy( keyblock, key, keylen );
  for( i = 0; i < PARALLELISM_DEGREE; ++i )
    blake2b_update( S->S[i], keyblock, BLAKE2B_BLOCKBYTES );
  secure_zero_memory( keyblock, BLAKE2B_BLOCKBYTES ); /* Burn the key from stack */
  return 0;
}
/* Absorb INLEN bytes of IN into the four leaf states.  A partial group
   of blocks (less than PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES bytes)
   is carried in S->buf between calls.  Always returns 0. */
int blake2bp_update( blake2bp_state *S, const uint8_t *in, uint64_t inlen )
{
  size_t left = S->buflen;
  size_t fill = sizeof( S->buf ) - left;
  /* If the carry buffer can be completed from the new input, top it off
     and hand one full block to each leaf. */
  if( left && inlen >= fill )
  {
    memcpy( S->buf + left, in, fill );
    for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
      blake2b_update( S->S[i], S->buf + i * BLAKE2B_BLOCKBYTES, BLAKE2B_BLOCKBYTES );
    in += fill;
    inlen -= fill;
    left = 0;
  }
  /* Each worker (an OpenMP thread, or one iteration of the sequential
     fallback loop) strides through the input consuming blocks
     id__, id__+4, id__+8, ... for its own leaf. */
#if defined(_OPENMP)
#pragma omp parallel shared(S), num_threads(PARALLELISM_DEGREE)
#else
  for( size_t id__ = 0; id__ < PARALLELISM_DEGREE; ++id__ )
#endif
  {
#if defined(_OPENMP)
    size_t id__ = omp_get_thread_num();
#endif
    uint64_t inlen__ = inlen;
    const uint8_t *in__ = ( const uint8_t * )in;
    in__ += id__ * BLAKE2B_BLOCKBYTES;
    while( inlen__ >= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES )
    {
      blake2b_update( S->S[id__], in__, BLAKE2B_BLOCKBYTES );
      in__ += PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES;
      inlen__ -= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES;
    }
  }
  /* Stash whatever tail is left (< one full group) for the next
     update or final call. */
  in += inlen - inlen % ( PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES );
  inlen %= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES;
  if( inlen > 0 )
    memcpy( S->buf + left, in, inlen );
  S->buflen = left + inlen;
  return 0;
}
/* Finalize: flush each leaf's share of the buffered tail, finalize the
   leaf, feed its digest to the root, then emit the root digest into
   OUT (OUTLEN bytes).  Always returns 0. */
int blake2bp_final( blake2bp_state *S, uint8_t *out, const uint8_t outlen )
{
  uint8_t leafhash[PARALLELISM_DEGREE][BLAKE2B_OUTBYTES];
  size_t leaf;

  for( leaf = 0; leaf < PARALLELISM_DEGREE; ++leaf )
  {
    const size_t off = leaf * BLAKE2B_BLOCKBYTES;
    if( S->buflen > off )
    {
      /* This leaf still owns up to one block of buffered input. */
      const size_t rem = S->buflen - off;
      blake2b_update( S->S[leaf], S->buf + off,
                      rem > BLAKE2B_BLOCKBYTES ? BLAKE2B_BLOCKBYTES : rem );
    }
    blake2b_final( S->S[leaf], leafhash[leaf], BLAKE2B_OUTBYTES );
    /* Root absorbs leaf digests in leaf order. */
    blake2b_update( S->R, leafhash[leaf], BLAKE2B_OUTBYTES );
  }
  blake2b_final( S->R, out, outlen );
  return 0;
}
/* One-shot blake2bp: hash INLEN bytes of IN into OUT (OUTLEN bytes),
   optionally keyed.  KEY may be NULL, in which case KEYLEN is forced
   to 0.  Returns 0 on success, -1 on invalid arguments or
   initialization failure. */
int blake2bp( uint8_t *out, const void *in, const void *key, uint8_t outlen, uint64_t inlen, uint8_t keylen )
{
  uint8_t hash[PARALLELISM_DEGREE][BLAKE2B_OUTBYTES];
  blake2b_state S[PARALLELISM_DEGREE][1];
  blake2b_state FS[1];
  /* Verify parameters */
  if ( NULL == in ) return -1;
  if ( NULL == out ) return -1;
  if ( NULL == key ) keylen = 0;
  for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
    if( blake2bp_init_leaf( S[i], outlen, keylen, i ) < 0 ) return -1;
  S[PARALLELISM_DEGREE - 1]->last_node = 1; // mark last node
  /* Keyed mode: every leaf absorbs the key padded to a full block. */
  if( keylen > 0 )
  {
    uint8_t block[BLAKE2B_BLOCKBYTES];
    memset( block, 0, BLAKE2B_BLOCKBYTES );
    memcpy( block, key, keylen );
    for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
      blake2b_update( S[i], block, BLAKE2B_BLOCKBYTES );
    secure_zero_memory( block, BLAKE2B_BLOCKBYTES ); /* Burn the key from stack */
  }
  /* Each worker hashes blocks id__, id__+4, id__+8, ... of the input,
     then its leftover partial block (if any), then finalizes its leaf. */
#if defined(_OPENMP)
#pragma omp parallel shared(S,hash), num_threads(PARALLELISM_DEGREE)
#else
  for( size_t id__ = 0; id__ < PARALLELISM_DEGREE; ++id__ )
#endif
  {
#if defined(_OPENMP)
    size_t id__ = omp_get_thread_num();
#endif
    uint64_t inlen__ = inlen;
    const uint8_t *in__ = ( const uint8_t * )in;
    in__ += id__ * BLAKE2B_BLOCKBYTES;
    while( inlen__ >= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES )
    {
      blake2b_update( S[id__], in__, BLAKE2B_BLOCKBYTES );
      in__ += PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES;
      inlen__ -= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES;
    }
    /* Tail: this worker's final, possibly partial, block. */
    if( inlen__ > id__ * BLAKE2B_BLOCKBYTES )
    {
      const size_t left = inlen__ - id__ * BLAKE2B_BLOCKBYTES;
      const size_t len = left <= BLAKE2B_BLOCKBYTES ? left : BLAKE2B_BLOCKBYTES;
      blake2b_update( S[id__], in__, len );
    }
    blake2b_final( S[id__], hash[id__], BLAKE2B_OUTBYTES );
  }
  /* Root node hashes the concatenated leaf digests. */
  if( blake2bp_init_root( FS, outlen, keylen ) < 0 )
    return -1;
  FS->last_node = 1; // Mark as last node
  for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
    blake2b_update( FS, hash[i], BLAKE2B_OUTBYTES );
  blake2b_final( FS, out, outlen );
  return 0;
}
#if defined(BLAKE2BP_SELFTEST)
#include <string.h>
#include "blake2-kat.h"
/* Known-answer self test: hash every prefix of a fixed byte pattern
   with a fixed key via the streaming API and compare against the
   precomputed vectors in blake2-kat.h. */
int main( int argc, char **argv )
{
  uint8_t key[BLAKE2B_KEYBYTES];
  uint8_t buf[KAT_LENGTH];
  /* Deterministic key and message: bytes 0, 1, 2, ... */
  for( size_t i = 0; i < BLAKE2B_KEYBYTES; ++i )
    key[i] = ( uint8_t )i;
  for( size_t i = 0; i < KAT_LENGTH; ++i )
    buf[i] = ( uint8_t )i;
  for( size_t i = 0; i < KAT_LENGTH; ++i )
  {
    uint8_t hash[BLAKE2B_OUTBYTES];
    //blake2bp( hash, buf, key, BLAKE2B_OUTBYTES, i, BLAKE2B_KEYBYTES );
    /* Exercise the init/update/final streaming interface on the
       i-byte prefix of buf. */
    blake2bp_state S[1];
    blake2bp_init_key( S, BLAKE2B_OUTBYTES, key, BLAKE2B_KEYBYTES );
    blake2bp_update( S, buf, i );
    blake2bp_final( S, hash, BLAKE2B_OUTBYTES );
    if( 0 != memcmp( hash, blake2bp_keyed_kat[i], BLAKE2B_OUTBYTES ) )
    {
      puts( "error" );
      return -1;
    }
  }
  puts( "ok" );
  return 0;
}
#endif
|
reto.c | /******************************************************************************
* FILE: mpi_mm.c
* DESCRIPTION:
* MPI Matrix Multiply - C Version
* In this code, the master task distributes a matrix multiply
* operation to numtasks-1 worker tasks.
* NOTE: C and Fortran versions of this code differ because of the way
* arrays are stored/passed. C arrays are row-major order but Fortran
* arrays are column-major order.
* AUTHOR: Blaise Barney. Adapted from Ros Leibensperger, Cornell Theory
* Center. Converted to MPI: George L. Gusciora, MHPCC (1/95)
* LAST REVISED: 04/13/05
******************************************************************************/
#include "mpi.h"
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#define NRA 62 /* number of rows in matrix A */
#define NCA 15 /* number of columns in matrix A */
#define NCB 7 /* number of columns in matrix B */
#define MASTER 0 /* taskid of first task */
#define FROM_MASTER 1 /* setting a message type */
#define FROM_WORKER 2 /* setting a message type */
/* Master/worker matrix multiply over MPI.
   The master (rank 0) initializes A (NRA x NCA) and B (NCA x NCB),
   ships each worker a band of rows of A plus the whole of B, and
   gathers the matching band of C = A * B back.  Workers receive,
   multiply their band, and return it.  Requires at least 2 ranks.
   Fixes vs. the original: the worker loop now computes a real matrix
   product (it used `c[i][k] = a[i][j] + b[j][k]`, overwriting instead
   of accumulating), MPI_Abort no longer receives an uninitialized
   error code, and main returns a value. */
int main (int argc, char *argv[])
{
  int numtasks,              /* number of tasks in partition */
      taskid,                /* a task identifier */
      numworkers,            /* number of worker tasks */
      source,                /* task id of message source */
      dest,                  /* task id of message destination */
      mtype,                 /* message type */
      rows,                  /* rows of matrix A sent to each worker */
      averow, extra, offset, /* used to determine rows sent to each worker */
      i, j, k;               /* misc */
  double a[NRA][NCA],        /* matrix A to be multiplied */
         b[NCA][NCB],        /* matrix B to be multiplied */
         c[NRA][NCB];        /* result matrix C */
  double start, end;
  MPI_Status status;

  MPI_Init(&argc, &argv);
  MPI_Comm_rank(MPI_COMM_WORLD, &taskid);
  MPI_Comm_size(MPI_COMM_WORLD, &numtasks);
  if (numtasks < 2) {
    printf("Need at least two MPI tasks. Quitting...\n");
    /* Use a definite nonzero error code; the old code passed an
       uninitialized variable here. */
    MPI_Abort(MPI_COMM_WORLD, 1);
    exit(1);
  }
  numworkers = numtasks - 1;
  start = MPI_Wtime();

  /**************************** master task ************************************/
  if (taskid == MASTER)
  {
    printf("mpi_mm has started with %d tasks.\n", numtasks);
    printf("Initializing arrays...\n");
    for (i = 0; i < NRA; i++)
      for (j = 0; j < NCA; j++)
        a[i][j] = i + j;
    for (i = 0; i < NCA; i++)
      for (j = 0; j < NCB; j++)
        b[i][j] = i * j;

    /* Send matrix data to the worker tasks; the first `extra` workers
       get one extra row so all NRA rows are covered. */
    averow = NRA / numworkers;
    extra = NRA % numworkers;
    offset = 0;
    mtype = FROM_MASTER;
    for (dest = 1; dest <= numworkers; dest++)
    {
      rows = (dest <= extra) ? averow + 1 : averow;
      printf("Sending %d rows to task %d offset=%d\n", rows, dest, offset);
      MPI_Send(&offset, 1, MPI_INT, dest, mtype, MPI_COMM_WORLD);
      MPI_Send(&rows, 1, MPI_INT, dest, mtype, MPI_COMM_WORLD);
      MPI_Send(&a[offset][0], rows*NCA, MPI_DOUBLE, dest, mtype,
               MPI_COMM_WORLD);
      MPI_Send(&b, NCA*NCB, MPI_DOUBLE, dest, mtype, MPI_COMM_WORLD);
      offset = offset + rows;
    }

    /* Receive result bands from worker tasks. */
    mtype = FROM_WORKER;
    for (i = 1; i <= numworkers; i++)
    {
      source = i;
      MPI_Recv(&offset, 1, MPI_INT, source, mtype, MPI_COMM_WORLD, &status);
      MPI_Recv(&rows, 1, MPI_INT, source, mtype, MPI_COMM_WORLD, &status);
      MPI_Recv(&c[offset][0], rows*NCB, MPI_DOUBLE, source, mtype,
               MPI_COMM_WORLD, &status);
      printf("Received results from task %d\n", source);
    }

    /* Print results */
    printf("******************************************************\n");
    printf("Result Matrix:\n");
    for (i = 0; i < NRA; i++)
    {
      printf("\n");
      for (j = 0; j < NCB; j++)
        printf("%6.2f ", c[i][j]);
    }
    printf("\n******************************************************\n");
    printf ("Done.\n");
  }

  /**************************** worker task ************************************/
  if (taskid > MASTER)
  {
    mtype = FROM_MASTER;
    MPI_Recv(&offset, 1, MPI_INT, MASTER, mtype, MPI_COMM_WORLD, &status);
    MPI_Recv(&rows, 1, MPI_INT, MASTER, mtype, MPI_COMM_WORLD, &status);
    MPI_Recv(&a, rows*NCA, MPI_DOUBLE, MASTER, mtype, MPI_COMM_WORLD, &status);
    MPI_Recv(&b, NCA*NCB, MPI_DOUBLE, MASTER, mtype, MPI_COMM_WORLD, &status);

    /* c[i][k] = sum over j of a[i][j] * b[j][k]. */
    for (k = 0; k < NCB; k++)
      for (i = 0; i < rows; i++)
      {
        c[i][k] = 0.0;
        for (j = 0; j < NCA; j++)
          c[i][k] += a[i][j] * b[j][k];
      }

    mtype = FROM_WORKER;
    MPI_Send(&offset, 1, MPI_INT, MASTER, mtype, MPI_COMM_WORLD);
    MPI_Send(&rows, 1, MPI_INT, MASTER, mtype, MPI_COMM_WORLD);
    MPI_Send(&c, rows*NCB, MPI_DOUBLE, MASTER, mtype, MPI_COMM_WORLD);
  }

  end = MPI_Wtime();
  MPI_Finalize();
  printf("diff time = %f\n", end - start);
  return 0;
}
|
pi-v3.c | /*
* Compute pi by approximating the area under the curve f(x) = 4 / (1 + x*x)
* between 0 and 1.
*
* parallel version using OpenMP
*/
#include <stdio.h>
#include <stdlib.h>
#include <omp.h> /* OpenMP */
#if _DEBUG_
#define _DEBUG_ 1
#else
#define _DEBUG_ 0
#include "extrae_user_events.h"
#define PROGRAM 1000
#define PI_COMPUTATION 1
#define END 0
#endif
/* Approximate pi as the midpoint-rule integral of 4/(1+x^2) on [0,1],
   split across OpenMP threads in an interleaved fashion.
   Fix vs. the original: `sum` was a shared variable updated by every
   thread without synchronization (a data race flagged by the old
   "WARNING: incorrect code" comment); reduction(+:sum) gives each
   thread a private partial sum combined at the parallel region's end. */
int main(int argc, char *argv[]) {
  double x, sum = 0.0, pi = 0.0;
#if _DEBUG_
  double start, end;
#endif
  int i;
  const char Usage[] = "Usage: pi <num_steps> (try 1000000000)\n";
  if (argc < 2) {
    /* fputs avoids passing a non-literal string as a printf format. */
    fputs(Usage, stderr);
    exit(1);
  }
  int num_steps = atoi(argv[1]);
  double step = 1.0/(double) num_steps;
#if _DEBUG_
  start = omp_get_wtime();
#else
  Extrae_event (PROGRAM, PI_COMPUTATION);
#endif
  /* Each thread handles iterations id, id+T, id+2T, ...; the reduction
     clause accumulates the per-thread partial sums race-free. */
#pragma omp parallel private(i, x) reduction(+:sum)
  {
    int id = omp_get_thread_num();
    int num_threads = omp_get_num_threads();
    for (i = id; i < num_steps; i = i + num_threads) {
      x = (i + 0.5) * step;
      sum += 4.0/(1.0 + x*x);
#if _DEBUG_
      printf("thread id:%d it:%d\n", id, i);
#endif
    }
  }
  pi = step * sum;
#if _DEBUG_
  end = omp_get_wtime();
  printf("Wall clock execution time = %.9f seconds\n", end-start);
#else
  Extrae_event (PROGRAM, END);
#endif
  /* print results */
  printf("Value of pi = %12.10f\n", pi);
  return EXIT_SUCCESS;
}
|
mmp.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#define NRA 1000 /* number of rows in matrix A */
#define NCA 1000 /* number of columns in matrix A */
#define NCB 1000 /* number of columns in matrix B */
#define printf(...)
/* Allocate a rows x cols matrix of doubles as an array of row pointers.
   Aborts the program if any allocation fails (the original code never
   checked malloc results). */
static double **alloc_matrix(int rows, int cols) {
  int r;
  double **m = malloc(sizeof(double *) * rows);
  if (m == NULL) {
    fprintf(stderr, "allocation failed\n");
    exit(EXIT_FAILURE);
  }
  for (r = 0; r < rows; r++) {
    m[r] = malloc(sizeof(double) * cols);
    if (m[r] == NULL) {
      fprintf(stderr, "allocation failed\n");
      exit(EXIT_FAILURE);
    }
  }
  return m;
}

/* Release a matrix created by alloc_matrix. */
static void free_matrix(double **m, int rows) {
  int r;
  for (r = 0; r < rows; r++)
    free(m[r]);
  free(m);
}

/* Multiply A (NRA x NCA) by B (NCA x NCB) into C with an OpenMP
   parallel loop over the rows of C.  (Note: printf is compiled out by
   a file-level macro, so the program produces no output.) */
int main () {
  int i, j, k;
  double **a = alloc_matrix(NRA, NCA);  /* matrix A to be multiplied */
  double **b = alloc_matrix(NCA, NCB);  /* matrix B to be multiplied */
  double **c = alloc_matrix(NRA, NCB);  /* result matrix C */
  printf("Initializing matrices...\n");
  /*** Initialize matrices ***/
  for (i=0; i<NRA; i++)
    for (j=0; j<NCA; j++)
      a[i][j] = i+j;
  for (i=0; i<NCA; i++)
    for (j=0; j<NCB; j++)
      b[i][j] = i*j;
  for (i=0; i<NRA; i++)
    for (j=0; j<NCB; j++)
      c[i][j] = 0;
  /*** Matrix-matrix multiplication: each thread takes whole rows (i)
       dynamically; j and k are private so inner loops don't race. ***/
  #pragma omp parallel for private(i, j, k) schedule(dynamic)
  for (i=0; i<NRA; i++)
    for (j=0; j<NCB; j++)
      for (k=0; k<NCA; k++)
        c[i][j] += a[i][k] * b[k][j];
  /*** Print results ***/
  printf("******************************************************\n");
  printf("Result Matrix:\n");
  for (i=0; i<NRA; i++)
  {
    for (j=0; j<NCB; j++)
      printf("%10.2f ", c[i][j]);
    printf("\n");
  }
  printf("******************************************************\n");
  printf ("Done.\n");
  free_matrix(a, NRA);
  free_matrix(b, NCA);
  free_matrix(c, NRA);
  return 0;
}
|
parser.c | /* C++ Parser.
Copyright (C) 2000, 2001, 2002, 2003, 2004,
2005 Free Software Foundation, Inc.
Written by Mark Mitchell <mark@codesourcery.com>.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
GCC is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING. If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "dyn-string.h"
#include "varray.h"
#include "cpplib.h"
#include "tree.h"
#include "cp-tree.h"
#include "c-pragma.h"
#include "decl.h"
#include "flags.h"
#include "diagnostic.h"
#include "toplev.h"
#include "output.h"
#include "target.h"
#include "cgraph.h"
#include "c-common.h"
/* The lexer. */
/* The cp_lexer_* routines mediate between the lexer proper (in libcpp
and c-lex.c) and the C++ parser. */
/* A token's value and its associated deferred access checks and
   qualifying scope.  (GTY(()) marks the type for GCC's garbage
   collector type machinery.)  */
struct tree_check GTY(())
{
  /* The value associated with the token.  */
  tree value;
  /* The checks that have been associated with value.  */
  VEC (deferred_access_check, gc)* checks;
  /* The token's qualifying scope (used when it is a
     CPP_NESTED_NAME_SPECIFIER).  */
  tree qualifying_scope;
};
/* A C++ token.  */
typedef struct cp_token GTY (())
{
  /* The kind of token.  */
  ENUM_BITFIELD (cpp_ttype) type : 8;
  /* If this token is a keyword, this value indicates which keyword.
     Otherwise, this value is RID_MAX.  */
  ENUM_BITFIELD (rid) keyword : 8;
  /* Token flags.  */
  unsigned char flags;
  /* Identifier for the pragma.  */
  ENUM_BITFIELD (pragma_kind) pragma_kind : 6;
  /* True if this token is from a system header.  */
  BOOL_BITFIELD in_system_header : 1;
  /* True if this token is from a context where it is implicitly extern "C" */
  BOOL_BITFIELD implicit_extern_c : 1;
  /* True for a CPP_NAME token that is not a keyword (i.e., for which
     KEYWORD is RID_MAX) iff this name was looked up and found to be
     ambiguous.  An error has already been reported.  */
  BOOL_BITFIELD ambiguous_p : 1;
  /* The input file stack index at which this token was found.  */
  unsigned input_file_stack_index : INPUT_FILE_STACK_BITS;
  /* The value associated with this token, if any.  The GTY desc below
     discriminates the union on the token's TYPE field.  */
  union cp_token_value {
    /* Used for CPP_NESTED_NAME_SPECIFIER and CPP_TEMPLATE_ID.  */
    struct tree_check* GTY((tag ("1"))) tree_check_value;
    /* Use for all other tokens.  */
    tree GTY((tag ("0"))) value;
  } GTY((desc ("(%1.type == CPP_TEMPLATE_ID) || (%1.type == CPP_NESTED_NAME_SPECIFIER)"))) u;
  /* The location at which this token was found.  */
  location_t location;
} cp_token;
/* We use a stack of token pointer for saving token sets.  */
typedef struct cp_token *cp_token_position;
DEF_VEC_P (cp_token_position);
DEF_VEC_ALLOC_P (cp_token_position,heap);

/* Shared sentinel token: lexers point next_token at &eof_token when
   they run out of tokens, so it is never part of any buffer.  */
static const cp_token eof_token =
{
  CPP_EOF, RID_MAX, 0, PRAGMA_NONE, 0, 0, false, 0, { NULL },
#if USE_MAPPED_LOCATION
  0
#else
  {0, 0}
#endif
};
/* The cp_lexer structure represents the C++ lexer.  It is responsible
   for managing the token stream from the preprocessor and supplying
   it to the parser.  Tokens are never added to the cp_lexer after
   it is created.  */
typedef struct cp_lexer GTY (())
{
  /* The memory allocated for the buffer.  NULL if this lexer does not
     own the token buffer.  */
  cp_token * GTY ((length ("%h.buffer_length"))) buffer;
  /* If the lexer owns the buffer, this is the number of tokens in the
     buffer.  */
  size_t buffer_length;
  /* A pointer just past the last available token.  The tokens
     in this lexer are [buffer, last_token).  */
  cp_token_position GTY ((skip)) last_token;
  /* The next available token.  If NEXT_TOKEN is &eof_token, then there are
     no more available tokens.  */
  cp_token_position GTY ((skip)) next_token;
  /* A stack indicating positions at which cp_lexer_save_tokens was
     called.  The top entry is the most recent position at which we
     began saving tokens.  If the stack is non-empty, we are saving
     tokens.  */
  VEC(cp_token_position,heap) *GTY ((skip)) saved_tokens;
  /* The next lexer in a linked list of lexers.  */
  struct cp_lexer *next;
  /* True if we should output debugging information.  */
  bool debugging_p;
  /* True if we're in the context of parsing a pragma, and should not
     increment past the end-of-line marker.  */
  bool in_pragma;
} cp_lexer;
/* cp_token_cache is a range of tokens.  There is no need to allocate
   heap memory for the tokens themselves, since tokens are never
   removed from the lexer's array.  There is also no need for the GC
   to walk through a cp_token_cache, since everything in here is
   referenced through a lexer.  */
typedef struct cp_token_cache GTY(())
{
  /* The beginning of the token range.  */
  cp_token * GTY((skip)) first;
  /* Points immediately after the last token in the range.  */
  cp_token * GTY ((skip)) last;
} cp_token_cache;
/* Prototypes for the cp_lexer_* routines defined below.  */
static cp_lexer *cp_lexer_new_main
  (void);
static cp_lexer *cp_lexer_new_from_tokens
  (cp_token_cache *tokens);
static void cp_lexer_destroy
  (cp_lexer *);
static int cp_lexer_saving_tokens
  (const cp_lexer *);
static cp_token_position cp_lexer_token_position
  (cp_lexer *, bool);
static cp_token *cp_lexer_token_at
  (cp_lexer *, cp_token_position);
static void cp_lexer_get_preprocessor_token
  (cp_lexer *, cp_token *);
static inline cp_token *cp_lexer_peek_token
  (cp_lexer *);
static cp_token *cp_lexer_peek_nth_token
  (cp_lexer *, size_t);
static inline bool cp_lexer_next_token_is
  (cp_lexer *, enum cpp_ttype);
static bool cp_lexer_next_token_is_not
  (cp_lexer *, enum cpp_ttype);
static bool cp_lexer_next_token_is_keyword
  (cp_lexer *, enum rid);
static cp_token *cp_lexer_consume_token
  (cp_lexer *);
static void cp_lexer_purge_token
  (cp_lexer *);
static void cp_lexer_purge_tokens_after
  (cp_lexer *, cp_token_position);
static void cp_lexer_save_tokens
  (cp_lexer *);
static void cp_lexer_commit_tokens
  (cp_lexer *);
static void cp_lexer_rollback_tokens
  (cp_lexer *);
#ifdef ENABLE_CHECKING
static void cp_lexer_print_token
  (FILE *, cp_token *);
static inline bool cp_lexer_debugging_p
  (cp_lexer *);
static void cp_lexer_start_debugging
  (cp_lexer *) ATTRIBUTE_UNUSED;
static void cp_lexer_stop_debugging
  (cp_lexer *) ATTRIBUTE_UNUSED;
#else
/* If we define cp_lexer_debug_stream to NULL it will provoke warnings
   about passing NULL to functions that require non-NULL arguments
   (fputs, fprintf).  It will never be used, so all we need is a value
   of the right type that's guaranteed not to be NULL.  */
#define cp_lexer_debug_stream stdout
#define cp_lexer_print_token(str, tok) (void) 0
#define cp_lexer_debugging_p(lexer) 0
#endif /* ENABLE_CHECKING */
static cp_token_cache *cp_token_cache_new
  (cp_token *, cp_token *);
static void cp_parser_initial_pragma
  (cp_token *);

/* Manifest constants.  */
#define CP_LEXER_BUFFER_SIZE ((256 * 1024) / sizeof (cp_token))
#define CP_SAVED_TOKEN_STACK 5

/* A token type for keywords, as opposed to ordinary identifiers.  */
#define CPP_KEYWORD ((enum cpp_ttype) (N_TTYPES + 1))
/* A token type for template-ids.  If a template-id is processed while
   parsing tentatively, it is replaced with a CPP_TEMPLATE_ID token;
   the value of the CPP_TEMPLATE_ID is whatever was returned by
   cp_parser_template_id.  */
#define CPP_TEMPLATE_ID ((enum cpp_ttype) (CPP_KEYWORD + 1))
/* A token type for nested-name-specifiers.  If a
   nested-name-specifier is processed while parsing tentatively, it is
   replaced with a CPP_NESTED_NAME_SPECIFIER token; the value of the
   CPP_NESTED_NAME_SPECIFIER is whatever was returned by
   cp_parser_nested_name_specifier_opt.  */
#define CPP_NESTED_NAME_SPECIFIER ((enum cpp_ttype) (CPP_TEMPLATE_ID + 1))
/* A token type for tokens that are not tokens at all; these are used
   to represent slots in the array where there used to be a token
   that has now been deleted.  */
#define CPP_PURGED ((enum cpp_ttype) (CPP_NESTED_NAME_SPECIFIER + 1))
/* The number of token types, including C++-specific ones.  */
#define N_CP_TTYPES ((int) (CPP_PURGED + 1))

/* Variables.  */
#ifdef ENABLE_CHECKING
/* The stream to which debugging output should be written.  */
static FILE *cp_lexer_debug_stream;
#endif /* ENABLE_CHECKING */
/* Create a new main C++ lexer, the lexer that gets tokens from the
   preprocessor.  Reads the entire token stream up front into a
   growable buffer; the buffer doubles whenever it fills.  */
static cp_lexer *
cp_lexer_new_main (void)
{
  cp_token first_token;
  cp_lexer *lexer;
  cp_token *pos;
  size_t alloc;
  size_t space;
  cp_token *buffer;

  /* It's possible that parsing the first pragma will load a PCH file,
     which is a GC collection point.  So we have to do that before
     allocating any memory.  */
  cp_parser_initial_pragma (&first_token);

  /* Tell c_lex_with_flags not to merge string constants.  */
  c_lex_return_raw_strings = true;

  c_common_no_more_pch ();

  /* Allocate the memory.  */
  lexer = GGC_CNEW (cp_lexer);

#ifdef ENABLE_CHECKING
  /* Initially we are not debugging.  */
  lexer->debugging_p = false;
#endif /* ENABLE_CHECKING */
  lexer->saved_tokens = VEC_alloc (cp_token_position, heap,
                                   CP_SAVED_TOKEN_STACK);

  /* Create the buffer.  */
  alloc = CP_LEXER_BUFFER_SIZE;
  buffer = GGC_NEWVEC (cp_token, alloc);

  /* Put the first token in the buffer.  */
  space = alloc;
  pos = buffer;
  *pos = first_token;

  /* Get the remaining tokens from the preprocessor.  */
  while (pos->type != CPP_EOF)
    {
      pos++;
      if (!--space)
        {
          /* Buffer full: double it, keeping POS at the old end.  */
          space = alloc;
          alloc *= 2;
          buffer = GGC_RESIZEVEC (cp_token, buffer, alloc);
          pos = buffer + space;
        }
      cp_lexer_get_preprocessor_token (lexer, pos);
    }
  lexer->buffer = buffer;
  lexer->buffer_length = alloc - space;
  lexer->last_token = pos;
  lexer->next_token = lexer->buffer_length ? buffer : (cp_token *)&eof_token;

  /* Subsequent preprocessor diagnostics should use compiler
     diagnostic functions to get the compiler source location.  */
  cpp_get_options (parse_in)->client_diagnostic = true;
  cpp_get_callbacks (parse_in)->error = cp_cpp_error;

  gcc_assert (lexer->next_token->type != CPP_PURGED);
  return lexer;
}
/* Create a new lexer whose token stream is primed with the tokens in
   CACHE.  When these tokens are exhausted, no new tokens will be read.
   The new lexer borrows CACHE's storage (buffer is NULL, so it will
   not be freed by cp_lexer_destroy).  */
static cp_lexer *
cp_lexer_new_from_tokens (cp_token_cache *cache)
{
  cp_token *first = cache->first;
  cp_token *last = cache->last;
  cp_lexer *lexer = GGC_CNEW (cp_lexer);

  /* We do not own the buffer.  */
  lexer->buffer = NULL;
  lexer->buffer_length = 0;
  /* An empty range starts out already at EOF.  */
  lexer->next_token = first == last ? (cp_token *)&eof_token : first;
  lexer->last_token = last;

  lexer->saved_tokens = VEC_alloc (cp_token_position, heap,
                                   CP_SAVED_TOKEN_STACK);

#ifdef ENABLE_CHECKING
  /* Initially we are not debugging.  */
  lexer->debugging_p = false;
#endif

  gcc_assert (lexer->next_token->type != CPP_PURGED);
  return lexer;
}
/* Frees all resources associated with LEXER.  Only frees the token
   buffer when this lexer owns it (see cp_lexer_new_from_tokens).  */
static void
cp_lexer_destroy (cp_lexer *lexer)
{
  if (lexer->buffer)
    ggc_free (lexer->buffer);
  VEC_free (cp_token_position, heap, lexer->saved_tokens);
  ggc_free (lexer);
}
/* Returns nonzero if debugging information should be output for LEXER.
   (In non-checking builds this is a macro that is always 0.)  */
#ifdef ENABLE_CHECKING
static inline bool
cp_lexer_debugging_p (cp_lexer *lexer)
{
  return lexer->debugging_p;
}
#endif /* ENABLE_CHECKING */
/* Return the position of the next token to be consumed, or, if
   PREVIOUS_P, the position just before it (PREVIOUS_P is used as the
   0/1 pointer decrement).  Stepping back is only valid when the lexer
   is not already at the EOF sentinel.  */
static inline cp_token_position
cp_lexer_token_position (cp_lexer *lexer, bool previous_p)
{
  gcc_assert (!previous_p || lexer->next_token != &eof_token);
  return lexer->next_token - previous_p;
}
/* Return the token at position POS.  Positions are plain pointers into
   the token buffer, so LEXER itself is unused here.  */
static inline cp_token *
cp_lexer_token_at (cp_lexer *lexer ATTRIBUTE_UNUSED, cp_token_position pos)
{
  return pos;
}
/* nonzero if we are presently saving tokens (i.e., at least one
   cp_lexer_save_tokens call has not yet been committed or rolled
   back).  */
static inline int
cp_lexer_saving_tokens (const cp_lexer* lexer)
{
  return VEC_length (cp_token_position, lexer->saved_tokens) != 0;
}
/* Store the next token from the preprocessor in *TOKEN, filling in its
   keyword/pragma classification and location bookkeeping.  (Returns
   nothing; EOF is indicated by token->type == CPP_EOF.)  */
static void
cp_lexer_get_preprocessor_token (cp_lexer *lexer ATTRIBUTE_UNUSED ,
                                 cp_token *token)
{
  /* Depth of nested implicit extern "C" blocks, accumulated across
     calls via pending_lang_change.  */
  static int is_extern_c = 0;

  /* Get a new token from the preprocessor.  */
  token->type
    = c_lex_with_flags (&token->u.value, &token->location, &token->flags);
  token->input_file_stack_index = input_file_stack_tick;
  token->keyword = RID_MAX;
  token->pragma_kind = PRAGMA_NONE;
  token->in_system_header = in_system_header;

  /* On some systems, some header files are surrounded by an
     implicit extern "C" block.  Set a flag in the token if it
     comes from such a header.  */
  is_extern_c += pending_lang_change;
  pending_lang_change = 0;
  token->implicit_extern_c = is_extern_c > 0;

  /* Check to see if this token is a keyword.  */
  if (token->type == CPP_NAME)
    {
      if (C_IS_RESERVED_WORD (token->u.value))
        {
          /* Mark this token as a keyword.  */
          token->type = CPP_KEYWORD;
          /* Record which keyword.  */
          token->keyword = C_RID_CODE (token->u.value);
          /* Update the value.  Some keywords are mapped to particular
             entities, rather than simply having the value of the
             corresponding IDENTIFIER_NODE.  For example, `__const' is
             mapped to `const'.  */
          token->u.value = ridpointers[token->keyword];
        }
      else
        {
          token->ambiguous_p = false;
          token->keyword = RID_MAX;
        }
    }
  /* Handle Objective-C++ keywords.  */
  else if (token->type == CPP_AT_NAME)
    {
      token->type = CPP_KEYWORD;
      switch (C_RID_CODE (token->u.value))
        {
        /* Map 'class' to '@class', 'private' to '@private', etc.  */
        case RID_CLASS: token->keyword = RID_AT_CLASS; break;
        case RID_PRIVATE: token->keyword = RID_AT_PRIVATE; break;
        case RID_PROTECTED: token->keyword = RID_AT_PROTECTED; break;
        case RID_PUBLIC: token->keyword = RID_AT_PUBLIC; break;
        case RID_THROW: token->keyword = RID_AT_THROW; break;
        case RID_TRY: token->keyword = RID_AT_TRY; break;
        case RID_CATCH: token->keyword = RID_AT_CATCH; break;
        default: token->keyword = C_RID_CODE (token->u.value);
        }
    }
  else if (token->type == CPP_PRAGMA)
    {
      /* We smuggled the cpp_token->u.pragma value in an INTEGER_CST.  */
      token->pragma_kind = TREE_INT_CST_LOW (token->u.value);
      token->u.value = NULL_TREE;
    }
}
/* Update the globals input_location and in_system_header and the
   input file stack from TOKEN.  A CPP_EOF token carries no position,
   so it leaves the globals untouched.  */
static inline void
cp_lexer_set_source_position_from_token (cp_token *token)
{
  if (token->type != CPP_EOF)
    {
      input_location = token->location;
      in_system_header = token->in_system_header;
      restore_input_file_stack (token->input_file_stack_index);
    }
}
/* Return a pointer to the next token in the token stream, but do not
   consume it.  Emits trace output when lexer debugging is enabled.  */
static inline cp_token *
cp_lexer_peek_token (cp_lexer *lexer)
{
  if (cp_lexer_debugging_p (lexer))
    {
      fputs ("cp_lexer: peeking at token: ", cp_lexer_debug_stream);
      cp_lexer_print_token (cp_lexer_debug_stream, lexer->next_token);
      putc ('\n', cp_lexer_debug_stream);
    }
  return lexer->next_token;
}
/* Return true if the next token has the indicated TYPE.  */
static inline bool
cp_lexer_next_token_is (cp_lexer* lexer, enum cpp_ttype type)
{
  cp_token *token = cp_lexer_peek_token (lexer);
  return token->type == type;
}
/* Return true if the next token does not have the indicated TYPE.  */
static bool
cp_lexer_next_token_is_not (cp_lexer* lexer, enum cpp_ttype type)
{
  return cp_lexer_peek_token (lexer)->type != type;
}
/* Return true if the next token is the indicated KEYWORD.  (For
   non-keyword tokens the keyword field is RID_MAX, so this is safe to
   call on any token.)  */
static inline bool
cp_lexer_next_token_is_keyword (cp_lexer* lexer, enum rid keyword)
{
  return cp_lexer_peek_token (lexer)->keyword == keyword;
}
/* Return true if the next token is a keyword that can begin a
   decl-specifier (storage class, elaborated type specifier, simple
   type specifier, or a GNU extension keyword).  */
static bool
cp_lexer_next_token_is_decl_specifier_keyword (cp_lexer *lexer)
{
  cp_token *token;

  token = cp_lexer_peek_token (lexer);
  switch (token->keyword)
    {
      /* Storage classes.  */
    case RID_AUTO:
    case RID_REGISTER:
    case RID_STATIC:
    case RID_EXTERN:
    case RID_MUTABLE:
    case RID_THREAD:
      /* Elaborated type specifiers.  */
    case RID_ENUM:
    case RID_CLASS:
    case RID_STRUCT:
    case RID_UNION:
    case RID_TYPENAME:
      /* Simple type specifiers.  */
    case RID_CHAR:
    case RID_WCHAR:
    case RID_BOOL:
    case RID_SHORT:
    case RID_INT:
    case RID_LONG:
    case RID_SIGNED:
    case RID_UNSIGNED:
    case RID_FLOAT:
    case RID_DOUBLE:
    case RID_VOID:
      /* GNU extensions.  */
    case RID_ATTRIBUTE:
    case RID_TYPEOF:
      return true;

    default:
      return false;
    }
}
/* Return a pointer to the Nth token in the token stream.  If N is 1,
   then this is precisely equivalent to cp_lexer_peek_token (except
   that it is not inline).  One would like to disallow that case, but
   there is one case (cp_parser_nth_token_starts_template_id) where
   the caller passes a variable for N and it might be 1.  Purged
   tokens are skipped and do not count toward N.  */
static cp_token *
cp_lexer_peek_nth_token (cp_lexer* lexer, size_t n)
{
  cp_token *token;

  /* N is 1-based, not zero-based.  */
  gcc_assert (n > 0);

  if (cp_lexer_debugging_p (lexer))
    fprintf (cp_lexer_debug_stream,
             "cp_lexer: peeking ahead %ld at token: ", (long)n);

  --n;
  token = lexer->next_token;
  gcc_assert (!n || token != &eof_token);
  while (n != 0)
    {
      ++token;
      if (token == lexer->last_token)
        {
          /* Ran off the end of the buffer: answer is EOF.  */
          token = (cp_token *)&eof_token;
          break;
        }

      if (token->type != CPP_PURGED)
        --n;
    }

  if (cp_lexer_debugging_p (lexer))
    {
      cp_lexer_print_token (cp_lexer_debug_stream, token);
      putc ('\n', cp_lexer_debug_stream);
    }

  return token;
}
/* Return the next token, and advance the lexer's next_token pointer
   to point to the next non-purged token.  Also updates the global
   source position to the consumed token's location.  */
static cp_token *
cp_lexer_consume_token (cp_lexer* lexer)
{
  cp_token *token = lexer->next_token;

  gcc_assert (token != &eof_token);
  /* While parsing a pragma we must not step past its end-of-line
     marker (see cp_lexer's in_pragma flag).  */
  gcc_assert (!lexer->in_pragma || token->type != CPP_PRAGMA_EOL);

  do
    {
      lexer->next_token++;
      if (lexer->next_token == lexer->last_token)
        {
          lexer->next_token = (cp_token *)&eof_token;
          break;
        }
    }
  while (lexer->next_token->type == CPP_PURGED);

  cp_lexer_set_source_position_from_token (token);

  /* Provide debugging output.  */
  if (cp_lexer_debugging_p (lexer))
    {
      fputs ("cp_lexer: consuming token: ", cp_lexer_debug_stream);
      cp_lexer_print_token (cp_lexer_debug_stream, token);
      putc ('\n', cp_lexer_debug_stream);
    }

  return token;
}
/* Permanently remove the next token from the token stream, and
   advance the next_token pointer to refer to the next non-purged
   token.  The slot is marked CPP_PURGED and its payload cleared so
   the GC does not keep its value alive.  */
static void
cp_lexer_purge_token (cp_lexer *lexer)
{
  cp_token *tok = lexer->next_token;

  gcc_assert (tok != &eof_token);
  tok->type = CPP_PURGED;
  tok->location = UNKNOWN_LOCATION;
  tok->u.value = NULL_TREE;
  tok->keyword = RID_MAX;

  do
    {
      tok++;
      if (tok == lexer->last_token)
        {
          tok = (cp_token *)&eof_token;
          break;
        }
    }
  while (tok->type == CPP_PURGED);

  lexer->next_token = tok;
}
/* Permanently remove all tokens after TOK, up to, but not
   including, the token that will be returned next by
   cp_lexer_peek_token.  */
static void
cp_lexer_purge_tokens_after (cp_lexer *lexer, cp_token *tok)
{
  cp_token *peek = lexer->next_token;

  /* If the lexer is at EOF, the purged range ends at the buffer's
     true end rather than at the shared sentinel.  */
  if (peek == &eof_token)
    peek = lexer->last_token;

  gcc_assert (tok < peek);

  for ( tok += 1; tok != peek; tok += 1)
    {
      tok->type = CPP_PURGED;
      tok->location = UNKNOWN_LOCATION;
      tok->u.value = NULL_TREE;
      tok->keyword = RID_MAX;
    }
}
/* Begin saving tokens.  All tokens consumed after this point will be
   preserved.  Pushes the current position so a later commit or
   rollback can pop it.  */
static void
cp_lexer_save_tokens (cp_lexer* lexer)
{
  /* Provide debugging output.  */
  if (cp_lexer_debugging_p (lexer))
    fprintf (cp_lexer_debug_stream, "cp_lexer: saving tokens\n");

  VEC_safe_push (cp_token_position, heap,
                 lexer->saved_tokens, lexer->next_token);
}
/* Commit to the portion of the token stream most recently saved:
   discard the saved position without moving next_token.  */
static void
cp_lexer_commit_tokens (cp_lexer* lexer)
{
  /* Provide debugging output.  */
  if (cp_lexer_debugging_p (lexer))
    fprintf (cp_lexer_debug_stream, "cp_lexer: committing tokens\n");

  VEC_pop (cp_token_position, lexer->saved_tokens);
}
/* Return all tokens saved since the last call to cp_lexer_save_tokens
   to the token stream.  Stop saving tokens.  (next_token is rewound
   to the saved position.)  */
static void
cp_lexer_rollback_tokens (cp_lexer* lexer)
{
  /* Provide debugging output.  */
  if (cp_lexer_debugging_p (lexer))
    fprintf (cp_lexer_debug_stream, "cp_lexer: restoring tokens\n");

  lexer->next_token = VEC_pop (cp_token_position, lexer->saved_tokens);
}
/* Print a human-readable representation of TOKEN on STREAM, for
   debugging: the token-type name followed, for identifier/keyword and
   string tokens, by their payload.  Only compiled when checking is
   enabled.  */
#ifdef ENABLE_CHECKING
static void
cp_lexer_print_token (FILE * stream, cp_token *token)
{
  /* We don't use cpp_type2name here because the parser defines
     a few tokens of its own.  */
  static const char *const token_names[] = {
    /* cpplib-defined token types; the OP/TK macros stringize each
       enumerator name from cpplib's TTYPE_TABLE.  */
#define OP(e, s) #e,
#define TK(e, s) #e,
    TTYPE_TABLE
#undef OP
#undef TK
    /* C++ parser token types - see "Manifest constants", above.  */
    "KEYWORD",
    "TEMPLATE_ID",
    "NESTED_NAME_SPECIFIER",
    "PURGED"
  };
  /* Every token type must have a name in the table; there is no
     numeric fallback, so an out-of-range type is a checking failure.  */
  gcc_assert (token->type < ARRAY_SIZE(token_names));
  fputs (token_names[token->type], stream);
  /* For some tokens, print the associated data.  */
  switch (token->type)
    {
    case CPP_KEYWORD:
      /* Some keywords have a value that is not an IDENTIFIER_NODE.
	 For example, `struct' is mapped to an INTEGER_CST.  */
      if (TREE_CODE (token->u.value) != IDENTIFIER_NODE)
	break;
      /* else fall through */
    case CPP_NAME:
      fputs (IDENTIFIER_POINTER (token->u.value), stream);
      break;
    case CPP_STRING:
    case CPP_WSTRING:
      fprintf (stream, " \"%s\"", TREE_STRING_POINTER (token->u.value));
      break;
    default:
      break;
    }
}
/* Start emitting debugging information: subsequent lexer operations
   will log to CP_LEXER_DEBUG_STREAM.  */
static void
cp_lexer_start_debugging (cp_lexer* lexer)
{
  lexer->debugging_p = true;
}
/* Stop emitting debugging information for LEXER.  */
static void
cp_lexer_stop_debugging (cp_lexer* lexer)
{
  lexer->debugging_p = false;
}
#endif /* ENABLE_CHECKING */
/* Create a new cp_token_cache on the GC heap, representing the range
   of tokens [FIRST, LAST).  The cache does not copy the tokens; it
   only records the boundaries.  */
static cp_token_cache *
cp_token_cache_new (cp_token *first, cp_token *last)
{
  cp_token_cache *cache = GGC_NEW (cp_token_cache);

  cache->last = last;
  cache->first = first;
  return cache;
}
/* Decl-specifiers. */
/* Set *DECL_SPECS to represent an empty decl-specifier-seq by
   zeroing the entire structure.  */
static void
clear_decl_specs (cp_decl_specifier_seq *decl_specs)
{
  memset (decl_specs, 0, sizeof (cp_decl_specifier_seq));
}
/* Declarators. */
/* Nothing other than the parser should be creating declarators;
declarators are a semi-syntactic representation of C++ entities.
Other parts of the front end that need to create entities (like
VAR_DECLs or FUNCTION_DECLs) should do that directly. */
static cp_declarator *make_call_declarator
(cp_declarator *, cp_parameter_declarator *, cp_cv_quals, tree);
static cp_declarator *make_array_declarator
(cp_declarator *, tree);
static cp_declarator *make_pointer_declarator
(cp_cv_quals, cp_declarator *);
static cp_declarator *make_reference_declarator
(cp_cv_quals, cp_declarator *);
static cp_parameter_declarator *make_parameter_declarator
(cp_decl_specifier_seq *, cp_declarator *, tree);
static cp_declarator *make_ptrmem_declarator
(cp_cv_quals, tree, cp_declarator *);
/* An erroneous declarator. */
static cp_declarator *cp_error_declarator;
/* The obstack on which declarators and related data structures are
allocated. */
static struct obstack declarator_obstack;
/* Alloc BYTES from the declarator memory pool (DECLARATOR_OBSTACK).
   Storage is reclaimed en masse when the obstack is released, not
   individually freed.  */
static inline void *
alloc_declarator (size_t bytes)
{
  return obstack_alloc (&declarator_obstack, bytes);
}
/* Allocate a declarator of the indicated KIND from the declarator
   obstack and clear the fields common to all declarator kinds.
   Kind-specific fields (the `u' union) are left for the caller.  */
static cp_declarator *
make_declarator (cp_declarator_kind kind)
{
  cp_declarator *d
    = (cp_declarator *) alloc_declarator (sizeof (cp_declarator));

  d->kind = kind;
  d->attributes = NULL_TREE;
  d->declarator = NULL;
  return d;
}
/* Make a declarator for a generalized identifier.  If
   QUALIFYING_SCOPE is non-NULL, the identifier is
   QUALIFYING_SCOPE::UNQUALIFIED_NAME; otherwise, it is just
   UNQUALIFIED_NAME.  SFK indicates the kind of special function this
   is, if any.  */
static cp_declarator *
make_id_declarator (tree qualifying_scope, tree unqualified_name,
		    special_function_kind sfk)
{
  cp_declarator *id;

  /* It is valid to write:

       class C { void f(); };
       typedef C D;
       void D::f();

     The standard is not clear about whether `typedef const C D' is
     legal; as of 2002-09-15 the committee is considering that
     question.  EDG 3.0 allows that syntax.  Therefore, we do as
     well: fold a typedef'd scope down to its main variant.  */
  if (qualifying_scope && TYPE_P (qualifying_scope))
    qualifying_scope = TYPE_MAIN_VARIANT (qualifying_scope);

  gcc_assert (TREE_CODE (unqualified_name) == IDENTIFIER_NODE
	      || TREE_CODE (unqualified_name) == BIT_NOT_EXPR
	      || TREE_CODE (unqualified_name) == TEMPLATE_ID_EXPR);

  id = make_declarator (cdk_id);
  id->u.id.qualifying_scope = qualifying_scope;
  id->u.id.unqualified_name = unqualified_name;
  id->u.id.sfk = sfk;
  return id;
}
/* Make a declarator for a pointer to TARGET.  CV_QUALIFIERS is a list
   of modifiers such as const or volatile to apply to the pointer
   type, represented as identifiers.  */
cp_declarator *
make_pointer_declarator (cp_cv_quals cv_qualifiers, cp_declarator *target)
{
  cp_declarator *ptr = make_declarator (cdk_pointer);

  ptr->declarator = target;
  ptr->u.pointer.qualifiers = cv_qualifiers;
  /* Plain pointers have no associated class; see
     make_ptrmem_declarator for pointers-to-member.  */
  ptr->u.pointer.class_type = NULL_TREE;
  return ptr;
}
/* Like make_pointer_declarator -- but for references.  CV_QUALIFIERS
   gives the cv-qualifiers to attach; TARGET is the referent.  */
cp_declarator *
make_reference_declarator (cp_cv_quals cv_qualifiers, cp_declarator *target)
{
  cp_declarator *ref = make_declarator (cdk_reference);

  ref->declarator = target;
  /* References share the `pointer' union member with pointers.  */
  ref->u.pointer.qualifiers = cv_qualifiers;
  ref->u.pointer.class_type = NULL_TREE;
  return ref;
}
/* Like make_pointer_declarator -- but for a pointer to a non-static
   member of CLASS_TYPE.  POINTEE is the member's declarator and
   CV_QUALIFIERS the qualifiers on the pointer itself.  */
cp_declarator *
make_ptrmem_declarator (cp_cv_quals cv_qualifiers, tree class_type,
			cp_declarator *pointee)
{
  cp_declarator *ptrmem = make_declarator (cdk_ptrmem);

  ptrmem->declarator = pointee;
  ptrmem->u.pointer.qualifiers = cv_qualifiers;
  /* CLASS_TYPE distinguishes a pointer-to-member from an ordinary
     pointer, which leaves this field NULL_TREE.  */
  ptrmem->u.pointer.class_type = class_type;
  return ptrmem;
}
/* Make a declarator for the function given by TARGET, with the
   indicated PARMS.  The CV_QUALIFIERS apply to the function, as in a
   "const"-qualified member function.  The EXCEPTION_SPECIFICATION
   indicates what exceptions can be thrown.  */
cp_declarator *
make_call_declarator (cp_declarator *target,
		      cp_parameter_declarator *parms,
		      cp_cv_quals cv_qualifiers,
		      tree exception_specification)
{
  cp_declarator *fn = make_declarator (cdk_function);

  fn->declarator = target;
  fn->u.function.parameters = parms;
  fn->u.function.qualifiers = cv_qualifiers;
  fn->u.function.exception_specification = exception_specification;
  return fn;
}
/* Make a declarator for an array of BOUNDS elements, each of which is
   defined by ELEMENT.  BOUNDS may be NULL_TREE for an array of
   unspecified size.  */
cp_declarator *
make_array_declarator (cp_declarator *element, tree bounds)
{
  cp_declarator *arr = make_declarator (cdk_array);

  arr->declarator = element;
  arr->u.array.bounds = bounds;
  return arr;
}
cp_parameter_declarator *no_parameters;
/* Create a parameter declarator with the indicated DECL_SPECIFIERS,
   DECLARATOR and DEFAULT_ARGUMENT.  If DECL_SPECIFIERS is NULL, the
   parameter's decl-specifier-seq is cleared to the empty sequence.
   The result is allocated on the declarator obstack; it is not linked
   into any list and is not an ellipsis parameter.  */
cp_parameter_declarator *
make_parameter_declarator (cp_decl_specifier_seq *decl_specifiers,
			   cp_declarator *declarator,
			   tree default_argument)
{
  cp_parameter_declarator *parameter;

  parameter = ((cp_parameter_declarator *)
	       alloc_declarator (sizeof (cp_parameter_declarator)));
  parameter->next = NULL;
  if (decl_specifiers)
    parameter->decl_specifiers = *decl_specifiers;
  else
    clear_decl_specs (&parameter->decl_specifiers);
  parameter->declarator = declarator;
  parameter->default_argument = default_argument;
  parameter->ellipsis_p = false;
  return parameter;
}
/* Returns true iff DECLARATOR is a declaration for a function. */
static bool
function_declarator_p (const cp_declarator *declarator)
{
while (declarator)
{
if (declarator->kind == cdk_function
&& declarator->declarator->kind == cdk_id)
return true;
if (declarator->kind == cdk_id
|| declarator->kind == cdk_error)
return false;
declarator = declarator->declarator;
}
return false;
}
/* The parser. */
/* Overview
--------
A cp_parser parses the token stream as specified by the C++
grammar. Its job is purely parsing, not semantic analysis. For
example, the parser breaks the token stream into declarators,
expressions, statements, and other similar syntactic constructs.
It does not check that the types of the expressions on either side
of an assignment-statement are compatible, or that a function is
not declared with a parameter of type `void'.
The parser invokes routines elsewhere in the compiler to perform
semantic analysis and to build up the abstract syntax tree for the
code processed.
The parser (and the template instantiation code, which is, in a
way, a close relative of parsing) are the only parts of the
compiler that should be calling push_scope and pop_scope, or
related functions. The parser (and template instantiation code)
keeps track of what scope is presently active; everything else
should simply honor that. (The code that generates static
initializers may also need to set the scope, in order to check
access control correctly when emitting the initializers.)
Methodology
-----------
The parser is of the standard recursive-descent variety. Upcoming
tokens in the token stream are examined in order to determine which
production to use when parsing a non-terminal. Some C++ constructs
require arbitrary look ahead to disambiguate. For example, it is
impossible, in the general case, to tell whether a statement is an
expression or declaration without scanning the entire statement.
Therefore, the parser is capable of "parsing tentatively." When the
parser is not sure what construct comes next, it enters this mode.
Then, while we attempt to parse the construct, the parser queues up
error messages, rather than issuing them immediately, and saves the
tokens it consumes. If the construct is parsed successfully, the
parser "commits", i.e., it issues any queued error messages and
the tokens that were being preserved are permanently discarded.
If, however, the construct is not parsed successfully, the parser
rolls back its state completely so that it can resume parsing using
a different alternative.
Future Improvements
-------------------
The performance of the parser could probably be improved substantially.
We could often eliminate the need to parse tentatively by looking ahead
a little bit. In some places, this approach might not entirely eliminate
the need to parse tentatively, but it might still speed up the average
case. */
/* Flags that are passed to some parsing functions.  These values can
   be bitwise-ored together to form a mask.  */
typedef enum cp_parser_flags
{
  /* No flags.  */
  CP_PARSER_FLAGS_NONE = 0x0,
  /* The construct is optional.  If it is not present, then no error
     should be issued.  */
  CP_PARSER_FLAGS_OPTIONAL = 0x1,
  /* When parsing a type-specifier, do not allow user-defined types.  */
  CP_PARSER_FLAGS_NO_USER_DEFINED_TYPES = 0x2
} cp_parser_flags;
/* The different kinds of declarators we want to parse; passed to
   cp_parser_declarator and friends to constrain what they accept.  */
typedef enum cp_parser_declarator_kind
{
  /* We want an abstract declarator (one naming no entity).  */
  CP_PARSER_DECLARATOR_ABSTRACT,
  /* We want a named declarator.  */
  CP_PARSER_DECLARATOR_NAMED,
  /* We don't mind, but the name must be an unqualified-id.  */
  CP_PARSER_DECLARATOR_EITHER
} cp_parser_declarator_kind;
/* The precedence values used to parse binary expressions.  The minimum value
   of PREC must be 1, because zero is reserved to quickly discriminate
   binary operators from other tokens.  Values are ordered from lowest
   precedence (logical-or) to highest (pointer-to-member).  */
enum cp_parser_prec
{
  PREC_NOT_OPERATOR,
  PREC_LOGICAL_OR_EXPRESSION,
  PREC_LOGICAL_AND_EXPRESSION,
  PREC_INCLUSIVE_OR_EXPRESSION,
  PREC_EXCLUSIVE_OR_EXPRESSION,
  PREC_AND_EXPRESSION,
  PREC_EQUALITY_EXPRESSION,
  PREC_RELATIONAL_EXPRESSION,
  PREC_SHIFT_EXPRESSION,
  PREC_ADDITIVE_EXPRESSION,
  PREC_MULTIPLICATIVE_EXPRESSION,
  PREC_PM_EXPRESSION,
  /* Sizes the operator stack used by cp_parser_binary_expression.  */
  NUM_PREC_VALUES = PREC_PM_EXPRESSION
};
/* A mapping from a token type to a corresponding tree node type, with a
   precedence value; one row of the BINOPS table below.  */
typedef struct cp_parser_binary_operations_map_node
{
  /* The token type.  */
  enum cpp_ttype token_type;
  /* The corresponding tree code.  */
  enum tree_code tree_type;
  /* The precedence of this operator.  */
  enum cp_parser_prec prec;
} cp_parser_binary_operations_map_node;
/* The status of a tentative parse, kept in a cp_parser_context.  */
typedef enum cp_parser_status_kind
{
  /* No errors have occurred.  */
  CP_PARSER_STATUS_KIND_NO_ERROR,
  /* An error has occurred.  */
  CP_PARSER_STATUS_KIND_ERROR,
  /* We are committed to this tentative parse, whether or not an error
     has occurred.  */
  CP_PARSER_STATUS_KIND_COMMITTED
} cp_parser_status_kind;
/* One partially-parsed binary expression held on the stack used by
   cp_parser_binary_expression.  */
typedef struct cp_parser_expression_stack_entry
{
  /* The left-hand operand parsed so far.  */
  tree lhs;
  /* The tree code of the pending binary operator.  */
  enum tree_code tree_type;
  /* The precedence of the pending operator (an enum cp_parser_prec
     value, stored as int).  */
  int prec;
} cp_parser_expression_stack_entry;
/* The stack for storing partial expressions.  We only need NUM_PREC_VALUES
   entries because precedence levels on the stack are monotonically
   increasing.  */
typedef struct cp_parser_expression_stack_entry
  cp_parser_expression_stack[NUM_PREC_VALUES];
/* Context that is saved and restored when parsing tentatively; one
   entry of the parser's context stack.  */
typedef struct cp_parser_context GTY (())
{
  /* If this is a tentative parsing context, the status of the
     tentative parse.  */
  enum cp_parser_status_kind status;
  /* If non-NULL, we have just seen a `x->' or `x.' expression.  Names
     that are looked up in this context must be looked up both in the
     scope given by OBJECT_TYPE (the type of `x' or `*x') and also in
     the context of the containing expression.  */
  tree object_type;
  /* The next parsing context in the stack (the one below this).  */
  struct cp_parser_context *next;
} cp_parser_context;
/* Prototypes. */
/* Constructors and destructors. */
static cp_parser_context *cp_parser_context_new
(cp_parser_context *);
/* Class variables. */
static GTY((deletable)) cp_parser_context* cp_parser_context_free_list;
/* The operator-precedence table used by cp_parser_binary_expression.
   Transformed into an associative array (binops_by_token) by
   cp_parser_new.  Each row is { token, tree code, precedence }.  */
static const cp_parser_binary_operations_map_node binops[] = {
  { CPP_DEREF_STAR, MEMBER_REF, PREC_PM_EXPRESSION },
  { CPP_DOT_STAR, DOTSTAR_EXPR, PREC_PM_EXPRESSION },
  { CPP_MULT, MULT_EXPR, PREC_MULTIPLICATIVE_EXPRESSION },
  { CPP_DIV, TRUNC_DIV_EXPR, PREC_MULTIPLICATIVE_EXPRESSION },
  { CPP_MOD, TRUNC_MOD_EXPR, PREC_MULTIPLICATIVE_EXPRESSION },
  { CPP_PLUS, PLUS_EXPR, PREC_ADDITIVE_EXPRESSION },
  { CPP_MINUS, MINUS_EXPR, PREC_ADDITIVE_EXPRESSION },
  { CPP_LSHIFT, LSHIFT_EXPR, PREC_SHIFT_EXPRESSION },
  { CPP_RSHIFT, RSHIFT_EXPR, PREC_SHIFT_EXPRESSION },
  { CPP_LESS, LT_EXPR, PREC_RELATIONAL_EXPRESSION },
  { CPP_GREATER, GT_EXPR, PREC_RELATIONAL_EXPRESSION },
  { CPP_LESS_EQ, LE_EXPR, PREC_RELATIONAL_EXPRESSION },
  { CPP_GREATER_EQ, GE_EXPR, PREC_RELATIONAL_EXPRESSION },
  { CPP_EQ_EQ, EQ_EXPR, PREC_EQUALITY_EXPRESSION },
  { CPP_NOT_EQ, NE_EXPR, PREC_EQUALITY_EXPRESSION },
  { CPP_AND, BIT_AND_EXPR, PREC_AND_EXPRESSION },
  { CPP_XOR, BIT_XOR_EXPR, PREC_EXCLUSIVE_OR_EXPRESSION },
  { CPP_OR, BIT_IOR_EXPR, PREC_INCLUSIVE_OR_EXPRESSION },
  { CPP_AND_AND, TRUTH_ANDIF_EXPR, PREC_LOGICAL_AND_EXPRESSION },
  { CPP_OR_OR, TRUTH_ORIF_EXPR, PREC_LOGICAL_OR_EXPRESSION }
};
/* The same as binops, but initialized by cp_parser_new so that
binops_by_token[N].token_type == N. Used in cp_parser_binary_expression
for speed. */
static cp_parser_binary_operations_map_node binops_by_token[N_CP_TTYPES];
/* Constructors and destructors. */
/* Construct a new parsing context.  The context below this one on the
   stack is given by NEXT (NULL for the bottommost context).  Storage
   is recycled from CP_PARSER_CONTEXT_FREE_LIST when possible.  */
static cp_parser_context *
cp_parser_context_new (cp_parser_context* next)
{
  cp_parser_context *ctx;

  /* Allocate the storage: prefer a recycled entry from the free
     list; otherwise obtain zeroed storage from the GC heap.  */
  if (cp_parser_context_free_list == NULL)
    ctx = GGC_CNEW (cp_parser_context);
  else
    {
      ctx = cp_parser_context_free_list;
      cp_parser_context_free_list = ctx->next;
      memset (ctx, 0, sizeof (*ctx));
    }

  /* No errors have occurred yet in this context.  */
  ctx->status = CP_PARSER_STATUS_KIND_NO_ERROR;

  /* Unless this is the bottommost context, copy information that we
     need from the previous context and thread the stack.  */
  if (next != NULL)
    {
      /* If, in the NEXT context, we are parsing an `x->' or `x.'
	 expression, then we are parsing one in this context, too.  */
      ctx->object_type = next->object_type;
      ctx->next = next;
    }
  return ctx;
}
/* The cp_parser structure represents the C++ parser.  */
typedef struct cp_parser GTY(())
{
  /* The lexer from which we are obtaining tokens.  */
  cp_lexer *lexer;
  /* The scope in which names should be looked up.  If NULL_TREE, then
     we look up names in the scope that is currently open in the
     source program.  If non-NULL, this is either a TYPE or
     NAMESPACE_DECL for the scope in which we should look.  It can
     also be ERROR_MARK, when we've parsed a bogus scope.
     This value is not cleared automatically after a name is looked
     up, so we must be careful to clear it before starting a new look
     up sequence.  (If it is not cleared, then `X::Y' followed by `Z'
     will look up `Z' in the scope of `X', rather than the current
     scope.)  Unfortunately, it is difficult to tell when name lookup
     is complete, because we sometimes peek at a token, look it up,
     and then decide not to consume it.  */
  tree scope;
  /* OBJECT_SCOPE and QUALIFYING_SCOPE give the scopes in which the
     last lookup took place.  OBJECT_SCOPE is used if an expression
     like "x->y" or "x.y" was used; it gives the type of "*x" or "x",
     respectively.  QUALIFYING_SCOPE is used for an expression of the
     form "X::Y"; it refers to X.  */
  tree object_scope;
  tree qualifying_scope;
  /* A stack of parsing contexts.  All but the bottom entry on the
     stack will be tentative contexts.
     We parse tentatively in order to determine which construct is in
     use in some situations.  For example, in order to determine
     whether a statement is an expression-statement or a
     declaration-statement we parse it tentatively as a
     declaration-statement.  If that fails, we then reparse the same
     token stream as an expression-statement.  */
  cp_parser_context *context;
  /* True if we are parsing GNU C++.  If this flag is not set, then
     GNU extensions are not recognized.  */
  bool allow_gnu_extensions_p;
  /* TRUE if the `>' token should be interpreted as the greater-than
     operator.  FALSE if it is the end of a template-id or
     template-parameter-list.  */
  bool greater_than_is_operator_p;
  /* TRUE if default arguments are allowed within a parameter list
     that starts at this point.  FALSE if only a gnu extension makes
     them permissible.  */
  bool default_arg_ok_p;
  /* TRUE if we are parsing an integral constant-expression.  See
     [expr.const] for a precise definition.  */
  bool integral_constant_expression_p;
  /* TRUE if we are parsing an integral constant-expression -- but a
     non-constant expression should be permitted as well.  This flag
     is used when parsing an array bound so that GNU variable-length
     arrays are tolerated.  */
  bool allow_non_integral_constant_expression_p;
  /* TRUE if ALLOW_NON_INTEGRAL_CONSTANT_EXPRESSION_P is TRUE and
     something has been seen that makes the expression non-constant.  */
  bool non_integral_constant_expression_p;
  /* TRUE if local variable names and `this' are forbidden in the
     current context.  */
  bool local_variables_forbidden_p;
  /* TRUE if the declaration we are parsing is part of a
     linkage-specification of the form `extern string-literal
     declaration'.  */
  bool in_unbraced_linkage_specification_p;
  /* TRUE if we are presently parsing a declarator, after the
     direct-declarator.  */
  bool in_declarator_p;
  /* TRUE if we are presently parsing a template-argument-list.  */
  bool in_template_argument_list_p;
  /* Set to IN_ITERATION_STMT if parsing an iteration-statement,
     to IN_OMP_BLOCK if parsing OpenMP structured block and
     IN_OMP_FOR if parsing OpenMP loop.  If parsing a switch statement,
     this is bitwise ORed with IN_SWITCH_STMT, unless parsing an
     iteration-statement, OpenMP block or loop within that switch.  */
#define IN_SWITCH_STMT		1
#define IN_ITERATION_STMT	2
#define IN_OMP_BLOCK		4
#define IN_OMP_FOR		8
  unsigned char in_statement;
  /* TRUE if we are presently parsing the body of a switch statement.
     Note that this doesn't quite overlap with in_statement above.
     The difference relates to giving the right sets of error messages:
     "case not in switch" vs "break statement used with OpenMP...".  */
  bool in_switch_statement_p;
  /* TRUE if we are parsing a type-id in an expression context.  In
     such a situation, both "type (expr)" and "type (type)" are valid
     alternatives.  */
  bool in_type_id_in_expr_p;
  /* TRUE if we are currently in a header file where declarations are
     implicitly extern "C".  */
  bool implicit_extern_c;
  /* TRUE if strings in expressions should be translated to the execution
     character set.  */
  bool translate_strings_p;
  /* TRUE if we are presently parsing the body of a function, but not
     a local class.  */
  bool in_function_body;
  /* If non-NULL, then we are parsing a construct where new type
     definitions are not permitted.  The string stored here will be
     issued as an error message if a type is defined.  */
  const char *type_definition_forbidden_message;
  /* A list of lists.  The outer list is a stack, used for member
     functions of local classes.  At each level there are two sub-list,
     one on TREE_VALUE and one on TREE_PURPOSE.  Each of those
     sub-lists has a FUNCTION_DECL or TEMPLATE_DECL on their
     TREE_VALUE's.  The functions are chained in reverse declaration
     order.
     The TREE_PURPOSE sublist contains those functions with default
     arguments that need post processing, and the TREE_VALUE sublist
     contains those functions with definitions that need post
     processing.
     These lists can only be processed once the outermost class being
     defined is complete.  */
  tree unparsed_functions_queues;
  /* The number of classes whose definitions are currently in
     progress.  */
  unsigned num_classes_being_defined;
  /* The number of template parameter lists that apply directly to the
     current declaration.  */
  unsigned num_template_parameter_lists;
} cp_parser;
/* Prototypes. */
/* Constructors and destructors. */
static cp_parser *cp_parser_new
(void);
/* Routines to parse various constructs.
Those that return `tree' will return the error_mark_node (rather
than NULL_TREE) if a parse error occurs, unless otherwise noted.
Sometimes, they will return an ordinary node if error-recovery was
attempted, even though a parse error occurred. So, to check
whether or not a parse error occurred, you should always use
cp_parser_error_occurred. If the construct is optional (indicated
either by an `_opt' in the name of the function that does the
parsing or via a FLAGS parameter), then NULL_TREE is returned if
the construct is not present. */
/* Lexical conventions [gram.lex] */
static tree cp_parser_identifier
(cp_parser *);
static tree cp_parser_string_literal
(cp_parser *, bool, bool);
/* Basic concepts [gram.basic] */
static bool cp_parser_translation_unit
(cp_parser *);
/* Expressions [gram.expr] */
static tree cp_parser_primary_expression
(cp_parser *, bool, bool, bool, cp_id_kind *);
static tree cp_parser_id_expression
(cp_parser *, bool, bool, bool *, bool, bool);
static tree cp_parser_unqualified_id
(cp_parser *, bool, bool, bool, bool);
static tree cp_parser_nested_name_specifier_opt
(cp_parser *, bool, bool, bool, bool);
static tree cp_parser_nested_name_specifier
(cp_parser *, bool, bool, bool, bool);
static tree cp_parser_class_or_namespace_name
(cp_parser *, bool, bool, bool, bool, bool);
static tree cp_parser_postfix_expression
(cp_parser *, bool, bool);
static tree cp_parser_postfix_open_square_expression
(cp_parser *, tree, bool);
static tree cp_parser_postfix_dot_deref_expression
(cp_parser *, enum cpp_ttype, tree, bool, cp_id_kind *);
static tree cp_parser_parenthesized_expression_list
(cp_parser *, bool, bool, bool *);
static void cp_parser_pseudo_destructor_name
(cp_parser *, tree *, tree *);
static tree cp_parser_unary_expression
(cp_parser *, bool, bool);
static enum tree_code cp_parser_unary_operator
(cp_token *);
static tree cp_parser_new_expression
(cp_parser *);
static tree cp_parser_new_placement
(cp_parser *);
static tree cp_parser_new_type_id
(cp_parser *, tree *);
static cp_declarator *cp_parser_new_declarator_opt
(cp_parser *);
static cp_declarator *cp_parser_direct_new_declarator
(cp_parser *);
static tree cp_parser_new_initializer
(cp_parser *);
static tree cp_parser_delete_expression
(cp_parser *);
static tree cp_parser_cast_expression
(cp_parser *, bool, bool);
static tree cp_parser_binary_expression
(cp_parser *, bool, enum cp_parser_prec);
static tree cp_parser_question_colon_clause
(cp_parser *, tree);
static tree cp_parser_assignment_expression
(cp_parser *, bool);
static enum tree_code cp_parser_assignment_operator_opt
(cp_parser *);
static tree cp_parser_expression
(cp_parser *, bool);
static tree cp_parser_constant_expression
(cp_parser *, bool, bool *);
static tree cp_parser_builtin_offsetof
(cp_parser *);
/* Statements [gram.stmt.stmt] */
static void cp_parser_statement
(cp_parser *, tree, bool);
static void cp_parser_label_for_labeled_statement
(cp_parser *);
static tree cp_parser_expression_statement
(cp_parser *, tree);
static tree cp_parser_compound_statement
(cp_parser *, tree, bool);
static void cp_parser_statement_seq_opt
(cp_parser *, tree);
static tree cp_parser_selection_statement
(cp_parser *);
static tree cp_parser_condition
(cp_parser *);
static tree cp_parser_iteration_statement
(cp_parser *);
static void cp_parser_for_init_statement
(cp_parser *);
static tree cp_parser_jump_statement
(cp_parser *);
static void cp_parser_declaration_statement
(cp_parser *);
static tree cp_parser_implicitly_scoped_statement
(cp_parser *);
static void cp_parser_already_scoped_statement
(cp_parser *);
/* Declarations [gram.dcl.dcl] */
static void cp_parser_declaration_seq_opt
(cp_parser *);
static void cp_parser_declaration
(cp_parser *);
static void cp_parser_block_declaration
(cp_parser *, bool);
static void cp_parser_simple_declaration
(cp_parser *, bool);
static void cp_parser_decl_specifier_seq
(cp_parser *, cp_parser_flags, cp_decl_specifier_seq *, int *);
static tree cp_parser_storage_class_specifier_opt
(cp_parser *);
static tree cp_parser_function_specifier_opt
(cp_parser *, cp_decl_specifier_seq *);
static tree cp_parser_type_specifier
(cp_parser *, cp_parser_flags, cp_decl_specifier_seq *, bool,
int *, bool *);
static tree cp_parser_simple_type_specifier
(cp_parser *, cp_decl_specifier_seq *, cp_parser_flags);
static tree cp_parser_type_name
(cp_parser *);
static tree cp_parser_elaborated_type_specifier
(cp_parser *, bool, bool);
static tree cp_parser_enum_specifier
(cp_parser *);
static void cp_parser_enumerator_list
(cp_parser *, tree);
static void cp_parser_enumerator_definition
(cp_parser *, tree);
static tree cp_parser_namespace_name
(cp_parser *);
static void cp_parser_namespace_definition
(cp_parser *);
static void cp_parser_namespace_body
(cp_parser *);
static tree cp_parser_qualified_namespace_specifier
(cp_parser *);
static void cp_parser_namespace_alias_definition
(cp_parser *);
static bool cp_parser_using_declaration
(cp_parser *, bool);
static void cp_parser_using_directive
(cp_parser *);
static void cp_parser_asm_definition
(cp_parser *);
static void cp_parser_linkage_specification
(cp_parser *);
/* Declarators [gram.dcl.decl] */
static tree cp_parser_init_declarator
(cp_parser *, cp_decl_specifier_seq *, VEC (deferred_access_check,gc)*, bool, bool, int, bool *);
static cp_declarator *cp_parser_declarator
(cp_parser *, cp_parser_declarator_kind, int *, bool *, bool);
static cp_declarator *cp_parser_direct_declarator
(cp_parser *, cp_parser_declarator_kind, int *, bool);
static enum tree_code cp_parser_ptr_operator
(cp_parser *, tree *, cp_cv_quals *);
static cp_cv_quals cp_parser_cv_qualifier_seq_opt
(cp_parser *);
static tree cp_parser_declarator_id
(cp_parser *, bool);
static tree cp_parser_type_id
(cp_parser *);
static void cp_parser_type_specifier_seq
(cp_parser *, bool, cp_decl_specifier_seq *);
static cp_parameter_declarator *cp_parser_parameter_declaration_clause
(cp_parser *);
static cp_parameter_declarator *cp_parser_parameter_declaration_list
(cp_parser *, bool *);
static cp_parameter_declarator *cp_parser_parameter_declaration
(cp_parser *, bool, bool *);
static void cp_parser_function_body
(cp_parser *);
static tree cp_parser_initializer
(cp_parser *, bool *, bool *);
static tree cp_parser_initializer_clause
(cp_parser *, bool *);
static VEC(constructor_elt,gc) *cp_parser_initializer_list
(cp_parser *, bool *);
static bool cp_parser_ctor_initializer_opt_and_function_body
(cp_parser *);
/* Classes [gram.class] */
static tree cp_parser_class_name
(cp_parser *, bool, bool, enum tag_types, bool, bool, bool);
static tree cp_parser_class_specifier
(cp_parser *);
static tree cp_parser_class_head
(cp_parser *, bool *, tree *, tree *);
static enum tag_types cp_parser_class_key
(cp_parser *);
static void cp_parser_member_specification_opt
(cp_parser *);
static void cp_parser_member_declaration
(cp_parser *);
static tree cp_parser_pure_specifier
(cp_parser *);
static tree cp_parser_constant_initializer
(cp_parser *);
/* Derived classes [gram.class.derived] */
static tree cp_parser_base_clause
(cp_parser *);
static tree cp_parser_base_specifier
(cp_parser *);
/* Special member functions [gram.special] */
static tree cp_parser_conversion_function_id
(cp_parser *);
static tree cp_parser_conversion_type_id
(cp_parser *);
static cp_declarator *cp_parser_conversion_declarator_opt
(cp_parser *);
static bool cp_parser_ctor_initializer_opt
(cp_parser *);
static void cp_parser_mem_initializer_list
(cp_parser *);
static tree cp_parser_mem_initializer
(cp_parser *);
static tree cp_parser_mem_initializer_id
(cp_parser *);
/* Overloading [gram.over] */
static tree cp_parser_operator_function_id
(cp_parser *);
static tree cp_parser_operator
(cp_parser *);
/* Templates [gram.temp] */
static void cp_parser_template_declaration
(cp_parser *, bool);
static tree cp_parser_template_parameter_list
(cp_parser *);
static tree cp_parser_template_parameter
(cp_parser *, bool *);
static tree cp_parser_type_parameter
(cp_parser *);
static tree cp_parser_template_id
(cp_parser *, bool, bool, bool);
static tree cp_parser_template_name
(cp_parser *, bool, bool, bool, bool *);
static tree cp_parser_template_argument_list
(cp_parser *);
static tree cp_parser_template_argument
(cp_parser *);
static void cp_parser_explicit_instantiation
(cp_parser *);
static void cp_parser_explicit_specialization
(cp_parser *);
/* Exception handling [gram.exception] */
static tree cp_parser_try_block
(cp_parser *);
static bool cp_parser_function_try_block
(cp_parser *);
static void cp_parser_handler_seq
(cp_parser *);
static void cp_parser_handler
(cp_parser *);
static tree cp_parser_exception_declaration
(cp_parser *);
static tree cp_parser_throw_expression
(cp_parser *);
static tree cp_parser_exception_specification_opt
(cp_parser *);
static tree cp_parser_type_id_list
(cp_parser *);
/* GNU Extensions */
static tree cp_parser_asm_specification_opt
(cp_parser *);
static tree cp_parser_asm_operand_list
(cp_parser *);
static tree cp_parser_asm_clobber_list
(cp_parser *);
static tree cp_parser_attributes_opt
(cp_parser *);
static tree cp_parser_attribute_list
(cp_parser *);
static bool cp_parser_extension_opt
(cp_parser *, int *);
static void cp_parser_label_declaration
(cp_parser *);
enum pragma_context { pragma_external, pragma_stmt, pragma_compound };
static bool cp_parser_pragma
(cp_parser *, enum pragma_context);
/* Objective-C++ Productions */
static tree cp_parser_objc_message_receiver
(cp_parser *);
static tree cp_parser_objc_message_args
(cp_parser *);
static tree cp_parser_objc_message_expression
(cp_parser *);
static tree cp_parser_objc_encode_expression
(cp_parser *);
static tree cp_parser_objc_defs_expression
(cp_parser *);
static tree cp_parser_objc_protocol_expression
(cp_parser *);
static tree cp_parser_objc_selector_expression
(cp_parser *);
static tree cp_parser_objc_expression
(cp_parser *);
static bool cp_parser_objc_selector_p
(enum cpp_ttype);
static tree cp_parser_objc_selector
(cp_parser *);
static tree cp_parser_objc_protocol_refs_opt
(cp_parser *);
static void cp_parser_objc_declaration
(cp_parser *);
static tree cp_parser_objc_statement
(cp_parser *);
/* Utility Routines */
static tree cp_parser_lookup_name
(cp_parser *, tree, enum tag_types, bool, bool, bool, tree *);
static tree cp_parser_lookup_name_simple
(cp_parser *, tree);
static tree cp_parser_maybe_treat_template_as_class
(tree, bool);
static bool cp_parser_check_declarator_template_parameters
(cp_parser *, cp_declarator *);
static bool cp_parser_check_template_parameters
(cp_parser *, unsigned);
static tree cp_parser_simple_cast_expression
(cp_parser *);
static tree cp_parser_global_scope_opt
(cp_parser *, bool);
static bool cp_parser_constructor_declarator_p
(cp_parser *, bool);
static tree cp_parser_function_definition_from_specifiers_and_declarator
(cp_parser *, cp_decl_specifier_seq *, tree, const cp_declarator *);
static tree cp_parser_function_definition_after_declarator
(cp_parser *, bool);
static void cp_parser_template_declaration_after_export
(cp_parser *, bool);
static void cp_parser_perform_template_parameter_access_checks
(VEC (deferred_access_check,gc)*);
static tree cp_parser_single_declaration
(cp_parser *, VEC (deferred_access_check,gc)*, bool, bool *);
static tree cp_parser_functional_cast
(cp_parser *, tree);
static tree cp_parser_save_member_function_body
(cp_parser *, cp_decl_specifier_seq *, cp_declarator *, tree);
static tree cp_parser_enclosed_template_argument_list
(cp_parser *);
static void cp_parser_save_default_args
(cp_parser *, tree);
static void cp_parser_late_parsing_for_member
(cp_parser *, tree);
static void cp_parser_late_parsing_default_args
(cp_parser *, tree);
static tree cp_parser_sizeof_operand
(cp_parser *, enum rid);
static bool cp_parser_declares_only_class_p
(cp_parser *);
static void cp_parser_set_storage_class
(cp_parser *, cp_decl_specifier_seq *, enum rid);
static void cp_parser_set_decl_spec_type
(cp_decl_specifier_seq *, tree, bool);
static bool cp_parser_friend_p
(const cp_decl_specifier_seq *);
static cp_token *cp_parser_require
(cp_parser *, enum cpp_ttype, const char *);
static cp_token *cp_parser_require_keyword
(cp_parser *, enum rid, const char *);
static bool cp_parser_token_starts_function_definition_p
(cp_token *);
static bool cp_parser_next_token_starts_class_definition_p
(cp_parser *);
static bool cp_parser_next_token_ends_template_argument_p
(cp_parser *);
static bool cp_parser_nth_token_starts_template_argument_list_p
(cp_parser *, size_t);
static enum tag_types cp_parser_token_is_class_key
(cp_token *);
static void cp_parser_check_class_key
(enum tag_types, tree type);
static void cp_parser_check_access_in_redeclaration
(tree type);
static bool cp_parser_optional_template_keyword
(cp_parser *);
static void cp_parser_pre_parsed_nested_name_specifier
(cp_parser *);
static void cp_parser_cache_group
(cp_parser *, enum cpp_ttype, unsigned);
static void cp_parser_parse_tentatively
(cp_parser *);
static void cp_parser_commit_to_tentative_parse
(cp_parser *);
static void cp_parser_abort_tentative_parse
(cp_parser *);
static bool cp_parser_parse_definitely
(cp_parser *);
static inline bool cp_parser_parsing_tentatively
(cp_parser *);
static bool cp_parser_uncommitted_to_tentative_parse_p
(cp_parser *);
static void cp_parser_error
(cp_parser *, const char *);
static void cp_parser_name_lookup_error
(cp_parser *, tree, tree, const char *);
static bool cp_parser_simulate_error
(cp_parser *);
static bool cp_parser_check_type_definition
(cp_parser *);
static void cp_parser_check_for_definition_in_return_type
(cp_declarator *, tree);
static void cp_parser_check_for_invalid_template_id
(cp_parser *, tree);
static bool cp_parser_non_integral_constant_expression
(cp_parser *, const char *);
static void cp_parser_diagnose_invalid_type_name
(cp_parser *, tree, tree);
static bool cp_parser_parse_and_diagnose_invalid_type_name
(cp_parser *);
static int cp_parser_skip_to_closing_parenthesis
(cp_parser *, bool, bool, bool);
static void cp_parser_skip_to_end_of_statement
(cp_parser *);
static void cp_parser_consume_semicolon_at_end_of_statement
(cp_parser *);
static void cp_parser_skip_to_end_of_block_or_statement
(cp_parser *);
static void cp_parser_skip_to_closing_brace
(cp_parser *);
static void cp_parser_skip_to_end_of_template_parameter_list
(cp_parser *);
static void cp_parser_skip_to_pragma_eol
(cp_parser*, cp_token *);
static bool cp_parser_error_occurred
(cp_parser *);
static bool cp_parser_allow_gnu_extensions_p
(cp_parser *);
static bool cp_parser_is_string_literal
(cp_token *);
static bool cp_parser_is_keyword
(cp_token *, enum rid);
static tree cp_parser_make_typename_type
(cp_parser *, tree, tree);
/* Returns nonzero if we are parsing tentatively.  A tentative parse
   is in progress whenever the parser-context chain has more than the
   single bottom entry.  */
static inline bool
cp_parser_parsing_tentatively (cp_parser* parser)
{
  bool have_saved_context = (parser->context->next != NULL);
  return have_saved_context;
}
/* Returns nonzero if TOKEN is a string literal (narrow or wide).  */
static bool
cp_parser_is_string_literal (cp_token* token)
{
  switch (token->type)
    {
    case CPP_STRING:
    case CPP_WSTRING:
      return true;
    default:
      return false;
    }
}
/* Returns nonzero if TOKEN is the indicated KEYWORD.  */
static bool
cp_parser_is_keyword (cp_token* token, enum rid keyword)
{
  return (keyword == token->keyword);
}
/* If not parsing tentatively, issue a diagnostic of the form

      FILE:LINE: MESSAGE before TOKEN

   where TOKEN is the next token in the input stream.  MESSAGE
   (specified by the caller) is usually of the form "expected
   OTHER-TOKEN".  */
static void
cp_parser_error (cp_parser* parser, const char* message)
{
  cp_token *token;

  /* In an uncommitted tentative parse, just record that an error
     occurred; no diagnostic is emitted.  */
  if (cp_parser_simulate_error (parser))
    return;

  token = cp_lexer_peek_token (parser->lexer);
  /* This diagnostic makes more sense if it is tagged to the line
     of the token we just peeked at.  */
  cp_lexer_set_source_position_from_token (token);

  /* A misplaced pragma gets a dedicated message, and the rest of the
     pragma line is discarded.  */
  if (token->type == CPP_PRAGMA)
    {
      error ("%<#pragma%> is not allowed here");
      cp_parser_skip_to_pragma_eol (parser, token);
      return;
    }

  /* Because c_parse_error does not understand CPP_KEYWORD, keywords
     are treated like identifiers.  */
  c_parse_error (message,
                 (token->type == CPP_KEYWORD ? CPP_NAME : token->type),
                 token->u.value);
}
/* Issue an error about name-lookup failing.  NAME is the
   IDENTIFIER_NODE that was looked up.  DECL is the result of
   the lookup (as returned from cp_parser_lookup_name).  DESIRED is
   the thing that we hoped to find (e.g. "is not a type"); it is
   appended verbatim to the message when lookup found *something*,
   just not what the caller wanted.  */
static void
cp_parser_name_lookup_error (cp_parser* parser,
                             tree name,
                             tree decl,
                             const char* desired)
{
  /* If name lookup completely failed, tell the user that NAME was not
     declared.  */
  if (decl == error_mark_node)
    {
      /* Qualified by an explicit, non-global scope: mention the scope.  */
      if (parser->scope && parser->scope != global_namespace)
        error ("%<%D::%D%> has not been declared",
               parser->scope, name);
      /* Explicitly qualified by `::'.  */
      else if (parser->scope == global_namespace)
        error ("%<::%D%> has not been declared", name);
      /* Member access (`x.name' / `x->name') on an object whose type
         is not a class at all.  */
      else if (parser->object_scope
               && !CLASS_TYPE_P (parser->object_scope))
        error ("request for member %qD in non-class type %qT",
               name, parser->object_scope);
      /* Member access where the class simply lacks such a member.  */
      else if (parser->object_scope)
        error ("%<%T::%D%> has not been declared",
               parser->object_scope, name);
      /* Plain unqualified lookup failure.  */
      else
        error ("%qD has not been declared", name);
    }
  /* Lookup succeeded but found the wrong kind of entity; report it
     together with the caller-supplied DESIRED description.  */
  else if (parser->scope && parser->scope != global_namespace)
    error ("%<%D::%D%> %s", parser->scope, name, desired);
  else if (parser->scope == global_namespace)
    error ("%<::%D%> %s", name, desired);
  else
    error ("%qD %s", name, desired);
}
/* If we are parsing tentatively, remember that an error has occurred
   during this tentative parse.  Returns true if the error was
   simulated; false if a message should be issued by the caller.  */
static bool
cp_parser_simulate_error (cp_parser* parser)
{
  /* Outside an uncommitted tentative parse the caller must report
     the problem itself.  */
  if (!cp_parser_uncommitted_to_tentative_parse_p (parser))
    return false;

  parser->context->status = CP_PARSER_STATUS_KIND_ERROR;
  return true;
}
/* Check for repeated decl-specifiers and diagnose them.  "long" is
   special-cased because "long long" is merely pedantically invalid,
   while any other repeated specifier is a hard duplicate.  */
static void
cp_parser_check_decl_spec (cp_decl_specifier_seq *decl_specs)
{
  /* Names indexed by cp_decl_spec; must stay in sync with that enum.  */
  static const char *const decl_spec_names[] = {
    "signed",
    "unsigned",
    "short",
    "long",
    "const",
    "volatile",
    "restrict",
    "inline",
    "virtual",
    "explicit",
    "friend",
    "typedef",
    "__complex",
    "__thread"
  };
  cp_decl_spec ds;

  for (ds = ds_first; ds != ds_last; ++ds)
    {
      unsigned count = decl_specs->specs[(int)ds];

      if (ds == ds_long)
        {
          /* The "long" specifier is a special case because of
             "long long": exactly two is pedantically acceptable,
             three or more never is.  */
          if (count > 2)
            error ("%<long long long%> is too long for GCC");
          else if (count == 2
                   && pedantic && !in_system_header && warn_long_long)
            pedwarn ("ISO C++ does not support %<long long%>");
        }
      else if (count > 1)
        error ("duplicate %qs", decl_spec_names[(int)ds]);
    }
}
/* This function is called when a type is defined.  If type
   definitions are forbidden at this point, an error message is
   issued.  Returns true if the definition is acceptable.  */
static bool
cp_parser_check_type_definition (cp_parser* parser)
{
  const char *forbidden = parser->type_definition_forbidden_message;

  /* A NULL message means type definitions are currently allowed.  */
  if (!forbidden)
    return true;

  /* Use `%s' to print the string in case there are any escape
     characters in the message.  */
  error ("%s", forbidden);
  return false;
}
/* This function is called when the DECLARATOR is processed.  The TYPE
   was a type defined in the decl-specifiers.  If it is invalid to
   define a type in the decl-specifiers for DECLARATOR, an error is
   issued.  */
static void
cp_parser_check_for_definition_in_return_type (cp_declarator *declarator,
                                               tree type)
{
  cp_declarator *d;

  /* [dcl.fct] forbids type definitions in return types.
     Unfortunately, it's not easy to know whether or not we are
     processing a return type until after the fact.  Strip any
     pointer, reference, and pointer-to-member declarators to get
     at the innermost declarator.  */
  for (d = declarator;
       d && (d->kind == cdk_pointer
             || d->kind == cdk_reference
             || d->kind == cdk_ptrmem);
       d = d->declarator)
    ;

  if (d && d->kind == cdk_function)
    {
      error ("new types may not be defined in a return type");
      inform ("(perhaps a semicolon is missing after the definition of %qT)",
              type);
    }
}
/* A type-specifier (TYPE) has been parsed which cannot be followed by
   "<" in any valid C++ program.  If the next token is indeed "<",
   issue a message warning the user about what appears to be an
   invalid attempt to form a template-id, then consume and discard
   the bogus template-argument-list.  */
static void
cp_parser_check_for_invalid_template_id (cp_parser* parser,
                                         tree type)
{
  cp_token_position start = 0;

  /* Nothing to do unless a "<" follows.  */
  if (!cp_lexer_next_token_is (parser->lexer, CPP_LESS))
    return;

  /* Tailor the message to what TYPE actually is.  */
  if (TYPE_P (type))
    error ("%qT is not a template", type);
  else if (TREE_CODE (type) == IDENTIFIER_NODE)
    error ("%qE is not a template", type);
  else
    error ("invalid template-id");

  /* Remember the location of the invalid "<".  */
  if (cp_parser_uncommitted_to_tentative_parse_p (parser))
    start = cp_lexer_token_position (parser->lexer, true);
  /* Consume the "<".  */
  cp_lexer_consume_token (parser->lexer);
  /* Parse the template arguments.  */
  cp_parser_enclosed_template_argument_list (parser);
  /* Permanently remove the invalid template arguments so that
     this error message is not issued again.  */
  if (start)
    cp_lexer_purge_tokens_after (parser->lexer, start);
}
/* If parsing an integral constant-expression, issue an error message
   about the fact that THING appeared and return true.  Otherwise,
   return false.  In either case, set
   PARSER->NON_INTEGRAL_CONSTANT_EXPRESSION_P.  */
static bool
cp_parser_non_integral_constant_expression (cp_parser *parser,
                                            const char *thing)
{
  /* Record the occurrence unconditionally.  */
  parser->non_integral_constant_expression_p = true;

  /* Only diagnose when we are inside a constant-expression and
     non-integral constructs have not been explicitly allowed.  */
  if (parser->integral_constant_expression_p
      && !parser->allow_non_integral_constant_expression_p)
    {
      error ("%s cannot appear in a constant-expression", thing);
      return true;
    }

  return false;
}
/* Emit a diagnostic for an invalid type name.  SCOPE is the
   qualifying scope (or NULL, if none) for ID.  This function commits
   to the current active tentative parse, if any.  (Otherwise, the
   problematic construct might be encountered again later, resulting
   in duplicate error messages.)  */
static void
cp_parser_diagnose_invalid_type_name (cp_parser *parser, tree scope, tree id)
{
  tree decl, old_scope;
  /* Try to lookup the identifier.  Temporarily install SCOPE as the
     parser's lookup scope, then restore the old one.  */
  old_scope = parser->scope;
  parser->scope = scope;
  decl = cp_parser_lookup_name_simple (parser, id);
  parser->scope = old_scope;
  /* If the lookup found a template-name, it means that the user forgot
     to specify an argument list.  Emit a useful error message.  */
  if (TREE_CODE (decl) == TEMPLATE_DECL)
    error ("invalid use of template-name %qE without an argument list", decl);
  else if (TREE_CODE (id) == BIT_NOT_EXPR)
    error ("invalid use of destructor %qD as a type", id);
  else if (TREE_CODE (decl) == TYPE_DECL)
    /* Something like 'unsigned A a;'  */
    error ("invalid combination of multiple type-specifiers");
  else if (!parser->scope)
    {
      /* Unqualified name that does not name a type.  Issue an error
         message.  */
      error ("%qE does not name a type", id);
      /* If we're in a template class, it's possible that the user was
         referring to a type from a base class.  For example:

           template <typename T> struct A { typedef T X; };
           template <typename T> struct B : public A<T> { X x; };

         The user should have said "typename A<T>::X".  */
      if (processing_template_decl && current_class_type
          && TYPE_BINFO (current_class_type))
        {
          tree b;
          /* Walk the base binfos looking for a dependent base that
             declares a type named ID.  */
          for (b = TREE_CHAIN (TYPE_BINFO (current_class_type));
               b;
               b = TREE_CHAIN (b))
            {
              tree base_type = BINFO_TYPE (b);
              if (CLASS_TYPE_P (base_type)
                  && dependent_type_p (base_type))
                {
                  tree field;
                  /* Go from a particular instantiation of the
                     template (which will have an empty TYPE_FIELDs),
                     to the main version.  */
                  base_type = CLASSTYPE_PRIMARY_TEMPLATE_TYPE (base_type);
                  for (field = TYPE_FIELDS (base_type);
                       field;
                       field = TREE_CHAIN (field))
                    if (TREE_CODE (field) == TYPE_DECL
                        && DECL_NAME (field) == id)
                      {
                        inform ("(perhaps %<typename %T::%E%> was intended)",
                                BINFO_TYPE (b), id);
                        break;
                      }
                  /* Only suggest the first base with a match.  */
                  if (field)
                    break;
                }
            }
        }
    }
  /* Here we diagnose qualified-ids where the scope is actually correct,
     but the identifier does not resolve to a valid type name.  */
  else if (parser->scope != error_mark_node)
    {
      if (TREE_CODE (parser->scope) == NAMESPACE_DECL)
        error ("%qE in namespace %qE does not name a type",
               id, parser->scope);
      else if (TYPE_P (parser->scope))
        error ("%qE in class %qT does not name a type", id, parser->scope);
      else
        gcc_unreachable ();
    }
  /* Commit so the bad construct is not re-parsed and re-diagnosed.  */
  cp_parser_commit_to_tentative_parse (parser);
}
/* Check for a common situation where a type-name should be present,
   but is not, and issue a sensible error message.  Returns true if an
   invalid type-name was detected.

   The situation handled by this function are variable declarations of the
   form `ID a', where `ID' is an id-expression and `a' is a plain identifier.
   Usually, `ID' should name a type, but if we got here it means that it
   does not.  We try to emit the best possible error message depending on
   how exactly the id-expression looks like.  */
static bool
cp_parser_parse_and_diagnose_invalid_type_name (cp_parser *parser)
{
  tree id;

  /* Parse tentatively so we can back out cleanly if this turns out
     not to be the `ID a' pattern we handle.  */
  cp_parser_parse_tentatively (parser);
  id = cp_parser_id_expression (parser,
                                /*template_keyword_p=*/false,
                                /*check_dependency_p=*/true,
                                /*template_p=*/NULL,
                                /*declarator_p=*/true,
                                /*optional_p=*/false);
  /* After the id-expression, there should be a plain identifier,
     otherwise this is not a simple variable declaration.  Also, if
     the scope is dependent, we cannot do much.  */
  if (!cp_lexer_next_token_is (parser->lexer, CPP_NAME)
      || (parser->scope && TYPE_P (parser->scope)
          && dependent_type_p (parser->scope))
      || TREE_CODE (id) == TYPE_DECL)
    {
      cp_parser_abort_tentative_parse (parser);
      return false;
    }
  /* If the tentative parse itself recorded an error, let the caller
     handle things; there is nothing sensible to diagnose here.  */
  if (!cp_parser_parse_definitely (parser))
    return false;

  /* Emit a diagnostic for the invalid type.  */
  cp_parser_diagnose_invalid_type_name (parser, parser->scope, id);
  /* Skip to the end of the declaration; there's no point in
     trying to process it.  */
  cp_parser_skip_to_end_of_block_or_statement (parser);
  return true;
}
/* Consume tokens up to, and including, the next non-nested closing `)'.
   Returns 1 iff we found a closing `)'.  RECOVERING is true, if we
   are doing error recovery.  Returns -1 if OR_COMMA is true and we
   found an unnested comma.  When CONSUME_PAREN is false the matching
   `)' is left in the token stream.  */
static int
cp_parser_skip_to_closing_parenthesis (cp_parser *parser,
                                       bool recovering,
                                       bool or_comma,
                                       bool consume_paren)
{
  unsigned paren_depth = 0;   /* Nesting depth of `(' ... `)' pairs.  */
  unsigned brace_depth = 0;   /* Nesting depth of `{' ... `}' pairs.  */

  /* In an uncommitted tentative parse, diagnostics are suppressed and
     the tokens will be re-parsed anyway, so skipping ahead for error
     recovery is pointless.  */
  if (recovering && !or_comma
      && cp_parser_uncommitted_to_tentative_parse_p (parser))
    return 0;
  while (true)
    {
      cp_token * token = cp_lexer_peek_token (parser->lexer);
      switch (token->type)
        {
        case CPP_EOF:
        case CPP_PRAGMA_EOL:
          /* If we've run out of tokens, then there is no closing `)'.  */
          return 0;
        case CPP_SEMICOLON:
          /* This matches the processing in skip_to_end_of_statement.  */
          if (!brace_depth)
            return 0;
          break;
        case CPP_OPEN_BRACE:
          ++brace_depth;
          break;
        case CPP_CLOSE_BRACE:
          /* `!brace_depth--' both detects an unnested `}' (give up)
             and decrements the depth for a nested one.  */
          if (!brace_depth--)
            return 0;
          break;
        case CPP_COMMA:
          /* An unnested comma terminates the scan when OR_COMMA.  */
          if (recovering && or_comma && !brace_depth && !paren_depth)
            return -1;
          break;
        case CPP_OPEN_PAREN:
          /* Parentheses inside braces belong to nested constructs and
             are not tracked.  */
          if (!brace_depth)
            ++paren_depth;
          break;
        case CPP_CLOSE_PAREN:
          /* `!paren_depth--' is true exactly for the non-nested `)'.  */
          if (!brace_depth && !paren_depth--)
            {
              if (consume_paren)
                cp_lexer_consume_token (parser->lexer);
              return 1;
            }
          break;
        default:
          break;
        }
      /* Consume the token.  */
      cp_lexer_consume_token (parser->lexer);
    }
}
/* Consume tokens until we reach the end of the current statement.
   Normally, that will be just before consuming a `;'.  However, if a
   non-nested `}' comes first, then we stop before consuming that.  */
static void
cp_parser_skip_to_end_of_statement (cp_parser* parser)
{
  unsigned nesting_depth = 0;  /* Depth of `{' ... `}' nesting.  */

  while (true)
    {
      cp_token *token = cp_lexer_peek_token (parser->lexer);
      switch (token->type)
        {
        case CPP_EOF:
        case CPP_PRAGMA_EOL:
          /* If we've run out of tokens, stop.  */
          return;
        case CPP_SEMICOLON:
          /* If the next token is a `;', we have reached the end of the
             statement.  */
          if (!nesting_depth)
            return;
          break;
        case CPP_CLOSE_BRACE:
          /* If this is a non-nested '}', stop before consuming it.
             That way, when confronted with something like:

               { 3 + }

             we stop before consuming the closing '}', even though we
             have not yet reached a `;'.  */
          if (nesting_depth == 0)
            return;
          /* If it is the closing '}' for a block that we have
             scanned, stop -- but only after consuming the token.
             That way given:

                void f g () { ... }
                typedef int I;

             we will stop after the body of the erroneously declared
             function, but before consuming the following `typedef'
             declaration.  */
          if (--nesting_depth == 0)
            {
              cp_lexer_consume_token (parser->lexer);
              return;
            }
          /* Fix: this `break' was missing, so control fell through
             into the CPP_OPEN_BRACE case, re-incrementing
             NESTING_DEPTH and cancelling the decrement above.  As a
             result, nested closing braces never reduced the depth and
             the skip consumed tokens far past the end of the block.  */
          break;
        case CPP_OPEN_BRACE:
          ++nesting_depth;
          break;
        default:
          break;
        }
      /* Consume the token.  */
      cp_lexer_consume_token (parser->lexer);
    }
}
/* This function is called at the end of a statement or declaration.
   If the next token is a semicolon, it is consumed; otherwise, error
   recovery is attempted.  */
static void
cp_parser_consume_semicolon_at_end_of_statement (cp_parser *parser)
{
  /* The common case: the `;' is right there.  */
  if (cp_parser_require (parser, CPP_SEMICOLON, "`;'"))
    return;

  /* There was additional (erroneous) input; skip to the end of
     the statement.  */
  cp_parser_skip_to_end_of_statement (parser);
  /* If the next token is now a `;', consume it.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
    cp_lexer_consume_token (parser->lexer);
}
/* Skip tokens until we have consumed an entire block, or until we
   have consumed a non-nested `;'.  NESTING_DEPTH counts open braces;
   setting it to -1 is the sentinel meaning "consume the current
   token, then stop".  */
static void
cp_parser_skip_to_end_of_block_or_statement (cp_parser* parser)
{
  int nesting_depth = 0;
  while (nesting_depth >= 0)
    {
      cp_token *token = cp_lexer_peek_token (parser->lexer);
      switch (token->type)
        {
        case CPP_EOF:
        case CPP_PRAGMA_EOL:
          /* If we've run out of tokens, stop.  */
          return;
        case CPP_SEMICOLON:
          /* Stop if this is an unnested ';'. */
          if (!nesting_depth)
            nesting_depth = -1;
          break;
        case CPP_CLOSE_BRACE:
          /* Stop if this is an unnested '}', or closes the outermost
             nesting level.  NOTE(review): when NESTING_DEPTH is 0 here
             it goes straight to -1 and the stray `}' is still consumed
             below before the loop exits -- confirm that consuming an
             unnested `}' (rather than leaving it, as
             cp_parser_skip_to_end_of_statement does) is intended.  */
          nesting_depth--;
          if (!nesting_depth)
            nesting_depth = -1;
          break;
        case CPP_OPEN_BRACE:
          /* Nest. */
          nesting_depth++;
          break;
        default:
          break;
        }
      /* Consume the token.  */
      cp_lexer_consume_token (parser->lexer);
    }
}
/* Skip tokens until a non-nested closing curly brace is the next
   token; that `}' itself is NOT consumed.  */
static void
cp_parser_skip_to_closing_brace (cp_parser *parser)
{
  unsigned depth = 0;

  for (;;)
    {
      cp_token *token = cp_lexer_peek_token (parser->lexer);

      /* If we've run out of tokens, stop.  */
      if (token->type == CPP_EOF || token->type == CPP_PRAGMA_EOL)
        return;

      if (token->type == CPP_CLOSE_BRACE)
        {
          /* A non-nested `}' ends the current block; leave it for
             the caller.  */
          if (depth == 0)
            return;
          --depth;
        }
      else if (token->type == CPP_OPEN_BRACE)
        /* Entering a nested block; consume it entirely.  */
        ++depth;

      cp_lexer_consume_token (parser->lexer);
    }
}
/* Consume tokens until we reach the end of the pragma.  The PRAGMA_TOK
   parameter is the PRAGMA token, allowing us to purge the entire pragma
   sequence.  */
static void
cp_parser_skip_to_pragma_eol (cp_parser* parser, cp_token *pragma_tok)
{
  parser->lexer->in_pragma = false;

  /* Discard everything up to and including the pragma terminator
     (or EOF, whichever comes first).  */
  for (;;)
    {
      cp_token *token = cp_lexer_consume_token (parser->lexer);
      if (token->type == CPP_PRAGMA_EOL || token->type == CPP_EOF)
        break;
    }

  /* Ensure that the pragma is not parsed again.  */
  cp_lexer_purge_tokens_after (parser->lexer, pragma_tok);
}
/* Require pragma end of line, resyncing with it as necessary.  The
   arguments are as for cp_parser_skip_to_pragma_eol.  */
static void
cp_parser_require_pragma_eol (cp_parser *parser, cp_token *pragma_tok)
{
  parser->lexer->in_pragma = false;

  /* If the terminator is not next, skip forward until we find it.  */
  if (cp_parser_require (parser, CPP_PRAGMA_EOL, "end of line"))
    return;
  cp_parser_skip_to_pragma_eol (parser, pragma_tok);
}
/* This is a simple wrapper around make_typename_type.  When the id is
   an unresolved identifier node, we can provide a superior diagnostic
   using cp_parser_diagnose_invalid_type_name.  */
static tree
cp_parser_make_typename_type (cp_parser *parser, tree scope, tree id)
{
  tree result;

  /* For anything other than a plain identifier, let
     make_typename_type complain in the usual way.  */
  if (TREE_CODE (id) != IDENTIFIER_NODE)
    return make_typename_type (scope, id, typename_type, tf_error);

  /* Suppress errors on the first attempt so that, on failure, we can
     emit a better diagnostic ourselves.  */
  result = make_typename_type (scope, id, typename_type,
                               /*complain=*/tf_none);
  if (result == error_mark_node)
    cp_parser_diagnose_invalid_type_name (parser, scope, id);
  return result;
}
/* Create a new C++ parser and return it, fully initialized.  */
static cp_parser *
cp_parser_new (void)
{
  cp_parser *parser;
  cp_lexer *lexer;
  unsigned i;

  /* The main lexer must be created before any GC allocation below:
     cp_lexer_new_main might load a PCH file.  */
  lexer = cp_lexer_new_main ();

  /* Build the token-type -> binary-operator lookup table so the tree
     code can be fetched directly from a token.  */
  for (i = 0; i < sizeof (binops) / sizeof (binops[0]); i++)
    binops_by_token[binops[i].token_type] = binops[i];

  parser = GGC_CNEW (cp_parser);
  parser->lexer = lexer;
  parser->context = cp_parser_context_new (NULL);

  /* GNU extensions are always accepted, for now.  */
  parser->allow_gnu_extensions_p = 1;
  /* Initially, `>' is a greater-than operator, not the end of a
     template-id.  */
  parser->greater_than_is_operator_p = true;
  parser->default_arg_ok_p = true;
  /* Not parsing a constant-expression.  */
  parser->integral_constant_expression_p = false;
  parser->allow_non_integral_constant_expression_p = false;
  parser->non_integral_constant_expression_p = false;
  /* Local variable names are permitted.  */
  parser->local_variables_forbidden_p = false;
  /* Not inside an unbraced `extern "C"' declaration.  */
  parser->in_unbraced_linkage_specification_p = false;
  /* Not processing a declarator.  */
  parser->in_declarator_p = false;
  /* Not inside a template-argument-list.  */
  parser->in_template_argument_list_p = false;
  /* Not inside an iteration statement.  */
  parser->in_statement = 0;
  /* Not inside a switch statement.  */
  parser->in_switch_statement_p = false;
  /* Not parsing a type-id inside an expression.  */
  parser->in_type_id_in_expr_p = false;
  /* Declarations aren't implicitly extern "C".  */
  parser->implicit_extern_c = false;
  /* String literals are translated to the execution character set.  */
  parser->translate_strings_p = true;
  /* Not parsing a function body.  */
  parser->in_function_body = false;
  /* The unparsed-functions queue starts out empty.  */
  parser->unparsed_functions_queues = build_tree_list (NULL_TREE, NULL_TREE);
  /* No classes are being defined.  */
  parser->num_classes_being_defined = 0;
  /* No template parameter lists apply.  */
  parser->num_template_parameter_lists = 0;

  return parser;
}
/* Create a cp_lexer structure which will emit the tokens in CACHE
   and push it onto the parser's lexer stack.  This is used for delayed
   parsing of in-class method bodies and default arguments, and should
   not be confused with tentative parsing.  */
static void
cp_parser_push_lexer_for_tokens (cp_parser *parser, cp_token_cache *cache)
{
  cp_lexer *lexer;

  /* Build a lexer over the cached tokens and link it on top of the
     parser's current lexer.  */
  lexer = cp_lexer_new_from_tokens (cache);
  lexer->next = parser->lexer;
  parser->lexer = lexer;

  /* Move the current source position to that of the first token in the
     new lexer.  */
  cp_lexer_set_source_position_from_token (lexer->next_token);
}
/* Pop the top lexer off the parser stack.  This is never used for the
   "main" lexer, only for those pushed by cp_parser_push_lexer_for_tokens.  */
static void
cp_parser_pop_lexer (cp_parser *parser)
{
  cp_lexer *top = parser->lexer;

  /* Unlink the topmost lexer and release it.  */
  parser->lexer = top->next;
  cp_lexer_destroy (top);

  /* Put the current source position back where it was before this
     lexer was pushed.  */
  cp_lexer_set_source_position_from_token (parser->lexer->next_token);
}
/* Lexical conventions [gram.lex]  */

/* Parse an identifier.  Returns an IDENTIFIER_NODE representing the
   identifier, or error_mark_node if no identifier was present.  */
static tree
cp_parser_identifier (cp_parser* parser)
{
  /* Look for the identifier; cp_parser_require diagnoses its absence.  */
  cp_token *token = cp_parser_require (parser, CPP_NAME, "identifier");

  if (!token)
    return error_mark_node;
  return token->u.value;
}
/* Parse a sequence of adjacent string constants.  Returns a
   TREE_STRING representing the combined, nul-terminated string
   constant.  If TRANSLATE is true, translate the string to the
   execution character set.  If WIDE_OK is false, a wide string is
   invalid here and is diagnosed (then treated as narrow).

   C++98 [lex.string] says that if a narrow string literal token is
   adjacent to a wide string literal token, the behavior is undefined.
   However, C99 6.4.5p4 says that this results in a wide string literal.
   We follow C99 here, for consistency with the C front end.

   This code is largely lifted from lex_string() in c-lex.c.

   FUTURE: ObjC++ will need to handle @-strings here.  */
static tree
cp_parser_string_literal (cp_parser *parser, bool translate, bool wide_ok)
{
  tree value;
  bool wide = false;          /* True if any piece is a wide literal.  */
  size_t count;               /* Number of adjacent literal tokens.  */
  struct obstack str_ob;      /* Holds the pieces when COUNT > 1.  */
  cpp_string str, istr, *strs;
  cp_token *tok;
  tok = cp_lexer_peek_token (parser->lexer);
  if (!cp_parser_is_string_literal (tok))
    {
      cp_parser_error (parser, "expected string-literal");
      return error_mark_node;
    }
  /* Try to avoid the overhead of creating and destroying an obstack
     for the common case of just one string.  */
  if (!cp_parser_is_string_literal
      (cp_lexer_peek_nth_token (parser->lexer, 2)))
    {
      cp_lexer_consume_token (parser->lexer);
      str.text = (const unsigned char *)TREE_STRING_POINTER (tok->u.value);
      str.len = TREE_STRING_LENGTH (tok->u.value);
      count = 1;
      if (tok->type == CPP_WSTRING)
        wide = true;
      strs = &str;
    }
  else
    {
      /* Multiple adjacent literals: gather the pieces on an obstack
         so cpplib can concatenate them.  */
      gcc_obstack_init (&str_ob);
      count = 0;
      do
        {
          cp_lexer_consume_token (parser->lexer);
          count++;
          str.text = (unsigned char *)TREE_STRING_POINTER (tok->u.value);
          str.len = TREE_STRING_LENGTH (tok->u.value);
          /* One wide piece makes the whole concatenation wide (C99).  */
          if (tok->type == CPP_WSTRING)
            wide = true;
          obstack_grow (&str_ob, &str, sizeof (cpp_string));
          tok = cp_lexer_peek_token (parser->lexer);
        }
      while (cp_parser_is_string_literal (tok));
      strs = (cpp_string *) obstack_finish (&str_ob);
    }
  if (wide && !wide_ok)
    {
      cp_parser_error (parser, "a wide string is invalid in this context");
      /* Recover by treating the result as a narrow string.  */
      wide = false;
    }
  /* Let cpplib concatenate the pieces, optionally translating them to
     the execution character set.  */
  if ((translate ? cpp_interpret_string : cpp_interpret_string_notranslate)
      (parse_in, strs, count, &istr, wide))
    {
      value = build_string (istr.len, (char *)istr.text);
      free ((void *)istr.text);
      TREE_TYPE (value) = wide ? wchar_array_type_node : char_array_type_node;
      value = fix_string_type (value);
    }
  else
    /* cpp_interpret_string has issued an error.  */
    value = error_mark_node;
  if (count > 1)
    obstack_free (&str_ob, 0);
  return value;
}
/* Basic concepts [gram.basic]  */

/* Parse a translation-unit.

   translation-unit:
     declaration-seq [opt]

   Returns TRUE if all went well.  */
static bool
cp_parser_translation_unit (cp_parser* parser)
{
  /* The address of the first non-permanent object on the declarator
     obstack.  Static because the obstack itself is created only once,
     on the first call.  */
  static void *declarator_obstack_base;
  bool success;
  /* Create the declarator obstack, if necessary.  cp_error_declarator
     doubles as the "already initialized" flag.  */
  if (!cp_error_declarator)
    {
      gcc_obstack_init (&declarator_obstack);
      /* Create the error declarator.  */
      cp_error_declarator = make_declarator (cdk_error);
      /* Create the empty parameter list.  */
      no_parameters = make_parameter_declarator (NULL, NULL, NULL_TREE);
      /* Remember where the base of the declarator obstack lies.  */
      declarator_obstack_base = obstack_next_free (&declarator_obstack);
    }
  cp_parser_declaration_seq_opt (parser);
  /* If there are no tokens left then all went well.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_EOF))
    {
      /* Get rid of the token array; we don't need it any more.  */
      cp_lexer_destroy (parser->lexer);
      parser->lexer = NULL;
      /* This file might have been a context that's implicitly extern
         "C".  If so, pop the lang context.  (Only relevant for PCH.)  */
      if (parser->implicit_extern_c)
        {
          pop_lang_context ();
          parser->implicit_extern_c = false;
        }
      /* Finish up.  */
      finish_translation_unit ();
      success = true;
    }
  else
    {
      /* Leftover tokens mean the declaration-seq did not consume
         everything: report and fail.  */
      cp_parser_error (parser, "expected declaration");
      success = false;
    }
  /* Make sure the declarator obstack was fully cleaned up.  */
  gcc_assert (obstack_next_free (&declarator_obstack)
              == declarator_obstack_base);
  /* All went well.  */
  return success;
}
/* Expressions [gram.expr] */
/* Parse a primary-expression.
primary-expression:
literal
this
( expression )
id-expression
GNU Extensions:
primary-expression:
( compound-statement )
__builtin_va_arg ( assignment-expression , type-id )
__builtin_offsetof ( type-id , offsetof-expression )
Objective-C++ Extension:
primary-expression:
objc-expression
literal:
__null
ADDRESS_P is true iff this expression was immediately preceded by
"&" and therefore might denote a pointer-to-member. CAST_P is true
iff this expression is the target of a cast. TEMPLATE_ARG_P is
true iff this expression is a template argument.
Returns a representation of the expression. Upon return, *IDK
indicates what kind of id-expression (if any) was present. */
static tree
cp_parser_primary_expression (cp_parser *parser,
bool address_p,
bool cast_p,
bool template_arg_p,
cp_id_kind *idk)
{
cp_token *token;
/* Assume the primary expression is not an id-expression. */
*idk = CP_ID_KIND_NONE;
/* Peek at the next token. */
token = cp_lexer_peek_token (parser->lexer);
switch (token->type)
{
/* literal:
integer-literal
character-literal
floating-literal
string-literal
boolean-literal */
case CPP_CHAR:
case CPP_WCHAR:
case CPP_NUMBER:
token = cp_lexer_consume_token (parser->lexer);
/* Floating-point literals are only allowed in an integral
constant expression if they are cast to an integral or
enumeration type. */
if (TREE_CODE (token->u.value) == REAL_CST
&& parser->integral_constant_expression_p
&& pedantic)
{
/* CAST_P will be set even in invalid code like "int(2.7 +
...)". Therefore, we have to check that the next token
is sure to end the cast. */
if (cast_p)
{
cp_token *next_token;
next_token = cp_lexer_peek_token (parser->lexer);
if (/* The comma at the end of an
enumerator-definition. */
next_token->type != CPP_COMMA
/* The curly brace at the end of an enum-specifier. */
&& next_token->type != CPP_CLOSE_BRACE
/* The end of a statement. */
&& next_token->type != CPP_SEMICOLON
/* The end of the cast-expression. */
&& next_token->type != CPP_CLOSE_PAREN
/* The end of an array bound. */
&& next_token->type != CPP_CLOSE_SQUARE
/* The closing ">" in a template-argument-list. */
&& (next_token->type != CPP_GREATER
|| parser->greater_than_is_operator_p))
cast_p = false;
}
/* If we are within a cast, then the constraint that the
cast is to an integral or enumeration type will be
checked at that point. If we are not within a cast, then
this code is invalid. */
if (!cast_p)
cp_parser_non_integral_constant_expression
(parser, "floating-point literal");
}
return token->u.value;
case CPP_STRING:
case CPP_WSTRING:
/* ??? Should wide strings be allowed when parser->translate_strings_p
is false (i.e. in attributes)? If not, we can kill the third
argument to cp_parser_string_literal. */
return cp_parser_string_literal (parser,
parser->translate_strings_p,
true);
case CPP_OPEN_PAREN:
{
tree expr;
bool saved_greater_than_is_operator_p;
/* Consume the `('. */
cp_lexer_consume_token (parser->lexer);
/* Within a parenthesized expression, a `>' token is always
the greater-than operator. */
saved_greater_than_is_operator_p
= parser->greater_than_is_operator_p;
parser->greater_than_is_operator_p = true;
/* If we see `( { ' then we are looking at the beginning of
a GNU statement-expression. */
if (cp_parser_allow_gnu_extensions_p (parser)
&& cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
{
/* Statement-expressions are not allowed by the standard. */
if (pedantic)
pedwarn ("ISO C++ forbids braced-groups within expressions");
/* And they're not allowed outside of a function-body; you
cannot, for example, write:
int i = ({ int j = 3; j + 1; });
at class or namespace scope. */
if (!parser->in_function_body)
error ("statement-expressions are allowed only inside functions");
/* Start the statement-expression. */
expr = begin_stmt_expr ();
/* Parse the compound-statement. */
cp_parser_compound_statement (parser, expr, false);
/* Finish up. */
expr = finish_stmt_expr (expr, false);
}
else
{
/* Parse the parenthesized expression. */
expr = cp_parser_expression (parser, cast_p);
/* Let the front end know that this expression was
enclosed in parentheses. This matters in case, for
example, the expression is of the form `A::B', since
`&A::B' might be a pointer-to-member, but `&(A::B)' is
not. */
finish_parenthesized_expr (expr);
}
/* The `>' token might be the end of a template-id or
template-parameter-list now. */
parser->greater_than_is_operator_p
= saved_greater_than_is_operator_p;
/* Consume the `)'. */
if (!cp_parser_require (parser, CPP_CLOSE_PAREN, "`)'"))
cp_parser_skip_to_end_of_statement (parser);
return expr;
}
case CPP_KEYWORD:
switch (token->keyword)
{
/* These two are the boolean literals. */
case RID_TRUE:
cp_lexer_consume_token (parser->lexer);
return boolean_true_node;
case RID_FALSE:
cp_lexer_consume_token (parser->lexer);
return boolean_false_node;
/* The `__null' literal. */
case RID_NULL:
cp_lexer_consume_token (parser->lexer);
return null_node;
/* Recognize the `this' keyword. */
case RID_THIS:
cp_lexer_consume_token (parser->lexer);
if (parser->local_variables_forbidden_p)
{
error ("%<this%> may not be used in this context");
return error_mark_node;
}
/* Pointers cannot appear in constant-expressions. */
if (cp_parser_non_integral_constant_expression (parser,
"`this'"))
return error_mark_node;
return finish_this_expr ();
/* The `operator' keyword can be the beginning of an
id-expression. */
case RID_OPERATOR:
goto id_expression;
case RID_FUNCTION_NAME:
case RID_PRETTY_FUNCTION_NAME:
case RID_C99_FUNCTION_NAME:
/* The symbols __FUNCTION__, __PRETTY_FUNCTION__, and
__func__ are the names of variables -- but they are
treated specially. Therefore, they are handled here,
rather than relying on the generic id-expression logic
below. Grammatically, these names are id-expressions.
Consume the token. */
token = cp_lexer_consume_token (parser->lexer);
/* Look up the name. */
return finish_fname (token->u.value);
case RID_VA_ARG:
{
tree expression;
tree type;
/* The `__builtin_va_arg' construct is used to handle
`va_arg'. Consume the `__builtin_va_arg' token. */
cp_lexer_consume_token (parser->lexer);
/* Look for the opening `('. */
cp_parser_require (parser, CPP_OPEN_PAREN, "`('");
/* Now, parse the assignment-expression. */
expression = cp_parser_assignment_expression (parser,
/*cast_p=*/false);
/* Look for the `,'. */
cp_parser_require (parser, CPP_COMMA, "`,'");
/* Parse the type-id. */
type = cp_parser_type_id (parser);
/* Look for the closing `)'. */
cp_parser_require (parser, CPP_CLOSE_PAREN, "`)'");
/* Using `va_arg' in a constant-expression is not
allowed. */
if (cp_parser_non_integral_constant_expression (parser,
"`va_arg'"))
return error_mark_node;
return build_x_va_arg (expression, type);
}
case RID_OFFSETOF:
return cp_parser_builtin_offsetof (parser);
/* Objective-C++ expressions. */
case RID_AT_ENCODE:
case RID_AT_PROTOCOL:
case RID_AT_SELECTOR:
return cp_parser_objc_expression (parser);
default:
cp_parser_error (parser, "expected primary-expression");
return error_mark_node;
}
/* An id-expression can start with either an identifier, a
`::' as the beginning of a qualified-id, or the "operator"
keyword. */
case CPP_NAME:
case CPP_SCOPE:
case CPP_TEMPLATE_ID:
case CPP_NESTED_NAME_SPECIFIER:
{
tree id_expression;
tree decl;
const char *error_msg;
bool template_p;
bool done;
id_expression:
/* Parse the id-expression. */
id_expression
= cp_parser_id_expression (parser,
/*template_keyword_p=*/false,
/*check_dependency_p=*/true,
&template_p,
/*declarator_p=*/false,
/*optional_p=*/false);
if (id_expression == error_mark_node)
return error_mark_node;
token = cp_lexer_peek_token (parser->lexer);
done = (token->type != CPP_OPEN_SQUARE
&& token->type != CPP_OPEN_PAREN
&& token->type != CPP_DOT
&& token->type != CPP_DEREF
&& token->type != CPP_PLUS_PLUS
&& token->type != CPP_MINUS_MINUS);
/* If we have a template-id, then no further lookup is
required. If the template-id was for a template-class, we
will sometimes have a TYPE_DECL at this point. */
if (TREE_CODE (id_expression) == TEMPLATE_ID_EXPR
|| TREE_CODE (id_expression) == TYPE_DECL)
decl = id_expression;
/* Look up the name. */
else
{
tree ambiguous_decls;
decl = cp_parser_lookup_name (parser, id_expression,
none_type,
template_p,
/*is_namespace=*/false,
/*check_dependency=*/true,
&ambiguous_decls);
/* If the lookup was ambiguous, an error will already have
been issued. */
if (ambiguous_decls)
return error_mark_node;
/* In Objective-C++, an instance variable (ivar) may be preferred
to whatever cp_parser_lookup_name() found. */
decl = objc_lookup_ivar (decl, id_expression);
/* If name lookup gives us a SCOPE_REF, then the
qualifying scope was dependent. */
if (TREE_CODE (decl) == SCOPE_REF)
return decl;
/* Check to see if DECL is a local variable in a context
where that is forbidden. */
if (parser->local_variables_forbidden_p
&& local_variable_p (decl))
{
/* It might be that we only found DECL because we are
trying to be generous with pre-ISO scoping rules.
For example, consider:
int i;
void g() {
for (int i = 0; i < 10; ++i) {}
extern void f(int j = i);
}
Here, name look up will originally find the out
of scope `i'. We need to issue a warning message,
but then use the global `i'. */
decl = check_for_out_of_scope_variable (decl);
if (local_variable_p (decl))
{
error ("local variable %qD may not appear in this context",
decl);
return error_mark_node;
}
}
}
decl = (finish_id_expression
(id_expression, decl, parser->scope,
idk,
parser->integral_constant_expression_p,
parser->allow_non_integral_constant_expression_p,
&parser->non_integral_constant_expression_p,
template_p, done, address_p,
template_arg_p,
&error_msg));
if (error_msg)
cp_parser_error (parser, error_msg);
return decl;
}
/* Anything else is an error. */
default:
/* ...unless we have an Objective-C++ message or string literal, that is. */
if (c_dialect_objc ()
&& (token->type == CPP_OPEN_SQUARE || token->type == CPP_OBJC_STRING))
return cp_parser_objc_expression (parser);
cp_parser_error (parser, "expected primary-expression");
return error_mark_node;
}
}
/* Parse an id-expression.
id-expression:
unqualified-id
qualified-id
qualified-id:
:: [opt] nested-name-specifier template [opt] unqualified-id
:: identifier
:: operator-function-id
:: template-id
Return a representation of the unqualified portion of the
identifier. Sets PARSER->SCOPE to the qualifying scope if there is
a `::' or nested-name-specifier.
Often, if the id-expression was a qualified-id, the caller will
want to make a SCOPE_REF to represent the qualified-id. This
function does not do this in order to avoid wastefully creating
SCOPE_REFs when they are not required.
If TEMPLATE_KEYWORD_P is true, then we have just seen the
`template' keyword.
If CHECK_DEPENDENCY_P is false, then names are looked up inside
uninstantiated templates.
If *TEMPLATE_P is non-NULL, it is set to true iff the
`template' keyword is used to explicitly indicate that the entity
named is a template.
If DECLARATOR_P is true, the id-expression is appearing as part of
a declarator, rather than as part of an expression. */
static tree
cp_parser_id_expression (cp_parser *parser,
			 bool template_keyword_p,
			 bool check_dependency_p,
			 bool *template_p,
			 bool declarator_p,
			 bool optional_p)
{
  bool global_scope_p;
  bool nested_name_specifier_p;

  /* Assume the `template' keyword was not used.  */
  if (template_p)
    *template_p = template_keyword_p;

  /* Look for the optional `::' operator.  */
  global_scope_p
    = (cp_parser_global_scope_opt (parser, /*current_scope_valid_p=*/false)
       != NULL_TREE);
  /* Look for the optional nested-name-specifier.  As a side effect,
     this sets PARSER->SCOPE when a specifier is present.  */
  nested_name_specifier_p
    = (cp_parser_nested_name_specifier_opt (parser,
					    /*typename_keyword_p=*/false,
					    check_dependency_p,
					    /*type_p=*/false,
					    declarator_p)
       != NULL_TREE);
  /* If there is a nested-name-specifier, then we are looking at
     the first qualified-id production.  */
  if (nested_name_specifier_p)
    {
      tree saved_scope;
      tree saved_object_scope;
      tree saved_qualifying_scope;
      tree unqualified_id;
      bool is_template;

      /* See if the next token is the `template' keyword.  IS_TEMPLATE
	 provides local storage when the caller passed no TEMPLATE_P
	 out-parameter.  */
      if (!template_p)
	template_p = &is_template;
      *template_p = cp_parser_optional_template_keyword (parser);
      /* Name lookup we do during the processing of the
	 unqualified-id might obliterate SCOPE.  */
      saved_scope = parser->scope;
      saved_object_scope = parser->object_scope;
      saved_qualifying_scope = parser->qualifying_scope;
      /* Process the final unqualified-id.  */
      unqualified_id = cp_parser_unqualified_id (parser, *template_p,
						 check_dependency_p,
						 declarator_p,
						 /*optional_p=*/false);
      /* Restore the SAVED_SCOPE for our caller.  */
      parser->scope = saved_scope;
      parser->object_scope = saved_object_scope;
      parser->qualifying_scope = saved_qualifying_scope;

      return unqualified_id;
    }
  /* Otherwise, if we are in global scope, then we are looking at one
     of the other qualified-id productions.  */
  else if (global_scope_p)
    {
      cp_token *token;
      tree id;

      /* Peek at the next token.  */
      token = cp_lexer_peek_token (parser->lexer);

      /* If it's an identifier, and the next token is not a "<", then
	 we can avoid the template-id case.  This is an optimization
	 for this common case.  */
      if (token->type == CPP_NAME
	  && !cp_parser_nth_token_starts_template_argument_list_p
	       (parser, 2))
	return cp_parser_identifier (parser);

      cp_parser_parse_tentatively (parser);
      /* Try a template-id.  */
      id = cp_parser_template_id (parser,
				  /*template_keyword_p=*/false,
				  /*check_dependency_p=*/true,
				  declarator_p);
      /* If that worked, we're done.  */
      if (cp_parser_parse_definitely (parser))
	return id;

      /* Peek at the next token.  (Changes in the token buffer may
	 have invalidated the pointer obtained above.)  */
      token = cp_lexer_peek_token (parser->lexer);

      switch (token->type)
	{
	case CPP_NAME:
	  return cp_parser_identifier (parser);

	case CPP_KEYWORD:
	  if (token->keyword == RID_OPERATOR)
	    return cp_parser_operator_function_id (parser);
	  /* Fall through.  */

	default:
	  cp_parser_error (parser, "expected id-expression");
	  return error_mark_node;
	}
    }
  else
    /* No `::' and no nested-name-specifier: a plain unqualified-id.  */
    return cp_parser_unqualified_id (parser, template_keyword_p,
				     /*check_dependency_p=*/true,
				     declarator_p,
				     optional_p);
}
/* Parse an unqualified-id.
unqualified-id:
identifier
operator-function-id
conversion-function-id
~ class-name
template-id
If TEMPLATE_KEYWORD_P is TRUE, we have just seen the `template'
keyword, in a construct like `A::template ...'.
Returns a representation of unqualified-id. For the `identifier'
production, an IDENTIFIER_NODE is returned. For the `~ class-name'
production a BIT_NOT_EXPR is returned; the operand of the
BIT_NOT_EXPR is an IDENTIFIER_NODE for the class-name. For the
other productions, see the documentation accompanying the
corresponding parsing functions. If CHECK_DEPENDENCY_P is false,
names are looked up in uninstantiated templates. If DECLARATOR_P
is true, the unqualified-id is appearing as part of a declarator,
rather than as part of an expression. */
static tree
cp_parser_unqualified_id (cp_parser* parser,
			  bool template_keyword_p,
			  bool check_dependency_p,
			  bool declarator_p,
			  bool optional_p)
{
  cp_token *token;

  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);

  switch (token->type)
    {
    case CPP_NAME:
      {
	tree id;

	/* We don't know yet whether or not this will be a
	   template-id.  */
	cp_parser_parse_tentatively (parser);
	/* Try a template-id.  */
	id = cp_parser_template_id (parser, template_keyword_p,
				    check_dependency_p,
				    declarator_p);
	/* If it worked, we're done.  */
	if (cp_parser_parse_definitely (parser))
	  return id;
	/* Otherwise, it's an ordinary identifier.  */
	return cp_parser_identifier (parser);
      }

    case CPP_TEMPLATE_ID:
      /* An already-recognized template-id token; just re-parse it.  */
      return cp_parser_template_id (parser, template_keyword_p,
				    check_dependency_p,
				    declarator_p);

    case CPP_COMPL:
      {
	tree type_decl;
	tree qualifying_scope;
	tree object_scope;
	tree scope;
	bool done;

	/* Consume the `~' token.  */
	cp_lexer_consume_token (parser->lexer);
	/* Parse the class-name.  The standard, as written, seems to
	   say that:

	     template <typename T> struct S { ~S (); };
	     template <typename T> S<T>::~S() {}

	   is invalid, since `~' must be followed by a class-name, but
	   `S<T>' is dependent, and so not known to be a class.
	   That's not right; we need to look in uninstantiated
	   templates.  A further complication arises from:

	     template <typename T> void f(T t) {
	       t.T::~T();
	     }

	   Here, it is not possible to look up `T' in the scope of `T'
	   itself.  We must look in both the current scope, and the
	   scope of the containing complete expression.

	   Yet another issue is:

	     struct S {
	       int S;
	       ~S();
	     };

	     S::~S() {}

	   The standard does not seem to say that the `S' in `~S'
	   should refer to the type `S' and not the data member
	   `S::S'.  */

	/* DR 244 says that we look up the name after the "~" in the
	   same scope as we looked up the qualifying name.  That idea
	   isn't fully worked out; it's more complicated than that.  */
	scope = parser->scope;
	object_scope = parser->object_scope;
	qualifying_scope = parser->qualifying_scope;

	/* Check for invalid scopes.  Consume the stray name, if any,
	   so that parsing can continue past the error.  */
	if (scope == error_mark_node)
	  {
	    if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
	      cp_lexer_consume_token (parser->lexer);
	    return error_mark_node;
	  }
	if (scope && TREE_CODE (scope) == NAMESPACE_DECL)
	  {
	    /* A namespace can never name a destructor's class.  Only
	       issue the diagnostic when committed to this parse.  */
	    if (!cp_parser_uncommitted_to_tentative_parse_p (parser))
	      error ("scope %qT before %<~%> is not a class-name", scope);
	    cp_parser_simulate_error (parser);
	    if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
	      cp_lexer_consume_token (parser->lexer);
	    return error_mark_node;
	  }
	gcc_assert (!scope || TYPE_P (scope));

	/* If the name is of the form "X::~X" it's OK.  */
	token = cp_lexer_peek_token (parser->lexer);
	if (scope
	    && token->type == CPP_NAME
	    && (cp_lexer_peek_nth_token (parser->lexer, 2)->type
		== CPP_OPEN_PAREN)
	    && constructor_name_p (token->u.value, scope))
	  {
	    cp_lexer_consume_token (parser->lexer);
	    return build_nt (BIT_NOT_EXPR, scope);
	  }

	/* If there was an explicit qualification (S::~T), first look
	   in the scope given by the qualification (i.e., S).  */
	done = false;
	type_decl = NULL_TREE;
	if (scope)
	  {
	    cp_parser_parse_tentatively (parser);
	    type_decl = cp_parser_class_name (parser,
					      /*typename_keyword_p=*/false,
					      /*template_keyword_p=*/false,
					      none_type,
					      /*check_dependency=*/false,
					      /*class_head_p=*/false,
					      declarator_p);
	    if (cp_parser_parse_definitely (parser))
	      done = true;
	  }
	/* In "N::S::~S", look in "N" as well.  */
	if (!done && scope && qualifying_scope)
	  {
	    cp_parser_parse_tentatively (parser);
	    parser->scope = qualifying_scope;
	    parser->object_scope = NULL_TREE;
	    parser->qualifying_scope = NULL_TREE;
	    type_decl
	      = cp_parser_class_name (parser,
				      /*typename_keyword_p=*/false,
				      /*template_keyword_p=*/false,
				      none_type,
				      /*check_dependency=*/false,
				      /*class_head_p=*/false,
				      declarator_p);
	    if (cp_parser_parse_definitely (parser))
	      done = true;
	  }
	/* In "p->S::~T", look in the scope given by "*p" as well.  */
	else if (!done && object_scope)
	  {
	    cp_parser_parse_tentatively (parser);
	    parser->scope = object_scope;
	    parser->object_scope = NULL_TREE;
	    parser->qualifying_scope = NULL_TREE;
	    type_decl
	      = cp_parser_class_name (parser,
				      /*typename_keyword_p=*/false,
				      /*template_keyword_p=*/false,
				      none_type,
				      /*check_dependency=*/false,
				      /*class_head_p=*/false,
				      declarator_p);
	    if (cp_parser_parse_definitely (parser))
	      done = true;
	  }
	/* Look in the surrounding context.  */
	if (!done)
	  {
	    parser->scope = NULL_TREE;
	    parser->object_scope = NULL_TREE;
	    parser->qualifying_scope = NULL_TREE;
	    type_decl
	      = cp_parser_class_name (parser,
				      /*typename_keyword_p=*/false,
				      /*template_keyword_p=*/false,
				      none_type,
				      /*check_dependency=*/false,
				      /*class_head_p=*/false,
				      declarator_p);
	  }
	/* If an error occurred, assume that the name of the
	   destructor is the same as the name of the qualifying
	   class.  That allows us to keep parsing after running
	   into ill-formed destructor names.  */
	if (type_decl == error_mark_node && scope)
	  return build_nt (BIT_NOT_EXPR, scope);
	else if (type_decl == error_mark_node)
	  return error_mark_node;

	/* Check that destructor name and scope match.  */
	if (declarator_p && scope && !check_dtor_name (scope, type_decl))
	  {
	    if (!cp_parser_uncommitted_to_tentative_parse_p (parser))
	      error ("declaration of %<~%T%> as member of %qT",
		     type_decl, scope);
	    cp_parser_simulate_error (parser);
	    return error_mark_node;
	  }

	/* [class.dtor]

	   A typedef-name that names a class shall not be used as the
	   identifier in the declarator for a destructor declaration.  */
	if (declarator_p
	    && !DECL_IMPLICIT_TYPEDEF_P (type_decl)
	    && !DECL_SELF_REFERENCE_P (type_decl)
	    && !cp_parser_uncommitted_to_tentative_parse_p (parser))
	  error ("typedef-name %qD used as destructor declarator",
		 type_decl);

	return build_nt (BIT_NOT_EXPR, TREE_TYPE (type_decl));
      }

    case CPP_KEYWORD:
      if (token->keyword == RID_OPERATOR)
	{
	  tree id;

	  /* This could be a template-id, so we try that first.  */
	  cp_parser_parse_tentatively (parser);
	  /* Try a template-id.  */
	  id = cp_parser_template_id (parser, template_keyword_p,
				      /*check_dependency_p=*/true,
				      declarator_p);
	  /* If that worked, we're done.  */
	  if (cp_parser_parse_definitely (parser))
	    return id;
	  /* We still don't know whether we're looking at an
	     operator-function-id or a conversion-function-id.  */
	  cp_parser_parse_tentatively (parser);
	  /* Try an operator-function-id.  */
	  id = cp_parser_operator_function_id (parser);
	  /* If that didn't work, try a conversion-function-id.  */
	  if (!cp_parser_parse_definitely (parser))
	    id = cp_parser_conversion_function_id (parser);

	  return id;
	}
      /* Fall through.  */

    default:
      if (optional_p)
	return NULL_TREE;
      cp_parser_error (parser, "expected unqualified-id");
      return error_mark_node;
    }
}
/* Parse an (optional) nested-name-specifier.
nested-name-specifier:
class-or-namespace-name :: nested-name-specifier [opt]
class-or-namespace-name :: template nested-name-specifier [opt]
PARSER->SCOPE should be set appropriately before this function is
called. TYPENAME_KEYWORD_P is TRUE if the `typename' keyword is in
effect. TYPE_P is TRUE if non-type bindings should be ignored
in name lookups.
Sets PARSER->SCOPE to the class (TYPE) or namespace
(NAMESPACE_DECL) specified by the nested-name-specifier, or leaves
it unchanged if there is no nested-name-specifier. Returns the new
scope iff there is a nested-name-specifier, or NULL_TREE otherwise.
If IS_DECLARATION is TRUE, the nested-name-specifier is known to be
part of a declaration and/or decl-specifier. */
static tree
cp_parser_nested_name_specifier_opt (cp_parser *parser,
				     bool typename_keyword_p,
				     bool check_dependency_p,
				     bool type_p,
				     bool is_declaration)
{
  bool success = false;
  cp_token_position start = 0;
  cp_token *token;

  /* Remember where the nested-name-specifier starts.  START stays 0
     unless we are in a tentative parse; it later enables replacing
     the parsed tokens with a single CPP_NESTED_NAME_SPECIFIER.  */
  if (cp_parser_uncommitted_to_tentative_parse_p (parser))
    {
      start = cp_lexer_token_position (parser->lexer, false);
      push_deferring_access_checks (dk_deferred);
    }

  /* Each iteration consumes one `name ::' component.  */
  while (true)
    {
      tree new_scope;
      tree old_scope;
      tree saved_qualifying_scope;
      bool template_keyword_p;

      /* Spot cases that cannot be the beginning of a
	 nested-name-specifier.  */
      token = cp_lexer_peek_token (parser->lexer);

      /* If the next token is CPP_NESTED_NAME_SPECIFIER, just process
	 the already parsed nested-name-specifier.  */
      if (token->type == CPP_NESTED_NAME_SPECIFIER)
	{
	  /* Grab the nested-name-specifier and continue the loop.  */
	  cp_parser_pre_parsed_nested_name_specifier (parser);
	  /* If we originally encountered this nested-name-specifier
	     with IS_DECLARATION set to false, we will not have
	     resolved TYPENAME_TYPEs, so we must do so here.  */
	  if (is_declaration
	      && TREE_CODE (parser->scope) == TYPENAME_TYPE)
	    {
	      new_scope = resolve_typename_type (parser->scope,
						 /*only_current_p=*/false);
	      if (new_scope != error_mark_node)
		parser->scope = new_scope;
	    }
	  success = true;
	  continue;
	}

      /* Spot cases that cannot be the beginning of a
	 nested-name-specifier.  On the second and subsequent times
	 through the loop, we look for the `template' keyword.  */
      if (success && token->keyword == RID_TEMPLATE)
	;
      /* A template-id can start a nested-name-specifier.  */
      else if (token->type == CPP_TEMPLATE_ID)
	;
      else
	{
	  /* If the next token is not an identifier, then it is
	     definitely not a class-or-namespace-name.  */
	  if (token->type != CPP_NAME)
	    break;
	  /* If the following token is neither a `<' (to begin a
	     template-id), nor a `::', then we are not looking at a
	     nested-name-specifier.  */
	  token = cp_lexer_peek_nth_token (parser->lexer, 2);
	  if (token->type != CPP_SCOPE
	      && !cp_parser_nth_token_starts_template_argument_list_p
		  (parser, 2))
	    break;
	}

      /* The nested-name-specifier is optional, so we parse
	 tentatively.  */
      cp_parser_parse_tentatively (parser);

      /* Look for the optional `template' keyword, if this isn't the
	 first time through the loop.  */
      if (success)
	template_keyword_p = cp_parser_optional_template_keyword (parser);
      else
	template_keyword_p = false;

      /* Save the old scope since the name lookup we are about to do
	 might destroy it.  */
      old_scope = parser->scope;
      saved_qualifying_scope = parser->qualifying_scope;
      /* In a declarator-id like "X<T>::I::Y<T>" we must be able to
	 look up names in "X<T>::I" in order to determine that "Y" is
	 a template.  So, if we have a typename at this point, we make
	 an effort to look through it.  */
      if (is_declaration
	  && !typename_keyword_p
	  && parser->scope
	  && TREE_CODE (parser->scope) == TYPENAME_TYPE)
	parser->scope = resolve_typename_type (parser->scope,
					       /*only_current_p=*/false);
      /* Parse the qualifying entity.  */
      new_scope
	= cp_parser_class_or_namespace_name (parser,
					     typename_keyword_p,
					     template_keyword_p,
					     check_dependency_p,
					     type_p,
					     is_declaration);
      /* Look for the `::' token.  */
      cp_parser_require (parser, CPP_SCOPE, "`::'");

      /* If we found what we wanted, we keep going; otherwise, we're
	 done.  */
      if (!cp_parser_parse_definitely (parser))
	{
	  bool error_p = false;

	  /* Restore the OLD_SCOPE since it was valid before the
	     failed attempt at finding the last
	     class-or-namespace-name.  */
	  parser->scope = old_scope;
	  parser->qualifying_scope = saved_qualifying_scope;
	  if (cp_parser_uncommitted_to_tentative_parse_p (parser))
	    break;
	  /* If the next token is an identifier, and the one after
	     that is a `::', then any valid interpretation would have
	     found a class-or-namespace-name.  Skip such components,
	     diagnosing only the first bad one.  */
	  while (cp_lexer_next_token_is (parser->lexer, CPP_NAME)
		 && (cp_lexer_peek_nth_token (parser->lexer, 2)->type
		     == CPP_SCOPE)
		 && (cp_lexer_peek_nth_token (parser->lexer, 3)->type
		     != CPP_COMPL))
	    {
	      token = cp_lexer_consume_token (parser->lexer);
	      if (!error_p)
		{
		  if (!token->ambiguous_p)
		    {
		      tree decl;
		      tree ambiguous_decls;
		      decl = cp_parser_lookup_name (parser, token->u.value,
						    none_type,
						    /*is_template=*/false,
						    /*is_namespace=*/false,
						    /*check_dependency=*/true,
						    &ambiguous_decls);
		      if (TREE_CODE (decl) == TEMPLATE_DECL)
			error ("%qD used without template parameters", decl);
		      else if (ambiguous_decls)
			{
			  error ("reference to %qD is ambiguous",
				 token->u.value);
			  print_candidates (ambiguous_decls);
			  decl = error_mark_node;
			}
		      else
			cp_parser_name_lookup_error
			  (parser, token->u.value, decl,
			   "is not a class or namespace");
		    }
		  parser->scope = error_mark_node;
		  error_p = true;
		  /* Treat this as a successful nested-name-specifier
		     due to:

		     [basic.lookup.qual]

		     If the name found is not a class-name (clause
		     _class_) or namespace-name (_namespace.def_), the
		     program is ill-formed.  */
		  success = true;
		}
	      /* Consume the `::'.  */
	      cp_lexer_consume_token (parser->lexer);
	    }

	  break;
	}

      /* We've found one valid nested-name-specifier.  */
      success = true;
      /* Name lookup always gives us a DECL.  */
      if (TREE_CODE (new_scope) == TYPE_DECL)
	new_scope = TREE_TYPE (new_scope);
      /* Uses of "template" must be followed by actual templates.  */
      if (template_keyword_p
	  && !(CLASS_TYPE_P (new_scope)
	       && ((CLASSTYPE_USE_TEMPLATE (new_scope)
		    && PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (new_scope)))
		   || CLASSTYPE_IS_TEMPLATE (new_scope)))
	  && !(TREE_CODE (new_scope) == TYPENAME_TYPE
	       && (TREE_CODE (TYPENAME_TYPE_FULLNAME (new_scope))
		   == TEMPLATE_ID_EXPR)))
	pedwarn (TYPE_P (new_scope)
		 ? "%qT is not a template"
		 : "%qD is not a template",
		 new_scope);
      /* If it is a class scope, try to complete it; we are about to
	 be looking up names inside the class.  */
      if (TYPE_P (new_scope)
	  /* Since checking types for dependency can be expensive,
	     avoid doing it if the type is already complete.  */
	  && !COMPLETE_TYPE_P (new_scope)
	  /* Do not try to complete dependent types.  */
	  && !dependent_type_p (new_scope))
	new_scope = complete_type (new_scope);
      /* Make sure we look in the right scope the next time through
	 the loop.  */
      parser->scope = new_scope;
    }

  /* If parsing tentatively, replace the sequence of tokens that makes
     up the nested-name-specifier with a CPP_NESTED_NAME_SPECIFIER
     token.  That way, should we re-parse the token stream, we will
     not have to repeat the effort required to do the parse, nor will
     we issue duplicate error messages.  */
  if (success && start)
    {
      cp_token *token;

      token = cp_lexer_token_at (parser->lexer, start);
      /* Reset the contents of the START token.  */
      token->type = CPP_NESTED_NAME_SPECIFIER;
      /* Retrieve any deferred checks.  Do not pop this access checks yet
	 so the memory will not be reclaimed during token replacing below.  */
      token->u.tree_check_value = GGC_CNEW (struct tree_check);
      token->u.tree_check_value->value = parser->scope;
      token->u.tree_check_value->checks = get_deferred_access_checks ();
      token->u.tree_check_value->qualifying_scope =
	parser->qualifying_scope;
      token->keyword = RID_MAX;

      /* Purge all subsequent tokens.  */
      cp_lexer_purge_tokens_after (parser->lexer, start);
    }

  if (start)
    pop_to_parent_deferring_access_checks ();

  return success ? parser->scope : NULL_TREE;
}
/* Parse a nested-name-specifier. See
cp_parser_nested_name_specifier_opt for details. This function
behaves identically, except that it will issue an error if no
nested-name-specifier is present. */
static tree
cp_parser_nested_name_specifier (cp_parser *parser,
				 bool typename_keyword_p,
				 bool check_dependency_p,
				 bool type_p,
				 bool is_declaration)
{
  /* Delegate to the optional variant, which returns NULL_TREE when no
     nested-name-specifier is present in the token stream.  */
  tree scope = cp_parser_nested_name_specifier_opt (parser,
						    typename_keyword_p,
						    check_dependency_p,
						    type_p,
						    is_declaration);

  /* Here the specifier is mandatory, so its absence is an error.  */
  if (scope == NULL_TREE)
    {
      cp_parser_error (parser, "expected nested-name-specifier");
      /* Clear any stale qualification so later parsing is not
	 confused by it.  */
      parser->scope = NULL_TREE;
    }

  return scope;
}
/* Parse a class-or-namespace-name.
class-or-namespace-name:
class-name
namespace-name
TYPENAME_KEYWORD_P is TRUE iff the `typename' keyword is in effect.
TEMPLATE_KEYWORD_P is TRUE iff the `template' keyword is in effect.
CHECK_DEPENDENCY_P is FALSE iff dependent names should be looked up.
TYPE_P is TRUE iff the next name should be taken as a class-name,
even if the same name is declared to be another entity in the same
scope.
Returns the class (TYPE_DECL) or namespace (NAMESPACE_DECL)
specified by the class-or-namespace-name. If neither is found the
ERROR_MARK_NODE is returned. */
static tree
cp_parser_class_or_namespace_name (cp_parser *parser,
				   bool typename_keyword_p,
				   bool template_keyword_p,
				   bool check_dependency_p,
				   bool type_p,
				   bool is_declaration)
{
  tree old_scope;
  tree old_qualifying_scope;
  tree old_object_scope;
  tree result;
  bool class_only_p;

  /* cp_parser_class_name will clobber PARSER->SCOPE, so remember the
     current qualification state before trying it.  */
  old_scope = parser->scope;
  old_qualifying_scope = parser->qualifying_scope;
  old_object_scope = parser->object_scope;

  /* After the `template' keyword, or when the qualifying scope is
     itself a type, only a class-name can appear here; there is no
     reason to consider a namespace-name at all.  */
  class_only_p = (template_keyword_p
		  || (old_scope != NULL_TREE && TYPE_P (old_scope)));

  /* Otherwise the class-name attempt is merely tentative.  */
  if (!class_only_p)
    cp_parser_parse_tentatively (parser);

  result = cp_parser_class_name (parser,
				 typename_keyword_p,
				 template_keyword_p,
				 type_p ? class_type : none_type,
				 check_dependency_p,
				 /*class_head_p=*/false,
				 is_declaration);

  /* If the tentative class-name parse failed, fall back to a
     namespace-name.  */
  if (!class_only_p && !cp_parser_parse_definitely (parser))
    {
      /* Undo whatever the failed attempt did to the qualification
	 state.  */
      parser->scope = old_scope;
      parser->qualifying_scope = old_qualifying_scope;
      parser->object_scope = old_object_scope;

      /* This function only parses components of a
	 nested-name-specifier, so unless an identifier followed by
	 `::' comes next, there is no valid component here.  */
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_NAME)
	  || cp_lexer_peek_nth_token (parser->lexer, 2)->type != CPP_SCOPE)
	return error_mark_node;

      result = cp_parser_namespace_name (parser);
    }

  return result;
}
/* Parse a postfix-expression.
postfix-expression:
primary-expression
postfix-expression [ expression ]
postfix-expression ( expression-list [opt] )
simple-type-specifier ( expression-list [opt] )
typename :: [opt] nested-name-specifier identifier
( expression-list [opt] )
typename :: [opt] nested-name-specifier template [opt] template-id
( expression-list [opt] )
postfix-expression . template [opt] id-expression
postfix-expression -> template [opt] id-expression
postfix-expression . pseudo-destructor-name
postfix-expression -> pseudo-destructor-name
postfix-expression ++
postfix-expression --
dynamic_cast < type-id > ( expression )
static_cast < type-id > ( expression )
reinterpret_cast < type-id > ( expression )
const_cast < type-id > ( expression )
typeid ( expression )
typeid ( type-id )
GNU Extension:
postfix-expression:
( type-id ) { initializer-list , [opt] }
This extension is a GNU version of the C99 compound-literal
construct. (The C99 grammar uses `type-name' instead of `type-id',
but they are essentially the same concept.)
If ADDRESS_P is true, the postfix expression is the operand of the
`&' operator. CAST_P is true if this expression is the target of a
cast.
Returns a representation of the expression. */
static tree
cp_parser_postfix_expression (cp_parser *parser, bool address_p, bool cast_p)
{
  cp_token *token;
  enum rid keyword;
  cp_id_kind idk = CP_ID_KIND_NONE;
  tree postfix_expression = NULL_TREE;

  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);
  /* Some of the productions are determined by keywords.  */
  keyword = token->keyword;
  switch (keyword)
    {
    case RID_DYNCAST:
    case RID_STATCAST:
    case RID_REINTCAST:
    case RID_CONSTCAST:
      {
	tree type;
	tree expression;
	const char *saved_message;

	/* All of these can be handled in the same way from the point
	   of view of parsing.  Begin by consuming the token
	   identifying the cast.  */
	cp_lexer_consume_token (parser->lexer);
	/* New types cannot be defined in the cast.  */
	saved_message = parser->type_definition_forbidden_message;
	parser->type_definition_forbidden_message
	  = "types may not be defined in casts";
	/* Look for the opening `<'.  */
	cp_parser_require (parser, CPP_LESS, "`<'");
	/* Parse the type to which we are casting.  */
	type = cp_parser_type_id (parser);
	/* Look for the closing `>'.  */
	cp_parser_require (parser, CPP_GREATER, "`>'");
	/* Restore the old message.  */
	parser->type_definition_forbidden_message = saved_message;
	/* And the expression which is being cast.  */
	cp_parser_require (parser, CPP_OPEN_PAREN, "`('");
	expression = cp_parser_expression (parser, /*cast_p=*/true);
	cp_parser_require (parser, CPP_CLOSE_PAREN, "`)'");
	/* Only type conversions to integral or enumeration types
	   can be used in constant-expressions.  */
	if (!cast_valid_in_integral_constant_expression_p (type)
	    && (cp_parser_non_integral_constant_expression
		(parser,
		 "a cast to a type other than an integral or "
		 "enumeration type")))
	  return error_mark_node;

	/* Build the tree representation appropriate to the keyword
	   that introduced the cast.  */
	switch (keyword)
	  {
	  case RID_DYNCAST:
	    postfix_expression
	      = build_dynamic_cast (type, expression);
	    break;
	  case RID_STATCAST:
	    postfix_expression
	      = build_static_cast (type, expression);
	    break;
	  case RID_REINTCAST:
	    postfix_expression
	      = build_reinterpret_cast (type, expression);
	    break;
	  case RID_CONSTCAST:
	    postfix_expression
	      = build_const_cast (type, expression);
	    break;
	  default:
	    gcc_unreachable ();
	  }
      }
      break;

    case RID_TYPEID:
      {
	tree type;
	const char *saved_message;
	bool saved_in_type_id_in_expr_p;

	/* Consume the `typeid' token.  */
	cp_lexer_consume_token (parser->lexer);
	/* Look for the `(' token.  */
	cp_parser_require (parser, CPP_OPEN_PAREN, "`('");
	/* Types cannot be defined in a `typeid' expression.  */
	saved_message = parser->type_definition_forbidden_message;
	parser->type_definition_forbidden_message
	  = "types may not be defined in a `typeid\' expression";
	/* We can't be sure yet whether we're looking at a type-id or an
	   expression.  */
	cp_parser_parse_tentatively (parser);
	/* Try a type-id first.  */
	saved_in_type_id_in_expr_p = parser->in_type_id_in_expr_p;
	parser->in_type_id_in_expr_p = true;
	type = cp_parser_type_id (parser);
	parser->in_type_id_in_expr_p = saved_in_type_id_in_expr_p;
	/* Look for the `)' token.  Otherwise, we can't be sure that
	   we're not looking at an expression: consider `typeid (int
	   (3))', for example.  */
	cp_parser_require (parser, CPP_CLOSE_PAREN, "`)'");
	/* If all went well, simply lookup the type-id.  */
	if (cp_parser_parse_definitely (parser))
	  postfix_expression = get_typeid (type);
	/* Otherwise, fall back to the expression variant.  */
	else
	  {
	    tree expression;

	    /* Look for an expression.  */
	    expression = cp_parser_expression (parser, /*cast_p=*/false);
	    /* Compute its typeid.  */
	    postfix_expression = build_typeid (expression);
	    /* Look for the `)' token.  */
	    cp_parser_require (parser, CPP_CLOSE_PAREN, "`)'");
	  }
	/* Restore the saved message.  */
	parser->type_definition_forbidden_message = saved_message;
	/* `typeid' may not appear in an integral constant expression.  */
	if (cp_parser_non_integral_constant_expression(parser,
						       "`typeid' operator"))
	  return error_mark_node;
      }
      break;

    case RID_TYPENAME:
      {
	tree type;
	/* The syntax permitted here is the same permitted for an
	   elaborated-type-specifier.  */
	type = cp_parser_elaborated_type_specifier (parser,
						    /*is_friend=*/false,
						    /*is_declaration=*/false);
	postfix_expression = cp_parser_functional_cast (parser, type);
      }
      break;

    default:
      {
	tree type;

	/* If the next thing is a simple-type-specifier, we may be
	   looking at a functional cast.  We could also be looking at
	   an id-expression.  So, we try the functional cast, and if
	   that doesn't work we fall back to the primary-expression.  */
	cp_parser_parse_tentatively (parser);
	/* Look for the simple-type-specifier.  */
	type = cp_parser_simple_type_specifier (parser,
						/*decl_specs=*/NULL,
						CP_PARSER_FLAGS_NONE);
	/* Parse the cast itself.  */
	if (!cp_parser_error_occurred (parser))
	  postfix_expression
	    = cp_parser_functional_cast (parser, type);
	/* If that worked, we're done.  */
	if (cp_parser_parse_definitely (parser))
	  break;

	/* If the functional-cast didn't work out, try a
	   compound-literal.  */
	if (cp_parser_allow_gnu_extensions_p (parser)
	    && cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN))
	  {
	    VEC(constructor_elt,gc) *initializer_list = NULL;
	    bool saved_in_type_id_in_expr_p;

	    cp_parser_parse_tentatively (parser);
	    /* Consume the `('.  */
	    cp_lexer_consume_token (parser->lexer);
	    /* Parse the type.  */
	    saved_in_type_id_in_expr_p = parser->in_type_id_in_expr_p;
	    parser->in_type_id_in_expr_p = true;
	    type = cp_parser_type_id (parser);
	    parser->in_type_id_in_expr_p = saved_in_type_id_in_expr_p;
	    /* Look for the `)'.  */
	    cp_parser_require (parser, CPP_CLOSE_PAREN, "`)'");
	    /* Look for the `{'.  */
	    cp_parser_require (parser, CPP_OPEN_BRACE, "`{'");
	    /* If things aren't going well, there's no need to
	       keep going.  */
	    if (!cp_parser_error_occurred (parser))
	      {
		bool non_constant_p;
		/* Parse the initializer-list.  */
		initializer_list
		  = cp_parser_initializer_list (parser, &non_constant_p);
		/* Allow a trailing `,'.  */
		if (cp_lexer_next_token_is (parser->lexer, CPP_COMMA))
		  cp_lexer_consume_token (parser->lexer);
		/* Look for the final `}'.  */
		cp_parser_require (parser, CPP_CLOSE_BRACE, "`}'");
	      }
	    /* If that worked, we're definitely looking at a
	       compound-literal expression.  */
	    if (cp_parser_parse_definitely (parser))
	      {
		/* Warn the user that a compound literal is not
		   allowed in standard C++.  */
		if (pedantic)
		  pedwarn ("ISO C++ forbids compound-literals");
		/* For simplicity, we disallow compound literals in
		   constant-expressions.  We could
		   allow compound literals of integer type, whose
		   initializer was a constant, in constant
		   expressions.  Permitting that usage, as a further
		   extension, would not change the meaning of any
		   currently accepted programs.  (Of course, as
		   compound literals are not part of ISO C++, the
		   standard has nothing to say.)  */
		if (cp_parser_non_integral_constant_expression
		    (parser, "non-constant compound literals"))
		  {
		    postfix_expression = error_mark_node;
		    break;
		  }
		/* Form the representation of the compound-literal.  */
		postfix_expression
		  = finish_compound_literal (type, initializer_list);
		break;
	      }
	  }

	/* It must be a primary-expression.  */
	postfix_expression
	  = cp_parser_primary_expression (parser, address_p, cast_p,
					  /*template_arg_p=*/false,
					  &idk);
      }
      break;
    }

  /* Keep looping until the postfix-expression is complete.  Each
     iteration consumes one trailing postfix operator ([], (), ., ->,
     ++, --) and folds it into POSTFIX_EXPRESSION.  */
  while (true)
    {
      if (idk == CP_ID_KIND_UNQUALIFIED
	  && TREE_CODE (postfix_expression) == IDENTIFIER_NODE
	  && cp_lexer_next_token_is_not (parser->lexer, CPP_OPEN_PAREN))
	/* It is not a Koenig lookup function call.  */
	postfix_expression
	  = unqualified_name_lookup_error (postfix_expression);

      /* Peek at the next token.  */
      token = cp_lexer_peek_token (parser->lexer);

      switch (token->type)
	{
	case CPP_OPEN_SQUARE:
	  postfix_expression
	    = cp_parser_postfix_open_square_expression (parser,
							postfix_expression,
							false);
	  idk = CP_ID_KIND_NONE;
	  break;

	case CPP_OPEN_PAREN:
	  /* postfix-expression ( expression-list [opt] ) */
	  {
	    bool koenig_p;
	    bool is_builtin_constant_p;
	    bool saved_integral_constant_expression_p = false;
	    bool saved_non_integral_constant_expression_p = false;
	    tree args;

	    is_builtin_constant_p
	      = DECL_IS_BUILTIN_CONSTANT_P (postfix_expression);
	    if (is_builtin_constant_p)
	      {
		/* The whole point of __builtin_constant_p is to allow
		   non-constant expressions to appear as arguments.  */
		saved_integral_constant_expression_p
		  = parser->integral_constant_expression_p;
		saved_non_integral_constant_expression_p
		  = parser->non_integral_constant_expression_p;
		parser->integral_constant_expression_p = false;
	      }
	    args = (cp_parser_parenthesized_expression_list
		    (parser, /*is_attribute_list=*/false,
		     /*cast_p=*/false,
		     /*non_constant_p=*/NULL));
	    if (is_builtin_constant_p)
	      {
		parser->integral_constant_expression_p
		  = saved_integral_constant_expression_p;
		parser->non_integral_constant_expression_p
		  = saved_non_integral_constant_expression_p;
	      }

	    if (args == error_mark_node)
	      {
		postfix_expression = error_mark_node;
		break;
	      }

	    /* Function calls are not permitted in
	       constant-expressions.  */
	    if (! builtin_valid_in_constant_expr_p (postfix_expression)
		&& cp_parser_non_integral_constant_expression (parser,
							       "a function call"))
	      {
		postfix_expression = error_mark_node;
		break;
	      }

	    koenig_p = false;
	    if (idk == CP_ID_KIND_UNQUALIFIED)
	      {
		if (TREE_CODE (postfix_expression) == IDENTIFIER_NODE)
		  {
		    if (args)
		      {
			koenig_p = true;
			postfix_expression
			  = perform_koenig_lookup (postfix_expression, args);
		      }
		    else
		      postfix_expression
			= unqualified_fn_lookup_error (postfix_expression);
		  }
		/* We do not perform argument-dependent lookup if
		   normal lookup finds a non-function, in accordance
		   with the expected resolution of DR 218.  */
		else if (args && is_overloaded_fn (postfix_expression))
		  {
		    tree fn = get_first_fn (postfix_expression);

		    if (TREE_CODE (fn) == TEMPLATE_ID_EXPR)
		      fn = OVL_CURRENT (TREE_OPERAND (fn, 0));

		    /* Only do argument dependent lookup if regular
		       lookup does not find a set of member functions.
		       [basic.lookup.koenig]/2a  */
		    if (!DECL_FUNCTION_MEMBER_P (fn))
		      {
			koenig_p = true;
			postfix_expression
			  = perform_koenig_lookup (postfix_expression, args);
		      }
		  }
	      }

	    if (TREE_CODE (postfix_expression) == COMPONENT_REF)
	      {
		tree instance = TREE_OPERAND (postfix_expression, 0);
		tree fn = TREE_OPERAND (postfix_expression, 1);

		if (processing_template_decl
		    && (type_dependent_expression_p (instance)
			|| (!BASELINK_P (fn)
			    && TREE_CODE (fn) != FIELD_DECL)
			|| type_dependent_expression_p (fn)
			|| any_type_dependent_arguments_p (args)))
		  {
		    postfix_expression
		      = build_min_nt (CALL_EXPR, postfix_expression,
				      args, NULL_TREE);
		    break;
		  }

		if (BASELINK_P (fn))
		  postfix_expression
		    = (build_new_method_call
		       (instance, fn, args, NULL_TREE,
			(idk == CP_ID_KIND_QUALIFIED
			 ? LOOKUP_NONVIRTUAL : LOOKUP_NORMAL),
			/*fn_p=*/NULL));
		else
		  postfix_expression
		    = finish_call_expr (postfix_expression, args,
					/*disallow_virtual=*/false,
					/*koenig_p=*/false);
	      }
	    else if (TREE_CODE (postfix_expression) == OFFSET_REF
		     || TREE_CODE (postfix_expression) == MEMBER_REF
		     || TREE_CODE (postfix_expression) == DOTSTAR_EXPR)
	      postfix_expression = (build_offset_ref_call_from_tree
				    (postfix_expression, args));
	    else if (idk == CP_ID_KIND_QUALIFIED)
	      /* A call to a static class member, or a namespace-scope
		 function.  */
	      postfix_expression
		= finish_call_expr (postfix_expression, args,
				    /*disallow_virtual=*/true,
				    koenig_p);
	    else
	      /* All other function calls.  */
	      postfix_expression
		= finish_call_expr (postfix_expression, args,
				    /*disallow_virtual=*/false,
				    koenig_p);

	    /* The POSTFIX_EXPRESSION is certainly no longer an id.  */
	    idk = CP_ID_KIND_NONE;
	  }
	  break;

	case CPP_DOT:
	case CPP_DEREF:
	  /* postfix-expression . template [opt] id-expression
	     postfix-expression . pseudo-destructor-name
	     postfix-expression -> template [opt] id-expression
	     postfix-expression -> pseudo-destructor-name */
	  /* Consume the `.' or `->' operator.  */
	  cp_lexer_consume_token (parser->lexer);
	  postfix_expression
	    = cp_parser_postfix_dot_deref_expression (parser, token->type,
						      postfix_expression,
						      false, &idk);
	  break;

	case CPP_PLUS_PLUS:
	  /* postfix-expression ++  */
	  /* Consume the `++' token.  */
	  cp_lexer_consume_token (parser->lexer);
	  /* Generate a representation for the complete expression.  */
	  postfix_expression
	    = finish_increment_expr (postfix_expression,
				     POSTINCREMENT_EXPR);
	  /* Increments may not appear in constant-expressions.  */
	  if (cp_parser_non_integral_constant_expression (parser,
							  "an increment"))
	    postfix_expression = error_mark_node;
	  idk = CP_ID_KIND_NONE;
	  break;

	case CPP_MINUS_MINUS:
	  /* postfix-expression -- */
	  /* Consume the `--' token.  */
	  cp_lexer_consume_token (parser->lexer);
	  /* Generate a representation for the complete expression.  */
	  postfix_expression
	    = finish_increment_expr (postfix_expression,
				     POSTDECREMENT_EXPR);
	  /* Decrements may not appear in constant-expressions.  */
	  if (cp_parser_non_integral_constant_expression (parser,
							  "a decrement"))
	    postfix_expression = error_mark_node;
	  idk = CP_ID_KIND_NONE;
	  break;

	default:
	  /* Anything else ends the postfix-expression.  */
	  return postfix_expression;
	}
    }

  /* We should never get here.  */
  gcc_unreachable ();
  return error_mark_node;
}
/* A subroutine of cp_parser_postfix_expression that also gets hijacked
by cp_parser_builtin_offsetof. We're looking for
postfix-expression [ expression ]
FOR_OFFSETOF is set if we're being called in that context, which
changes how we deal with integer constant expressions. */
static tree
cp_parser_postfix_open_square_expression (cp_parser *parser,
					  tree postfix_expression,
					  bool for_offsetof)
{
  tree index;

  /* Consume the `[' token.  */
  cp_lexer_consume_token (parser->lexer);

  /* Parse the index expression.  */
  /* ??? For offsetof, there is a question of what to allow here.  If
     offsetof is not being used in an integral constant expression context,
     then we *could* get the right answer by computing the value at runtime.
     If we are in an integral constant expression context, then we might
     be able to accept any constant expression; hard to say without analysis.
     Rather than open the barn door too wide right away, allow only integer
     constant expressions here.  */
  if (for_offsetof)
    index = cp_parser_constant_expression (parser, false, NULL);
  else
    index = cp_parser_expression (parser, /*cast_p=*/false);

  /* Look for the closing `]'.  */
  cp_parser_require (parser, CPP_CLOSE_SQUARE, "`]'");

  /* Build the ARRAY_REF.  */
  postfix_expression = grok_array_decl (postfix_expression, index);

  /* When not doing offsetof, array references are not permitted in
     constant-expressions.  */
  if (!for_offsetof
      && (cp_parser_non_integral_constant_expression
	  (parser, "an array reference")))
    postfix_expression = error_mark_node;

  return postfix_expression;
}
/* A subroutine of cp_parser_postfix_expression that also gets hijacked
by cp_parser_builtin_offsetof. We're looking for
postfix-expression . template [opt] id-expression
postfix-expression . pseudo-destructor-name
postfix-expression -> template [opt] id-expression
postfix-expression -> pseudo-destructor-name
FOR_OFFSETOF is set if we're being called in that context. That sorta
limits what of the above we'll actually accept, but nevermind.
TOKEN_TYPE is the "." or "->" token, which will already have been
removed from the stream. */
static tree
cp_parser_postfix_dot_deref_expression (cp_parser *parser,
					enum cpp_ttype token_type,
					tree postfix_expression,
					bool for_offsetof, cp_id_kind *idk)
{
  tree name;
  bool dependent_p;
  bool pseudo_destructor_p;
  tree scope = NULL_TREE;

  /* If this is a `->' operator, dereference the pointer.  */
  if (token_type == CPP_DEREF)
    postfix_expression = build_x_arrow (postfix_expression);
  /* Check to see whether or not the expression is type-dependent.  */
  dependent_p = type_dependent_expression_p (postfix_expression);
  /* The identifier following the `->' or `.' is not qualified.  */
  parser->scope = NULL_TREE;
  parser->qualifying_scope = NULL_TREE;
  parser->object_scope = NULL_TREE;
  *idk = CP_ID_KIND_NONE;
  /* Enter the scope corresponding to the type of the object
     given by the POSTFIX_EXPRESSION.  */
  if (!dependent_p && TREE_TYPE (postfix_expression) != NULL_TREE)
    {
      scope = TREE_TYPE (postfix_expression);
      /* According to the standard, no expression should ever have
	 reference type.  Unfortunately, we do not currently match
	 the standard in this respect in that our internal representation
	 of an expression may have reference type even when the standard
	 says it does not.  Therefore, we have to manually obtain the
	 underlying type here.  */
      scope = non_reference (scope);
      /* The type of the POSTFIX_EXPRESSION must be complete.  */
      if (scope == unknown_type_node)
	{
	  error ("%qE does not have class type", postfix_expression);
	  scope = NULL_TREE;
	}
      else
	scope = complete_type_or_else (scope, NULL_TREE);
      /* Let the name lookup machinery know that we are processing a
	 class member access expression.  */
      parser->context->object_type = scope;
      /* If something went wrong, we want to be able to discern that case,
	 as opposed to the case where there was no SCOPE due to the type
	 of expression being dependent.  */
      if (!scope)
	scope = error_mark_node;
      /* If the SCOPE was erroneous, make the various semantic analysis
	 functions exit quickly -- and without issuing additional error
	 messages.  */
      if (scope == error_mark_node)
	postfix_expression = error_mark_node;
    }

  /* Assume this expression is not a pseudo-destructor access.  */
  pseudo_destructor_p = false;

  /* If the SCOPE is a scalar type, then, if this is a valid program,
     we must be looking at a pseudo-destructor-name.  */
  if (scope && SCALAR_TYPE_P (scope))
    {
      tree s;
      tree type;

      cp_parser_parse_tentatively (parser);
      /* Parse the pseudo-destructor-name.  */
      s = NULL_TREE;
      cp_parser_pseudo_destructor_name (parser, &s, &type);
      if (cp_parser_parse_definitely (parser))
	{
	  pseudo_destructor_p = true;
	  postfix_expression
	    = finish_pseudo_destructor_expr (postfix_expression,
					     s, TREE_TYPE (type));
	}
    }

  if (!pseudo_destructor_p)
    {
      /* If the SCOPE is not a scalar type, we are looking at an
	 ordinary class member access expression, rather than a
	 pseudo-destructor-name.  */
      bool template_p;
      /* Parse the id-expression.  */
      name = (cp_parser_id_expression
	      (parser,
	       cp_parser_optional_template_keyword (parser),
	       /*check_dependency_p=*/true,
	       &template_p,
	       /*declarator_p=*/false,
	       /*optional_p=*/false));
      /* In general, build a SCOPE_REF if the member name is qualified.
	 However, if the name was not dependent and has already been
	 resolved; there is no need to build the SCOPE_REF.  For example;

	   struct X { void f(); };
	   template <typename T> void f(T* t) { t->X::f(); }

	 Even though "t" is dependent, "X::f" is not and has been resolved
	 to a BASELINK; there is no need to include scope information.  */

      /* But we do need to remember that there was an explicit scope for
	 virtual function calls.  */
      if (parser->scope)
	*idk = CP_ID_KIND_QUALIFIED;

      /* If the name is a template-id that names a type, we will get a
	 TYPE_DECL here.  That is invalid code.  */
      if (TREE_CODE (name) == TYPE_DECL)
	{
	  error ("invalid use of %qD", name);
	  postfix_expression = error_mark_node;
	}
      else
	{
	  if (name != error_mark_node && !BASELINK_P (name) && parser->scope)
	    {
	      name = build_qualified_name (/*type=*/NULL_TREE,
					   parser->scope,
					   name,
					   template_p);
	      parser->scope = NULL_TREE;
	      parser->qualifying_scope = NULL_TREE;
	      parser->object_scope = NULL_TREE;
	    }
	  if (scope && name && BASELINK_P (name))
	    adjust_result_of_qualified_name_lookup
	      (name, BINFO_TYPE (BASELINK_ACCESS_BINFO (name)), scope);
	  postfix_expression
	    = finish_class_member_access_expr (postfix_expression, name,
					       template_p);
	}
    }

  /* We no longer need to look up names in the scope of the object on
     the left-hand side of the `.' or `->' operator.  */
  parser->context->object_type = NULL_TREE;

  /* Outside of offsetof, these operators may not appear in
     constant-expressions.  */
  if (!for_offsetof
      && (cp_parser_non_integral_constant_expression
	  (parser, token_type == CPP_DEREF ? "'->'" : "`.'")))
    postfix_expression = error_mark_node;

  return postfix_expression;
}
/* Parse a parenthesized expression-list.
expression-list:
assignment-expression
expression-list, assignment-expression
attribute-list:
expression-list
identifier
identifier, expression-list
CAST_P is true if this expression is the target of a cast.
Returns a TREE_LIST. The TREE_VALUE of each node is a
representation of an assignment-expression. Note that a TREE_LIST
is returned even if there is only a single expression in the list.
error_mark_node is returned if the `(' or `)' is
missing.  NULL_TREE is returned on no expressions.  The parentheses
are eaten. IS_ATTRIBUTE_LIST is true if this is really an attribute
list being parsed. If NON_CONSTANT_P is non-NULL, *NON_CONSTANT_P
indicates whether or not all of the expressions in the list were
constant. */
static tree
cp_parser_parenthesized_expression_list (cp_parser* parser,
					 bool is_attribute_list,
					 bool cast_p,
					 bool *non_constant_p)
{
  tree expression_list = NULL_TREE;
  bool fold_expr_p = is_attribute_list;
  tree identifier = NULL_TREE;

  /* Assume all the expressions will be constant.  */
  if (non_constant_p)
    *non_constant_p = false;

  if (!cp_parser_require (parser, CPP_OPEN_PAREN, "`('"))
    return error_mark_node;

  /* Consume expressions until there are no more.  */
  if (cp_lexer_next_token_is_not (parser->lexer, CPP_CLOSE_PAREN))
    while (true)
      {
	tree expr;

	/* At the beginning of attribute lists, check to see if the
	   next token is an identifier.  */
	if (is_attribute_list
	    && cp_lexer_peek_token (parser->lexer)->type == CPP_NAME)
	  {
	    cp_token *token;

	    /* Consume the identifier.  */
	    token = cp_lexer_consume_token (parser->lexer);
	    /* Save the identifier.  */
	    identifier = token->u.value;
	  }
	else
	  {
	    /* Parse the next assignment-expression.  */
	    if (non_constant_p)
	      {
		bool expr_non_constant_p;
		expr = (cp_parser_constant_expression
			(parser, /*allow_non_constant_p=*/true,
			 &expr_non_constant_p));
		if (expr_non_constant_p)
		  *non_constant_p = true;
	      }
	    else
	      expr = cp_parser_assignment_expression (parser, cast_p);

	    if (fold_expr_p)
	      expr = fold_non_dependent_expr (expr);

	    /* Add it to the list.  We add error_mark_node
	       expressions to the list, so that we can still tell if
	       the correct form for a parenthesized expression-list
	       is found.  That gives better errors.  */
	    expression_list = tree_cons (NULL_TREE, expr, expression_list);

	    /* On a parse error, jump into the recovery code below to
	       resynchronize at an unnested `,' or `)'.  */
	    if (expr == error_mark_node)
	      goto skip_comma;
	  }

	/* After the first item, attribute lists look the same as
	   expression lists.  */
	is_attribute_list = false;

      get_comma:;
	/* If the next token isn't a `,', then we are done.  */
	if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA))
	  break;

	/* Otherwise, consume the `,' and keep going.  */
	cp_lexer_consume_token (parser->lexer);
      }

  if (!cp_parser_require (parser, CPP_CLOSE_PAREN, "`)'"))
    {
      int ending;

    skip_comma:;
      /* We try and resync to an unnested comma, as that will give the
	 user better diagnostics.  */
      ending = cp_parser_skip_to_closing_parenthesis (parser,
						      /*recovering=*/true,
						      /*or_comma=*/true,
						      /*consume_paren=*/true);
      if (ending < 0)
	goto get_comma;
      if (!ending)
	return error_mark_node;
    }

  /* We built up the list in reverse order so we must reverse it now.  */
  expression_list = nreverse (expression_list);
  if (identifier)
    expression_list = tree_cons (NULL_TREE, identifier, expression_list);

  return expression_list;
}
/* Parse a pseudo-destructor-name.
pseudo-destructor-name:
:: [opt] nested-name-specifier [opt] type-name :: ~ type-name
:: [opt] nested-name-specifier template template-id :: ~ type-name
:: [opt] nested-name-specifier [opt] ~ type-name
If either of the first two productions is used, sets *SCOPE to the
TYPE specified before the final `::'. Otherwise, *SCOPE is set to
NULL_TREE. *TYPE is set to the TYPE_DECL for the final type-name,
or ERROR_MARK_NODE if the parse fails. */
static void
cp_parser_pseudo_destructor_name (cp_parser* parser,
				  tree* scope,
				  tree* type)
{
  bool nested_name_specifier_p;

  /* Assume that things will not work out.  */
  *type = error_mark_node;

  /* Look for the optional `::' operator.  */
  cp_parser_global_scope_opt (parser, /*current_scope_valid_p=*/true);
  /* Look for the optional nested-name-specifier.  */
  nested_name_specifier_p
    = (cp_parser_nested_name_specifier_opt (parser,
					    /*typename_keyword_p=*/false,
					    /*check_dependency_p=*/true,
					    /*type_p=*/false,
					    /*is_declaration=*/true)
       != NULL_TREE);
  /* Now, if we saw a nested-name-specifier, we might be doing the
     second production.  */
  if (nested_name_specifier_p
      && cp_lexer_next_token_is_keyword (parser->lexer, RID_TEMPLATE))
    {
      /* Consume the `template' keyword.  */
      cp_lexer_consume_token (parser->lexer);
      /* Parse the template-id.  */
      cp_parser_template_id (parser,
			     /*template_keyword_p=*/true,
			     /*check_dependency_p=*/false,
			     /*is_declaration=*/true);
      /* Look for the `::' token.  */
      cp_parser_require (parser, CPP_SCOPE, "`::'");
    }
  /* If the next token is not a `~', then there might be some
     additional qualification.  */
  else if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMPL))
    {
      /* Look for the type-name.  */
      *scope = TREE_TYPE (cp_parser_type_name (parser));

      if (*scope == error_mark_node)
	return;

      /* If we don't have ::~, then something has gone wrong.  Since
	 the only caller of this function is looking for something
	 after `.' or `->' after a scalar type, most likely the
	 program is trying to get a member of a non-aggregate
	 type.  */
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_SCOPE)
	  || cp_lexer_peek_nth_token (parser->lexer, 2)->type != CPP_COMPL)
	{
	  cp_parser_error (parser, "request for member of non-aggregate type");
	  return;
	}

      /* Look for the `::' token.  */
      cp_parser_require (parser, CPP_SCOPE, "`::'");
    }
  else
    *scope = NULL_TREE;

  /* Look for the `~'.  */
  cp_parser_require (parser, CPP_COMPL, "`~'");
  /* Look for the type-name again.  We are not responsible for
     checking that it matches the first type-name.  */
  *type = cp_parser_type_name (parser);
}
/* Parse a unary-expression.
unary-expression:
postfix-expression
++ cast-expression
-- cast-expression
unary-operator cast-expression
sizeof unary-expression
sizeof ( type-id )
new-expression
delete-expression
GNU Extensions:
unary-expression:
__extension__ cast-expression
__alignof__ unary-expression
__alignof__ ( type-id )
__real__ cast-expression
__imag__ cast-expression
&& identifier
ADDRESS_P is true iff the unary-expression is appearing as the
operand of the `&' operator. CAST_P is true if this expression is
the target of a cast.
Returns a representation of the expression. */
static tree
cp_parser_unary_expression (cp_parser *parser, bool address_p, bool cast_p)
{
  cp_token *token;
  enum tree_code unary_operator;

  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);
  /* Some keywords give away the kind of expression.  */
  if (token->type == CPP_KEYWORD)
    {
      enum rid keyword = token->keyword;

      switch (keyword)
	{
	case RID_ALIGNOF:
	case RID_SIZEOF:
	  {
	    tree operand;
	    enum tree_code op;

	    op = keyword == RID_ALIGNOF ? ALIGNOF_EXPR : SIZEOF_EXPR;
	    /* Consume the token.  */
	    cp_lexer_consume_token (parser->lexer);
	    /* Parse the operand.  */
	    operand = cp_parser_sizeof_operand (parser, keyword);

	    /* The operand may be either a type-id or an expression;
	       dispatch to the appropriate semantic routine.  */
	    if (TYPE_P (operand))
	      return cxx_sizeof_or_alignof_type (operand, op, true);
	    else
	      return cxx_sizeof_or_alignof_expr (operand, op);
	  }

	case RID_NEW:
	  return cp_parser_new_expression (parser);

	case RID_DELETE:
	  return cp_parser_delete_expression (parser);

	case RID_EXTENSION:
	  {
	    /* The saved value of the PEDANTIC flag.  */
	    int saved_pedantic;
	    tree expr;

	    /* Save away the PEDANTIC flag.  */
	    cp_parser_extension_opt (parser, &saved_pedantic);
	    /* Parse the cast-expression.  */
	    expr = cp_parser_simple_cast_expression (parser);
	    /* Restore the PEDANTIC flag.  */
	    pedantic = saved_pedantic;

	    return expr;
	  }

	case RID_REALPART:
	case RID_IMAGPART:
	  {
	    tree expression;

	    /* Consume the `__real__' or `__imag__' token.  */
	    cp_lexer_consume_token (parser->lexer);
	    /* Parse the cast-expression.  */
	    expression = cp_parser_simple_cast_expression (parser);
	    /* Create the complete representation.  */
	    return build_x_unary_op ((keyword == RID_REALPART
				      ? REALPART_EXPR : IMAGPART_EXPR),
				     expression);
	  }
	  break;

	default:
	  break;
	}
    }

  /* Look for the `:: new' and `:: delete', which also signal the
     beginning of a new-expression, or delete-expression,
     respectively.  If the next token is `::', then it might be one of
     these.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_SCOPE))
    {
      enum rid keyword;

      /* See if the token after the `::' is one of the keywords in
	 which we're interested.  */
      keyword = cp_lexer_peek_nth_token (parser->lexer, 2)->keyword;
      /* If it's `new', we have a new-expression.  */
      if (keyword == RID_NEW)
	return cp_parser_new_expression (parser);
      /* Similarly, for `delete'.  */
      else if (keyword == RID_DELETE)
	return cp_parser_delete_expression (parser);
    }

  /* Look for a unary operator.  */
  unary_operator = cp_parser_unary_operator (token);
  /* The `++' and `--' operators can be handled similarly, even though
     they are not technically unary-operators in the grammar.  */
  if (unary_operator == ERROR_MARK)
    {
      if (token->type == CPP_PLUS_PLUS)
	unary_operator = PREINCREMENT_EXPR;
      else if (token->type == CPP_MINUS_MINUS)
	unary_operator = PREDECREMENT_EXPR;
      /* Handle the GNU address-of-label extension.  */
      else if (cp_parser_allow_gnu_extensions_p (parser)
	       && token->type == CPP_AND_AND)
	{
	  tree identifier;

	  /* Consume the '&&' token.  */
	  cp_lexer_consume_token (parser->lexer);
	  /* Look for the identifier.  */
	  identifier = cp_parser_identifier (parser);
	  /* Create an expression representing the address.  */
	  return finish_label_address_expr (identifier);
	}
    }
  if (unary_operator != ERROR_MARK)
    {
      tree cast_expression;
      tree expression = error_mark_node;
      /* When non-NULL, NON_CONSTANT_P names the operator for the
	 "not allowed in a constant-expression" diagnostic below.  */
      const char *non_constant_p = NULL;

      /* Consume the operator token.  */
      token = cp_lexer_consume_token (parser->lexer);
      /* Parse the cast-expression.  */
      cast_expression
	= cp_parser_cast_expression (parser,
				     unary_operator == ADDR_EXPR,
				     /*cast_p=*/false);
      /* Now, build an appropriate representation.  */
      switch (unary_operator)
	{
	case INDIRECT_REF:
	  non_constant_p = "`*'";
	  expression = build_x_indirect_ref (cast_expression, "unary *");
	  break;

	case ADDR_EXPR:
	  non_constant_p = "`&'";
	  /* Fall through.  */
	case BIT_NOT_EXPR:
	  expression = build_x_unary_op (unary_operator, cast_expression);
	  break;

	case PREINCREMENT_EXPR:
	case PREDECREMENT_EXPR:
	  non_constant_p = (unary_operator == PREINCREMENT_EXPR
			    ? "`++'" : "`--'");
	  /* Fall through.  */
	case UNARY_PLUS_EXPR:
	case NEGATE_EXPR:
	case TRUTH_NOT_EXPR:
	  expression = finish_unary_op_expr (unary_operator, cast_expression);
	  break;

	default:
	  gcc_unreachable ();
	}

      if (non_constant_p
	  && cp_parser_non_integral_constant_expression (parser,
							 non_constant_p))
	expression = error_mark_node;

      return expression;
    }

  return cp_parser_postfix_expression (parser, address_p, cast_p);
}
/* Returns ERROR_MARK if TOKEN is not a unary-operator. If TOKEN is a
unary-operator, the corresponding tree code is returned. */
/* Map TOKEN onto the tree code for the corresponding unary-operator,
   or ERROR_MARK if TOKEN is not a unary-operator.  */
static enum tree_code
cp_parser_unary_operator (cp_token* token)
{
  enum cpp_ttype kind = token->type;

  if (kind == CPP_MULT)
    return INDIRECT_REF;
  if (kind == CPP_AND)
    return ADDR_EXPR;
  if (kind == CPP_PLUS)
    return UNARY_PLUS_EXPR;
  if (kind == CPP_MINUS)
    return NEGATE_EXPR;
  if (kind == CPP_NOT)
    return TRUTH_NOT_EXPR;
  if (kind == CPP_COMPL)
    return BIT_NOT_EXPR;
  /* Not a unary-operator.  */
  return ERROR_MARK;
}
/* Parse a new-expression.
new-expression:
:: [opt] new new-placement [opt] new-type-id new-initializer [opt]
:: [opt] new new-placement [opt] ( type-id ) new-initializer [opt]
Returns a representation of the expression. */
static tree
cp_parser_new_expression (cp_parser* parser)
{
  bool global_scope_p;
  tree placement;
  tree type;
  tree initializer;
  tree nelts;

  /* Look for the optional `::' operator.  */
  global_scope_p
    = (cp_parser_global_scope_opt (parser,
				   /*current_scope_valid_p=*/false)
       != NULL_TREE);
  /* Look for the `new' operator.  */
  cp_parser_require_keyword (parser, RID_NEW, "`new'");
  /* There's no easy way to tell a new-placement from the
     `( type-id )' construct.  */
  cp_parser_parse_tentatively (parser);
  /* Look for a new-placement.  */
  placement = cp_parser_new_placement (parser);
  /* If that didn't work out, there's no new-placement.  */
  if (!cp_parser_parse_definitely (parser))
    placement = NULL_TREE;

  /* If the next token is a `(', then we have a parenthesized
     type-id.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN))
    {
      /* Consume the `('.  */
      cp_lexer_consume_token (parser->lexer);
      /* Parse the type-id.  */
      type = cp_parser_type_id (parser);
      /* Look for the closing `)'.  */
      cp_parser_require (parser, CPP_CLOSE_PAREN, "`)'");
      /* There should not be a direct-new-declarator in this production,
	 but GCC used to allow this, so we check and emit a sensible error
	 message for this case.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_SQUARE))
	{
	  error ("array bound forbidden after parenthesized type-id");
	  inform ("try removing the parentheses around the type-id");
	  cp_parser_direct_new_declarator (parser);
	}
      nelts = NULL_TREE;
    }
  /* Otherwise, there must be a new-type-id.  */
  else
    type = cp_parser_new_type_id (parser, &nelts);

  /* If the next token is a `(', then we have a new-initializer.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN))
    initializer = cp_parser_new_initializer (parser);
  else
    initializer = NULL_TREE;

  /* A new-expression may not appear in an integral constant
     expression.  */
  if (cp_parser_non_integral_constant_expression (parser, "`new'"))
    return error_mark_node;

  /* Create a representation of the new-expression.  */
  return build_new (placement, type, nelts, initializer, global_scope_p);
}
/* Parse a new-placement.
new-placement:
( expression-list )
Returns the same representation as for an expression-list. */
static tree
cp_parser_new_placement (cp_parser* parser)
{
  /* A new-placement is nothing more than a parenthesized
     expression-list, so hand the work straight off and return the
     list representation unchanged.  */
  return cp_parser_parenthesized_expression_list (parser, false,
						  /*cast_p=*/false,
						  /*non_constant_p=*/NULL);
}
/* Parse a new-type-id.
new-type-id:
type-specifier-seq new-declarator [opt]
Returns the TYPE allocated. If the new-type-id indicates an array
type, *NELTS is set to the number of elements in the last array
bound; the TYPE will not include the last array bound. */
static tree
cp_parser_new_type_id (cp_parser* parser, tree *nelts)
{
  cp_decl_specifier_seq type_specifier_seq;
  cp_declarator *new_declarator;
  cp_declarator *declarator;
  cp_declarator *outer_declarator;
  const char *saved_message;
  tree type;

  /* The type-specifier sequence must not contain type definitions.
     (It cannot contain declarations of new types either, but if they
     are not definitions we will catch that because they are not
     complete.)  */
  saved_message = parser->type_definition_forbidden_message;
  parser->type_definition_forbidden_message
    = "types may not be defined in a new-type-id";
  /* Parse the type-specifier-seq.  */
  cp_parser_type_specifier_seq (parser, /*is_condition=*/false,
				&type_specifier_seq);
  /* Restore the old message.  */
  parser->type_definition_forbidden_message = saved_message;
  /* Parse the new-declarator.  */
  new_declarator = cp_parser_new_declarator_opt (parser);

  /* Determine the number of elements in the last array dimension, if
     any.  */
  *nelts = NULL_TREE;
  /* Skip down to the last array dimension.  First step over any
     leading pointer or pointer-to-member declarators ...  */
  declarator = new_declarator;
  outer_declarator = NULL;
  while (declarator && (declarator->kind == cdk_pointer
			|| declarator->kind == cdk_ptrmem))
    {
      outer_declarator = declarator;
      declarator = declarator->declarator;
    }
  /* ... then walk down nested array declarators until DECLARATOR is
     the innermost one.  */
  while (declarator
	 && declarator->kind == cdk_array
	 && declarator->declarator
	 && declarator->declarator->kind == cdk_array)
    {
      outer_declarator = declarator;
      declarator = declarator->declarator;
    }

  if (declarator && declarator->kind == cdk_array)
    {
      *nelts = declarator->u.array.bounds;
      /* Error recovery: pretend the bad bound was `1' so parsing can
	 continue.  */
      if (*nelts == error_mark_node)
	*nelts = integer_one_node;
      /* Splice the innermost array bound out of the declarator chain;
	 the caller receives it separately through *NELTS.  */
      if (outer_declarator)
	outer_declarator->declarator = declarator->declarator;
      else
	new_declarator = NULL;
    }

  type = groktypename (&type_specifier_seq, new_declarator);
  /* If the type itself is an array type whose bound we have not yet
     captured, peel off the outermost bound into *NELTS as well.  */
  if (TREE_CODE (type) == ARRAY_TYPE && *nelts == NULL_TREE)
    {
      *nelts = array_type_nelts_top (type);
      type = TREE_TYPE (type);
    }
  return type;
}
/* Parse an (optional) new-declarator.
new-declarator:
ptr-operator new-declarator [opt]
direct-new-declarator
Returns the declarator. */
static cp_declarator *
cp_parser_new_declarator_opt (cp_parser* parser)
{
  enum tree_code code;
  tree type;
  cp_cv_quals cv_quals;

  /* We don't know if there's a ptr-operator next, or not.  */
  cp_parser_parse_tentatively (parser);
  /* Look for a ptr-operator.  */
  code = cp_parser_ptr_operator (parser, &type, &cv_quals);
  /* If that worked, look for more new-declarators.  */
  if (cp_parser_parse_definitely (parser))
    {
      cp_declarator *declarator;

      /* Parse another optional declarator; a new-declarator may be a
	 chain of ptr-operators, handled here by recursion.  */
      declarator = cp_parser_new_declarator_opt (parser);
      /* Create the representation of the declarator.  TYPE is
	 non-NULL only for a pointer-to-member; otherwise CODE
	 distinguishes a pointer (INDIRECT_REF) from a reference.  */
      if (type)
	declarator = make_ptrmem_declarator (cv_quals, type, declarator);
      else if (code == INDIRECT_REF)
	declarator = make_pointer_declarator (cv_quals, declarator);
      else
	declarator = make_reference_declarator (cv_quals, declarator);
      return declarator;
    }
  /* If the next token is a `[', there is a direct-new-declarator.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_SQUARE))
    return cp_parser_direct_new_declarator (parser);
  /* The new-declarator is optional; NULL indicates it was absent.  */
  return NULL;
}
/* Parse a direct-new-declarator.
direct-new-declarator:
[ expression ]
direct-new-declarator [constant-expression]
*/
static cp_declarator *
cp_parser_direct_new_declarator (cp_parser* parser)
{
  cp_declarator *declarator = NULL;

  /* Accumulate one array declarator per `[...]' bound.  */
  while (true)
    {
      tree expression;

      /* Look for the opening `['.  */
      cp_parser_require (parser, CPP_OPEN_SQUARE, "`['");
      /* The first expression is not required to be constant.  */
      if (!declarator)
	{
	  expression = cp_parser_expression (parser, /*cast_p=*/false);
	  /* The standard requires that the expression have integral
	     type.  DR 74 adds enumeration types.  We believe that the
	     real intent is that these expressions be handled like the
	     expression in a `switch' condition, which also allows
	     classes with a single conversion to integral or
	     enumeration type.  */
	  if (!processing_template_decl)
	    {
	      expression
		= build_expr_type_conversion (WANT_INT | WANT_ENUM,
					      expression,
					      /*complain=*/true);
	      if (!expression)
		{
		  error ("expression in new-declarator must have integral "
			 "or enumeration type");
		  expression = error_mark_node;
		}
	    }
	}
      /* But all the other expressions must be.  */
      else
	expression
	  = cp_parser_constant_expression (parser,
					   /*allow_non_constant=*/false,
					   NULL);
      /* Look for the closing `]'.  */
      cp_parser_require (parser, CPP_CLOSE_SQUARE, "`]'");
      /* Add this bound to the declarator.  */
      declarator = make_array_declarator (declarator, expression);
      /* If the next token is not a `[', then there are no more
	 bounds.  */
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_OPEN_SQUARE))
	break;
    }

  return declarator;
}
/* Parse a new-initializer.
new-initializer:
( expression-list [opt] )
Returns a representation of the expression-list. If there is no
expression-list, VOID_ZERO_NODE is returned. */
static tree
cp_parser_new_initializer (cp_parser* parser)
{
  /* The new-initializer is just a parenthesized expression-list.  */
  tree expr_list
    = cp_parser_parenthesized_expression_list (parser, false,
					       /*cast_p=*/false,
					       /*non_constant_p=*/NULL);
  /* An empty `()' initializer is represented by VOID_ZERO_NODE.  */
  return expr_list ? expr_list : void_zero_node;
}
/* Parse a delete-expression.
delete-expression:
:: [opt] delete cast-expression
:: [opt] delete [ ] cast-expression
Returns a representation of the expression. */
static tree
cp_parser_delete_expression (cp_parser* parser)
{
  bool global_scope_p;
  bool array_p = false;
  tree expression;

  /* Check for a leading `::'.  */
  global_scope_p
    = (cp_parser_global_scope_opt (parser,
				   /*current_scope_valid_p=*/false)
       != NULL_TREE);
  /* The `delete' keyword itself is mandatory.  */
  cp_parser_require_keyword (parser, RID_DELETE, "`delete'");
  /* A `[]' after `delete' selects the array form.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_SQUARE))
    {
      cp_lexer_consume_token (parser->lexer);
      cp_parser_require (parser, CPP_CLOSE_SQUARE, "`]'");
      array_p = true;
    }
  /* The operand is a cast-expression.  */
  expression = cp_parser_simple_cast_expression (parser);
  /* A delete-expression may not appear in an integral constant
     expression.  */
  if (cp_parser_non_integral_constant_expression (parser, "`delete'"))
    return error_mark_node;
  return delete_sanity (expression, NULL_TREE, array_p, global_scope_p);
}
/* Parse a cast-expression.
cast-expression:
unary-expression
( type-id ) cast-expression
ADDRESS_P is true iff the unary-expression is appearing as the
operand of the `&' operator. CAST_P is true if this expression is
the target of a cast.
Returns a representation of the expression. */
static tree
cp_parser_cast_expression (cp_parser *parser, bool address_p, bool cast_p)
{
  /* If it's a `(', then we might be looking at a cast.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN))
    {
      tree type = NULL_TREE;
      tree expr = NULL_TREE;
      bool compound_literal_p;
      const char *saved_message;

      /* There's no way to know yet whether or not this is a cast.
	 For example, `(int (3))' is a unary-expression, while `(int)
	 3' is a cast.  So, we resort to parsing tentatively.  */
      cp_parser_parse_tentatively (parser);
      /* Types may not be defined in a cast.  */
      saved_message = parser->type_definition_forbidden_message;
      parser->type_definition_forbidden_message
	= "types may not be defined in casts";
      /* Consume the `('.  */
      cp_lexer_consume_token (parser->lexer);
      /* A very tricky bit is that `(struct S) { 3 }' is a
	 compound-literal (which we permit in C++ as an extension).
	 But, that construct is not a cast-expression -- it is a
	 postfix-expression.  (The reason is that `(struct S) { 3 }.i'
	 is legal; if the compound-literal were a cast-expression,
	 you'd need an extra set of parentheses.)  But, if we parse
	 the type-id, and it happens to be a class-specifier, then we
	 will commit to the parse at that point, because we cannot
	 undo the action that is done when creating a new class.  So,
	 then we cannot back up and do a postfix-expression.
	 Therefore, we scan ahead to the closing `)', and check to see
	 if the token after the `)' is a `{'.  If so, we are not
	 looking at a cast-expression.
	 Save tokens so that we can put them back.  */
      cp_lexer_save_tokens (parser->lexer);
      /* Skip tokens until the next token is a closing parenthesis.
	 If we find the closing `)', and the next token is a `{', then
	 we are looking at a compound-literal.  */
      compound_literal_p
	= (cp_parser_skip_to_closing_parenthesis (parser, false, false,
						  /*consume_paren=*/true)
	   && cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE));
      /* Roll back the tokens we skipped.  */
      cp_lexer_rollback_tokens (parser->lexer);
      /* If we were looking at a compound-literal, simulate an error
	 so that the call to cp_parser_parse_definitely below will
	 fail.  */
      if (compound_literal_p)
	cp_parser_simulate_error (parser);
      else
	{
	  /* Record that any type-id parsed here occurs inside an
	     expression, for `>' disambiguation elsewhere.  */
	  bool saved_in_type_id_in_expr_p = parser->in_type_id_in_expr_p;
	  parser->in_type_id_in_expr_p = true;
	  /* Look for the type-id.  */
	  type = cp_parser_type_id (parser);
	  /* Look for the closing `)'.  */
	  cp_parser_require (parser, CPP_CLOSE_PAREN, "`)'");
	  parser->in_type_id_in_expr_p = saved_in_type_id_in_expr_p;
	}

      /* Restore the saved message.  */
      parser->type_definition_forbidden_message = saved_message;

      /* If ok so far, parse the dependent expression.  We cannot be
	 sure it is a cast.  Consider `(T ())'.  It is a parenthesized
	 ctor of T, but looks like a cast to function returning T
	 without a dependent expression.  */
      if (!cp_parser_error_occurred (parser))
	expr = cp_parser_cast_expression (parser,
					  /*address_p=*/false,
					  /*cast_p=*/true);

      if (cp_parser_parse_definitely (parser))
	{
	  /* Warn about old-style casts, if so requested.  */
	  if (warn_old_style_cast
	      && !in_system_header
	      && !VOID_TYPE_P (type)
	      && current_lang_name != lang_name_c)
	    warning (OPT_Wold_style_cast, "use of old-style cast");
	  /* Only type conversions to integral or enumeration types
	     can be used in constant-expressions.  */
	  if (!cast_valid_in_integral_constant_expression_p (type)
	      && (cp_parser_non_integral_constant_expression
		  (parser,
		   "a cast to a type other than an integral or "
		   "enumeration type")))
	    return error_mark_node;
	  /* Perform the cast.  */
	  expr = build_c_cast (type, expr);
	  return expr;
	}
    }

  /* If we get here, then it's not a cast, so it must be a
     unary-expression.  */
  return cp_parser_unary_expression (parser, address_p, cast_p);
}
/* Parse a binary expression of the general form:
pm-expression:
cast-expression
pm-expression .* cast-expression
pm-expression ->* cast-expression
multiplicative-expression:
pm-expression
multiplicative-expression * pm-expression
multiplicative-expression / pm-expression
multiplicative-expression % pm-expression
additive-expression:
multiplicative-expression
additive-expression + multiplicative-expression
additive-expression - multiplicative-expression
shift-expression:
additive-expression
shift-expression << additive-expression
shift-expression >> additive-expression
relational-expression:
shift-expression
relational-expression < shift-expression
relational-expression > shift-expression
relational-expression <= shift-expression
relational-expression >= shift-expression
GNU Extension:
relational-expression:
relational-expression <? shift-expression
relational-expression >? shift-expression
equality-expression:
relational-expression
equality-expression == relational-expression
equality-expression != relational-expression
and-expression:
equality-expression
and-expression & equality-expression
exclusive-or-expression:
and-expression
exclusive-or-expression ^ and-expression
inclusive-or-expression:
exclusive-or-expression
inclusive-or-expression | exclusive-or-expression
logical-and-expression:
inclusive-or-expression
logical-and-expression && inclusive-or-expression
logical-or-expression:
logical-and-expression
logical-or-expression || logical-and-expression
All these are implemented with a single function like:
binary-expression:
simple-cast-expression
binary-expression <token> binary-expression
CAST_P is true if this expression is the target of a cast.
The binops_by_token map is used to get the tree codes for each <token> type.
binary-expressions are associated according to a precedence table. */
/* Map TOKEN to its precedence level, treating `>' as a non-operator
   when it would close a template-argument-list.  */
#define TOKEN_PRECEDENCE(token) \
  ((token->type == CPP_GREATER && !parser->greater_than_is_operator_p) \
   ? PREC_NOT_OPERATOR \
   : binops_by_token[token->type].prec)

static tree
cp_parser_binary_expression (cp_parser* parser, bool cast_p,
			     enum cp_parser_prec prec)
{
  /* Operator-precedence parser: suspended subexpressions (lower
     precedence than the one currently being parsed) live on STACK.  */
  cp_parser_expression_stack stack;
  cp_parser_expression_stack_entry *sp = &stack[0];
  tree lhs, rhs;
  cp_token *token;
  enum tree_code tree_type;
  enum cp_parser_prec new_prec, lookahead_prec;
  bool overloaded_p;

  /* Parse the first expression.  */
  lhs = cp_parser_cast_expression (parser, /*address_p=*/false, cast_p);

  for (;;)
    {
      /* Get an operator token.  */
      token = cp_lexer_peek_token (parser->lexer);
      new_prec = TOKEN_PRECEDENCE (token);

      /* Popping an entry off the stack means we completed a subexpression:
	 - either we found a token which is not an operator (`>' where it is not
	   an operator, or prec == PREC_NOT_OPERATOR), in which case popping
	   will happen repeatedly;
	 - or, we found an operator which has lower priority.  This is the case
	   where the recursive descent *ascends*, as in `3 * 4 + 5' after
	   parsing `3 * 4'.  */
      if (new_prec <= prec)
	{
	  if (sp == stack)
	    break;
	  else
	    goto pop;
	}

     get_rhs:
      tree_type = binops_by_token[token->type].tree_type;

      /* We used the operator token.  */
      cp_lexer_consume_token (parser->lexer);
      /* Extract another operand.  It may be the RHS of this expression
	 or the LHS of a new, higher priority expression.  */
      rhs = cp_parser_simple_cast_expression (parser);

      /* Get another operator token.  Look up its precedence to avoid
	 building a useless (immediately popped) stack entry for common
	 cases such as 3 + 4 + 5 or 3 * 4 + 5.  */
      token = cp_lexer_peek_token (parser->lexer);
      lookahead_prec = TOKEN_PRECEDENCE (token);
      if (lookahead_prec > new_prec)
	{
	  /* ... and prepare to parse the RHS of the new, higher priority
	     expression.  Since precedence levels on the stack are
	     monotonically increasing, we do not have to care about
	     stack overflows.  */
	  sp->prec = prec;
	  sp->tree_type = tree_type;
	  sp->lhs = lhs;
	  sp++;
	  lhs = rhs;
	  prec = new_prec;
	  new_prec = lookahead_prec;
	  goto get_rhs;

	 pop:
	  /* If the stack is not empty, we have parsed into LHS the right side
	     (`4' in the example above) of an expression we had suspended.
	     We can use the information on the stack to recover the LHS (`3')
	     from the stack together with the tree code (`MULT_EXPR'), and
	     the precedence of the higher level subexpression
	     (`PREC_ADDITIVE_EXPRESSION').  TOKEN is the CPP_PLUS token,
	     which will be used to actually build the additive expression.  */
	  --sp;
	  prec = sp->prec;
	  tree_type = sp->tree_type;
	  rhs = lhs;
	  lhs = sp->lhs;
	}

      overloaded_p = false;
      lhs = build_x_binary_op (tree_type, lhs, rhs, &overloaded_p);

      /* If the binary operator required the use of an overloaded operator,
	 then this expression cannot be an integral constant-expression.
	 An overloaded operator can be used even if both operands are
	 otherwise permissible in an integral constant-expression if at
	 least one of the operands is of enumeration type.  */
      if (overloaded_p
	  && (cp_parser_non_integral_constant_expression
	      (parser, "calls to overloaded operators")))
	return error_mark_node;
    }

  return lhs;
}
/* Parse the `? expression : assignment-expression' part of a
conditional-expression. The LOGICAL_OR_EXPR is the
logical-or-expression that started the conditional-expression.
Returns a representation of the entire conditional-expression.
This routine is used by cp_parser_assignment_expression.
? expression : assignment-expression
GNU Extensions:
? : assignment-expression */
static tree
cp_parser_question_colon_clause (cp_parser* parser, tree logical_or_expr)
{
  tree true_expr;
  tree false_expr;

  /* Eat the `?'.  */
  cp_lexer_consume_token (parser->lexer);
  /* GNU extension: in `x ?: y' the middle operand is omitted and is
     represented as NULL_TREE.  */
  if (cp_parser_allow_gnu_extensions_p (parser)
      && cp_lexer_next_token_is (parser->lexer, CPP_COLON))
    true_expr = NULL_TREE;
  else
    true_expr = cp_parser_expression (parser, /*cast_p=*/false);
  /* The `:' separates the two arms.  */
  cp_parser_require (parser, CPP_COLON, "`:'");
  /* The false arm is an assignment-expression.  */
  false_expr = cp_parser_assignment_expression (parser, /*cast_p=*/false);
  /* Combine the three operands.  */
  return build_x_conditional_expr (logical_or_expr, true_expr, false_expr);
}
/* Parse an assignment-expression.
assignment-expression:
conditional-expression
logical-or-expression assignment-operator assignment_expression
throw-expression
CAST_P is true if this expression is the target of a cast.
Returns a representation for the expression. */
static tree
cp_parser_assignment_expression (cp_parser* parser, bool cast_p)
{
  tree expr;

  /* If the next token is the `throw' keyword, then we're looking at
     a throw-expression.  */
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_THROW))
    expr = cp_parser_throw_expression (parser);
  /* Otherwise, it must be that we are looking at a
     logical-or-expression.  */
  else
    {
      /* Parse the binary expressions (logical-or-expression).  */
      expr = cp_parser_binary_expression (parser, cast_p, PREC_NOT_OPERATOR);
      /* If the next token is a `?' then we're actually looking at a
	 conditional-expression.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_QUERY))
	return cp_parser_question_colon_clause (parser, expr);
      else
	{
	  enum tree_code assignment_operator;

	  /* If it's an assignment-operator, we're using the second
	     production.  */
	  assignment_operator
	    = cp_parser_assignment_operator_opt (parser);
	  if (assignment_operator != ERROR_MARK)
	    {
	      tree rhs;

	      /* Parse the right-hand side of the assignment.
		 Assignment is right-associative, hence the
		 recursion.  */
	      rhs = cp_parser_assignment_expression (parser, cast_p);
	      /* An assignment may not appear in a
		 constant-expression.  */
	      if (cp_parser_non_integral_constant_expression (parser,
							      "an assignment"))
		return error_mark_node;
	      /* Build the assignment expression.  */
	      expr = build_x_modify_expr (expr,
					  assignment_operator,
					  rhs);
	    }
	}
    }

  return expr;
}
/* Parse an (optional) assignment-operator.
assignment-operator: one of
= *= /= %= += -= >>= <<= &= ^= |=
GNU Extension:
assignment-operator: one of
<?= >?=
If the next token is an assignment operator, the corresponding tree
code is returned, and the token is consumed. For example, for
`+=', PLUS_EXPR is returned. For `=' itself, the code returned is
NOP_EXPR. For `/', TRUNC_DIV_EXPR is returned; for `%',
TRUNC_MOD_EXPR is returned. If TOKEN is not an assignment
operator, ERROR_MARK is returned. */
static enum tree_code
cp_parser_assignment_operator_opt (cp_parser* parser)
{
  /* Map the next token to a tree code; ERROR_MARK means "not an
     assignment operator".  */
  enum tree_code op = ERROR_MARK;
  cp_token *token = cp_lexer_peek_token (parser->lexer);

  switch (token->type)
    {
    case CPP_EQ:
      op = NOP_EXPR;
      break;
    case CPP_MULT_EQ:
      op = MULT_EXPR;
      break;
    case CPP_DIV_EQ:
      op = TRUNC_DIV_EXPR;
      break;
    case CPP_MOD_EQ:
      op = TRUNC_MOD_EXPR;
      break;
    case CPP_PLUS_EQ:
      op = PLUS_EXPR;
      break;
    case CPP_MINUS_EQ:
      op = MINUS_EXPR;
      break;
    case CPP_RSHIFT_EQ:
      op = RSHIFT_EXPR;
      break;
    case CPP_LSHIFT_EQ:
      op = LSHIFT_EXPR;
      break;
    case CPP_AND_EQ:
      op = BIT_AND_EXPR;
      break;
    case CPP_XOR_EQ:
      op = BIT_XOR_EXPR;
      break;
    case CPP_OR_EQ:
      op = BIT_IOR_EXPR;
      break;
    default:
      /* Anything else leaves OP as ERROR_MARK.  */
      break;
    }

  /* Consume the operator token only if we actually recognized an
     assignment operator.  */
  if (op != ERROR_MARK)
    cp_lexer_consume_token (parser->lexer);

  return op;
}
/* Parse an expression.
expression:
assignment-expression
expression , assignment-expression
CAST_P is true if this expression is the target of a cast.
Returns a representation of the expression. */
static tree
cp_parser_expression (cp_parser* parser, bool cast_p)
{
  tree expression = NULL_TREE;

  /* Collect comma-separated assignment-expressions, folding them
     left-to-right into a compound-expression.  */
  for (;;)
    {
      tree assignment
	= cp_parser_assignment_expression (parser, cast_p);

      /* The first operand stands alone; later ones are chained with
	 the comma operator.  */
      expression = (expression
		    ? build_x_compound_expr (expression, assignment)
		    : assignment);

      /* No comma means the expression is complete.  */
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA))
	break;
      /* Consume the `,'.  */
      cp_lexer_consume_token (parser->lexer);
      /* A comma operator cannot appear in a constant-expression.  */
      if (cp_parser_non_integral_constant_expression (parser,
						      "a comma operator"))
	expression = error_mark_node;
    }

  return expression;
}
/* Parse a constant-expression.
constant-expression:
conditional-expression
If ALLOW_NON_CONSTANT_P a non-constant expression is silently
accepted. If ALLOW_NON_CONSTANT_P is true and the expression is not
constant, *NON_CONSTANT_P is set to TRUE. If ALLOW_NON_CONSTANT_P
is false, NON_CONSTANT_P should be NULL. */
static tree
cp_parser_constant_expression (cp_parser* parser,
			       bool allow_non_constant_p,
			       bool *non_constant_p)
{
  bool saved_integral_constant_expression_p;
  bool saved_allow_non_integral_constant_expression_p;
  bool saved_non_integral_constant_expression_p;
  tree expression;

  /* It might seem that we could simply parse the
     conditional-expression, and then check to see if it were
     TREE_CONSTANT.  However, an expression that is TREE_CONSTANT is
     one that the compiler can figure out is constant, possibly after
     doing some simplifications or optimizations.  The standard has a
     precise definition of constant-expression, and we must honor
     that, even though it is somewhat more restrictive.
     For example:
       int i[(2, 3)];
     is not a legal declaration, because `(2, 3)' is not a
     constant-expression.  The `,' operator is forbidden in a
     constant-expression.  However, GCC's constant-folding machinery
     will fold this operation to an INTEGER_CST for `3'.  */

  /* Save the old settings.  */
  saved_integral_constant_expression_p = parser->integral_constant_expression_p;
  saved_allow_non_integral_constant_expression_p
    = parser->allow_non_integral_constant_expression_p;
  saved_non_integral_constant_expression_p = parser->non_integral_constant_expression_p;
  /* We are now parsing a constant-expression.  */
  parser->integral_constant_expression_p = true;
  parser->allow_non_integral_constant_expression_p = allow_non_constant_p;
  parser->non_integral_constant_expression_p = false;
  /* Although the grammar says "conditional-expression", we parse an
     "assignment-expression", which also permits "throw-expression"
     and the use of assignment operators.  In the case that
     ALLOW_NON_CONSTANT_P is false, we get better errors than we would
     otherwise.  In the case that ALLOW_NON_CONSTANT_P is true, it is
     actually essential that we look for an assignment-expression.
     For example, cp_parser_initializer_clauses uses this function to
     determine whether a particular assignment-expression is in fact
     constant.  */
  expression = cp_parser_assignment_expression (parser, /*cast_p=*/false);
  /* Restore the old settings.  */
  parser->integral_constant_expression_p
    = saved_integral_constant_expression_p;
  parser->allow_non_integral_constant_expression_p
    = saved_allow_non_integral_constant_expression_p;
  /* Either report the non-constancy to the caller, or turn it into an
     error, depending on ALLOW_NON_CONSTANT_P.  */
  if (allow_non_constant_p)
    *non_constant_p = parser->non_integral_constant_expression_p;
  else if (parser->non_integral_constant_expression_p)
    expression = error_mark_node;
  parser->non_integral_constant_expression_p
    = saved_non_integral_constant_expression_p;
  return expression;
}
/* Parse __builtin_offsetof.
offsetof-expression:
"__builtin_offsetof" "(" type-id "," offsetof-member-designator ")"
offsetof-member-designator:
id-expression
| offsetof-member-designator "." id-expression
| offsetof-member-designator "[" expression "]" */
static tree
cp_parser_builtin_offsetof (cp_parser *parser)
{
  int save_ice_p, save_non_ice_p;
  tree type, expr;
  cp_id_kind dummy;

  /* We're about to accept non-integral-constant things, but will
     definitely yield an integral constant expression.  Save and
     restore these values around our local parsing.  */
  save_ice_p = parser->integral_constant_expression_p;
  save_non_ice_p = parser->non_integral_constant_expression_p;

  /* Consume the "__builtin_offsetof" token.  */
  cp_lexer_consume_token (parser->lexer);
  /* Consume the opening `('.  */
  cp_parser_require (parser, CPP_OPEN_PAREN, "`('");
  /* Parse the type-id.  */
  type = cp_parser_type_id (parser);
  /* Look for the `,'.  */
  cp_parser_require (parser, CPP_COMMA, "`,'");

  /* Build the (type *)null that begins the traditional offsetof macro.  */
  expr = build_static_cast (build_pointer_type (type), null_pointer_node);

  /* Parse the offsetof-member-designator.  We begin as if we saw "expr->".  */
  expr = cp_parser_postfix_dot_deref_expression (parser, CPP_DEREF, expr,
						 true, &dummy);
  /* Keep extending the designator with `.' and `[]' accesses until we
     reach the closing `)'.  */
  while (true)
    {
      cp_token *token = cp_lexer_peek_token (parser->lexer);
      switch (token->type)
	{
	case CPP_OPEN_SQUARE:
	  /* offsetof-member-designator "[" expression "]" */
	  expr = cp_parser_postfix_open_square_expression (parser, expr, true);
	  break;

	case CPP_DOT:
	  /* offsetof-member-designator "." identifier */
	  cp_lexer_consume_token (parser->lexer);
	  expr = cp_parser_postfix_dot_deref_expression (parser, CPP_DOT, expr,
							 true, &dummy);
	  break;

	case CPP_CLOSE_PAREN:
	  /* Consume the ")" token.  */
	  cp_lexer_consume_token (parser->lexer);
	  goto success;

	default:
	  /* Error.  We know the following require will fail, but
	     that gives the proper error message.  */
	  cp_parser_require (parser, CPP_CLOSE_PAREN, "`)'");
	  cp_parser_skip_to_closing_parenthesis (parser, true, false, true);
	  expr = error_mark_node;
	  goto failure;
	}
    }

 success:
  /* If we're processing a template, we can't finish the semantics yet.
     Otherwise we can fold the entire expression now.  */
  if (processing_template_decl)
    expr = build1 (OFFSETOF_EXPR, size_type_node, expr);
  else
    expr = finish_offsetof (expr);

 failure:
  /* Restore the constant-expression parsing state in all cases.  */
  parser->integral_constant_expression_p = save_ice_p;
  parser->non_integral_constant_expression_p = save_non_ice_p;

  return expr;
}
/* Statements [gram.stmt.stmt] */
/* Parse a statement.
statement:
labeled-statement
expression-statement
compound-statement
selection-statement
iteration-statement
jump-statement
declaration-statement
try-block
IN_COMPOUND is true when the statement is nested inside a
cp_parser_compound_statement; this matters for certain pragmas. */
static void
cp_parser_statement (cp_parser* parser, tree in_statement_expr,
		     bool in_compound)
{
  tree statement;
  cp_token *token;
  location_t statement_location;

 restart:
  /* There is no statement yet.  */
  statement = NULL_TREE;
  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);
  /* Remember the location of the first token in the statement.  */
  statement_location = token->location;
  /* If this is a keyword, then that will often determine what kind of
     statement we have.  */
  if (token->type == CPP_KEYWORD)
    {
      enum rid keyword = token->keyword;

      switch (keyword)
	{
	case RID_CASE:
	case RID_DEFAULT:
	  /* Looks like a labeled-statement with a case label.
	     Parse the label, and then use tail recursion to parse
	     the statement.  */
	  cp_parser_label_for_labeled_statement (parser);
	  goto restart;

	case RID_IF:
	case RID_SWITCH:
	  statement = cp_parser_selection_statement (parser);
	  break;

	case RID_WHILE:
	case RID_DO:
	case RID_FOR:
	  statement = cp_parser_iteration_statement (parser);
	  break;

	case RID_BREAK:
	case RID_CONTINUE:
	case RID_RETURN:
	case RID_GOTO:
	  statement = cp_parser_jump_statement (parser);
	  break;

	  /* Objective-C++ exception-handling constructs.  */
	case RID_AT_TRY:
	case RID_AT_CATCH:
	case RID_AT_FINALLY:
	case RID_AT_SYNCHRONIZED:
	case RID_AT_THROW:
	  statement = cp_parser_objc_statement (parser);
	  break;

	case RID_TRY:
	  statement = cp_parser_try_block (parser);
	  break;

	default:
	  /* It might be a keyword like `int' that can start a
	     declaration-statement.  */
	  break;
	}
    }
  else if (token->type == CPP_NAME)
    {
      /* If the next token is a `:', then we are looking at a
	 labeled-statement.  */
      token = cp_lexer_peek_nth_token (parser->lexer, 2);
      if (token->type == CPP_COLON)
	{
	  /* Looks like a labeled-statement with an ordinary label.
	     Parse the label, and then use tail recursion to parse
	     the statement.  */
	  cp_parser_label_for_labeled_statement (parser);
	  goto restart;
	}
    }
  /* Anything that starts with a `{' must be a compound-statement.  */
  else if (token->type == CPP_OPEN_BRACE)
    statement = cp_parser_compound_statement (parser, NULL, false);
  /* CPP_PRAGMA is a #pragma inside a function body, which constitutes
     a statement all its own.  */
  else if (token->type == CPP_PRAGMA)
    {
      /* Only certain OpenMP pragmas are attached to statements, and thus
	 are considered statements themselves.  All others are not.  In
	 the context of a compound, accept the pragma as a "statement" and
	 return so that we can check for a close brace.  Otherwise we
	 require a real statement and must go back and read one.  */
      if (in_compound)
	cp_parser_pragma (parser, pragma_compound);
      else if (!cp_parser_pragma (parser, pragma_stmt))
	goto restart;
      return;
    }
  else if (token->type == CPP_EOF)
    {
      cp_parser_error (parser, "expected statement");
      return;
    }

  /* Everything else must be a declaration-statement or an
     expression-statement.  Try for the declaration-statement
     first, unless we are looking at a `;', in which case we know that
     we have an expression-statement.  */
  if (!statement)
    {
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
	{
	  cp_parser_parse_tentatively (parser);
	  /* Try to parse the declaration-statement.  */
	  cp_parser_declaration_statement (parser);
	  /* If that worked, we're done.  */
	  if (cp_parser_parse_definitely (parser))
	    return;
	}
      /* Look for an expression-statement instead.  */
      statement = cp_parser_expression_statement (parser, in_statement_expr);
    }

  /* Set the line number for the statement.  */
  if (statement && STATEMENT_CODE_P (TREE_CODE (statement)))
    SET_EXPR_LOCATION (statement, statement_location);
}
/* Parse the label for a labeled-statement, i.e.
identifier :
case constant-expression :
default :
GNU Extension:
case constant-expression ... constant-expression : statement
When a label is parsed without errors, the label is added to the
parse tree by the finish_* functions, so this function doesn't
have to return the label. */
static void
cp_parser_label_for_labeled_statement (cp_parser* parser)
{
  cp_token *token;

  /* The next token should be an identifier.  */
  token = cp_lexer_peek_token (parser->lexer);
  if (token->type != CPP_NAME
      && token->type != CPP_KEYWORD)
    {
      cp_parser_error (parser, "expected labeled-statement");
      return;
    }

  switch (token->keyword)
    {
    case RID_CASE:
      {
	tree expr, expr_hi;
	cp_token *ellipsis;

	/* Consume the `case' token.  */
	cp_lexer_consume_token (parser->lexer);
	/* Parse the constant-expression.  */
	expr = cp_parser_constant_expression (parser,
					      /*allow_non_constant_p=*/false,
					      NULL);

	/* GNU case-range extension: `case LO ... HI:'.  */
	ellipsis = cp_lexer_peek_token (parser->lexer);
	if (ellipsis->type == CPP_ELLIPSIS)
	  {
	    /* Consume the `...' token.  */
	    cp_lexer_consume_token (parser->lexer);
	    expr_hi =
	      cp_parser_constant_expression (parser,
					     /*allow_non_constant_p=*/false,
					     NULL);
	    /* We don't need to emit warnings here, as the common code
	       will do this for us.  */
	  }
	else
	  expr_hi = NULL_TREE;

	if (parser->in_switch_statement_p)
	  finish_case_label (expr, expr_hi);
	else
	  error ("case label %qE not within a switch statement", expr);
      }
      break;

    case RID_DEFAULT:
      /* Consume the `default' token.  */
      cp_lexer_consume_token (parser->lexer);

      if (parser->in_switch_statement_p)
	finish_case_label (NULL_TREE, NULL_TREE);
      else
	error ("case label not within a switch statement");
      break;

    default:
      /* Anything else must be an ordinary label.  */
      finish_label_stmt (cp_parser_identifier (parser));
      break;
    }

  /* Require the `:' token.  */
  cp_parser_require (parser, CPP_COLON, "`:'");
}
/* Parse an expression-statement.
expression-statement:
expression [opt] ;
Returns the new EXPR_STMT -- or NULL_TREE if the expression
statement consists of nothing more than an `;'. IN_STATEMENT_EXPR_P
indicates whether this expression-statement is part of an
expression statement. */
static tree
cp_parser_expression_statement (cp_parser* parser, tree in_statement_expr)
{
  tree stmt = NULL_TREE;

  /* An expression is present unless the whole statement is just `;'.  */
  if (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
    stmt = cp_parser_expression (parser, /*cast_p=*/false);

  /* Eat the terminating `;'.  */
  cp_parser_consume_semicolon_at_end_of_statement (parser);

  /* A `}' coming up inside a statement-expression means this was its
     final expression statement, whose value the whole statement
     expression yields.  */
  if (in_statement_expr
      && cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_BRACE))
    stmt = finish_stmt_expr_expr (stmt, in_statement_expr);
  else if (stmt != NULL_TREE)
    stmt = finish_expr_stmt (stmt);
  else
    finish_stmt ();

  return stmt;
}
/* Parse a compound-statement.
compound-statement:
{ statement-seq [opt] }
Returns a tree representing the statement. */
static tree
cp_parser_compound_statement (cp_parser *parser, tree in_statement_expr,
                              bool in_try)
{
  tree block;

  /* A compound-statement must open with `{'.  */
  if (!cp_parser_require (parser, CPP_OPEN_BRACE, "`{'"))
    return error_mark_node;

  /* Open the compound-statement, flagged as a try-block body when
     requested by the caller.  */
  block = begin_compound_stmt (in_try ? BCS_TRY_BLOCK : 0);
  /* The statement-seq between the braces may be empty.  */
  cp_parser_statement_seq_opt (parser, in_statement_expr);
  finish_compound_stmt (block);
  /* Eat the closing `}'.  */
  cp_parser_require (parser, CPP_CLOSE_BRACE, "`}'");

  return block;
}
/* Parse an (optional) statement-seq.
statement-seq:
statement
statement-seq [opt] statement */
static void
cp_parser_statement_seq_opt (cp_parser* parser, tree in_statement_expr)
{
  /* Keep consuming statements until a terminator is seen.  */
  for (;;)
    {
      cp_token *next = cp_lexer_peek_token (parser->lexer);

      /* A closing brace, end of input, or end of a pragma line cannot
         begin a statement, so the sequence is over.  */
      if (next->type == CPP_CLOSE_BRACE
          || next->type == CPP_EOF
          || next->type == CPP_PRAGMA_EOL)
        return;

      /* Parse one statement.  */
      cp_parser_statement (parser, in_statement_expr, true);
    }
}
/* Parse a selection-statement.
selection-statement:
if ( condition ) statement
if ( condition ) statement else statement
switch ( condition ) statement
Returns the new IF_STMT or SWITCH_STMT. */
static tree
cp_parser_selection_statement (cp_parser* parser)
{
  cp_token *token;
  enum rid keyword;

  /* Peek at the next token; this also diagnoses a missing keyword.  */
  token = cp_parser_require (parser, CPP_KEYWORD, "selection-statement");
  /* cp_parser_require returns NULL on failure.  The sibling functions
     cp_parser_iteration_statement and cp_parser_jump_statement guard
     against that before dereferencing; do the same here instead of
     reading token->keyword through a null pointer.  */
  if (!token)
    return error_mark_node;
  /* See what kind of keyword it is.  */
  keyword = token->keyword;
  switch (keyword)
    {
    case RID_IF:
    case RID_SWITCH:
      {
        tree statement;
        tree condition;

        /* Look for the `('.  */
        if (!cp_parser_require (parser, CPP_OPEN_PAREN, "`('"))
          {
            cp_parser_skip_to_end_of_statement (parser);
            return error_mark_node;
          }
        /* Begin the selection-statement.  */
        if (keyword == RID_IF)
          statement = begin_if_stmt ();
        else
          statement = begin_switch_stmt ();
        /* Parse the condition.  */
        condition = cp_parser_condition (parser);
        /* Look for the `)'; on failure, skip ahead past the
           unbalanced parenthesis to recover.  */
        if (!cp_parser_require (parser, CPP_CLOSE_PAREN, "`)'"))
          cp_parser_skip_to_closing_parenthesis (parser, true, false,
                                                 /*consume_paren=*/true);

        if (keyword == RID_IF)
          {
            /* Add the condition.  */
            finish_if_stmt_cond (condition, statement);
            /* Parse the then-clause.  */
            cp_parser_implicitly_scoped_statement (parser);
            finish_then_clause (statement);
            /* If the next token is `else', parse the else-clause.  */
            if (cp_lexer_next_token_is_keyword (parser->lexer, RID_ELSE))
              {
                /* Consume the `else' keyword.  */
                cp_lexer_consume_token (parser->lexer);
                begin_else_clause (statement);
                /* Parse the else-clause.  */
                cp_parser_implicitly_scoped_statement (parser);
                finish_else_clause (statement);
              }
            /* Now we're all done with the if-statement.  */
            finish_if_stmt (statement);
          }
        else
          {
            bool in_switch_statement_p;
            unsigned char in_statement;

            /* Add the condition.  */
            finish_switch_cond (condition, statement);
            /* Parse the body of the switch-statement, saving and
               restoring the enclosing context so that `break' and
               case labels are validated against this switch.  */
            in_switch_statement_p = parser->in_switch_statement_p;
            in_statement = parser->in_statement;
            parser->in_switch_statement_p = true;
            parser->in_statement |= IN_SWITCH_STMT;
            cp_parser_implicitly_scoped_statement (parser);
            parser->in_switch_statement_p = in_switch_statement_p;
            parser->in_statement = in_statement;
            /* Now we're all done with the switch-statement.  */
            finish_switch_stmt (statement);
          }

        return statement;
      }

    default:
      cp_parser_error (parser, "expected selection-statement");
      return error_mark_node;
    }
}
/* Parse a condition.
condition:
expression
type-specifier-seq declarator = assignment-expression
GNU Extension:
condition:
type-specifier-seq declarator asm-specification [opt]
attributes [opt] = assignment-expression
Returns the expression that should be tested. */
static tree
cp_parser_condition (cp_parser* parser)
{
cp_decl_specifier_seq type_specifiers;
const char *saved_message;
/* Try the declaration first. */
cp_parser_parse_tentatively (parser);
/* New types are not allowed in the type-specifier-seq for a
condition. */
saved_message = parser->type_definition_forbidden_message;
parser->type_definition_forbidden_message
= "types may not be defined in conditions";
/* Parse the type-specifier-seq. */
cp_parser_type_specifier_seq (parser, /*is_condition=*/true,
&type_specifiers);
/* Restore the saved message. */
parser->type_definition_forbidden_message = saved_message;
/* If all is well, we might be looking at a declaration. */
if (!cp_parser_error_occurred (parser))
{
tree decl;
tree asm_specification;
tree attributes;
cp_declarator *declarator;
tree initializer = NULL_TREE;
/* Parse the declarator. */
declarator = cp_parser_declarator (parser, CP_PARSER_DECLARATOR_NAMED,
/*ctor_dtor_or_conv_p=*/NULL,
/*parenthesized_p=*/NULL,
/*member_p=*/false);
/* Parse the attributes. */
attributes = cp_parser_attributes_opt (parser);
/* Parse the asm-specification. */
asm_specification = cp_parser_asm_specification_opt (parser);
/* If the next token is not an `=', then we might still be
looking at an expression. For example:
if (A(a).x)
looks like a decl-specifier-seq and a declarator -- but then
there is no `=', so this is an expression. */
cp_parser_require (parser, CPP_EQ, "`='");
/* If we did see an `=', then we are looking at a declaration
for sure. */
if (cp_parser_parse_definitely (parser))
{
tree pushed_scope;
bool non_constant_p;
/* Create the declaration. */
decl = start_decl (declarator, &type_specifiers,
/*initialized_p=*/true,
attributes, /*prefix_attributes=*/NULL_TREE,
&pushed_scope);
/* Parse the assignment-expression. */
initializer
= cp_parser_constant_expression (parser,
/*allow_non_constant_p=*/true,
&non_constant_p);
if (!non_constant_p)
initializer = fold_non_dependent_expr (initializer);
/* Process the initializer. */
cp_finish_decl (decl,
initializer, !non_constant_p,
asm_specification,
LOOKUP_ONLYCONVERTING);
if (pushed_scope)
pop_scope (pushed_scope);
/* The declared entity itself is the value tested; presumably
convert_from_reference strips any reference type -- TODO
confirm. */
return convert_from_reference (decl);
}
}
/* If we didn't even get past the declarator successfully, we are
definitely not looking at a declaration. */
else
cp_parser_abort_tentative_parse (parser);
/* Otherwise, we are looking at an expression. */
return cp_parser_expression (parser, /*cast_p=*/false);
}
/* Parse an iteration-statement.
iteration-statement:
while ( condition ) statement
do statement while ( expression ) ;
for ( for-init-statement condition [opt] ; expression [opt] )
statement
Returns the new WHILE_STMT, DO_STMT, or FOR_STMT. */
static tree
cp_parser_iteration_statement (cp_parser* parser)
{
cp_token *token;
enum rid keyword;
tree statement;
unsigned char in_statement;
/* Peek at the next token. */
token = cp_parser_require (parser, CPP_KEYWORD, "iteration-statement");
if (!token)
return error_mark_node;
/* Remember whether or not we are already within an iteration
statement.  The flag is set to IN_ITERATION_STMT around each loop
body below (so that `break'/`continue' are accepted there) and then
restored. */
in_statement = parser->in_statement;
/* See what kind of keyword it is. */
keyword = token->keyword;
switch (keyword)
{
case RID_WHILE:
{
tree condition;
/* Begin the while-statement. */
statement = begin_while_stmt ();
/* Look for the `('. */
cp_parser_require (parser, CPP_OPEN_PAREN, "`('");
/* Parse the condition. */
condition = cp_parser_condition (parser);
finish_while_stmt_cond (condition, statement);
/* Look for the `)'. */
cp_parser_require (parser, CPP_CLOSE_PAREN, "`)'");
/* Parse the dependent statement.  The condition above may have
declared a variable, so the body is parsed in the scope that is
already open (already-scoped). */
parser->in_statement = IN_ITERATION_STMT;
cp_parser_already_scoped_statement (parser);
parser->in_statement = in_statement;
/* We're done with the while-statement. */
finish_while_stmt (statement);
}
break;
case RID_DO:
{
tree expression;
/* Begin the do-statement. */
statement = begin_do_stmt ();
/* Parse the body of the do-statement. */
parser->in_statement = IN_ITERATION_STMT;
cp_parser_implicitly_scoped_statement (parser);
parser->in_statement = in_statement;
finish_do_body (statement);
/* Look for the `while' keyword. */
cp_parser_require_keyword (parser, RID_WHILE, "`while'");
/* Look for the `('. */
cp_parser_require (parser, CPP_OPEN_PAREN, "`('");
/* Parse the expression. */
expression = cp_parser_expression (parser, /*cast_p=*/false);
/* We're done with the do-statement. */
finish_do_stmt (expression, statement);
/* Look for the `)'. */
cp_parser_require (parser, CPP_CLOSE_PAREN, "`)'");
/* Look for the `;'. */
cp_parser_require (parser, CPP_SEMICOLON, "`;'");
}
break;
case RID_FOR:
{
/* Both the condition and the increment expression are optional;
NULL_TREE is passed through to the finish_* calls when absent. */
tree condition = NULL_TREE;
tree expression = NULL_TREE;
/* Begin the for-statement. */
statement = begin_for_stmt ();
/* Look for the `('. */
cp_parser_require (parser, CPP_OPEN_PAREN, "`('");
/* Parse the initialization. */
cp_parser_for_init_statement (parser);
finish_for_init_stmt (statement);
/* If there's a condition, process it. */
if (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
condition = cp_parser_condition (parser);
finish_for_cond (condition, statement);
/* Look for the `;'. */
cp_parser_require (parser, CPP_SEMICOLON, "`;'");
/* If there's an expression, process it. */
if (cp_lexer_next_token_is_not (parser->lexer, CPP_CLOSE_PAREN))
expression = cp_parser_expression (parser, /*cast_p=*/false);
finish_for_expr (expression, statement);
/* Look for the `)'. */
cp_parser_require (parser, CPP_CLOSE_PAREN, "`)'");
/* Parse the body of the for-statement.  As for `while', the
for-init/condition may have opened a scope, so the body is
already-scoped. */
parser->in_statement = IN_ITERATION_STMT;
cp_parser_already_scoped_statement (parser);
parser->in_statement = in_statement;
/* We're done with the for-statement. */
finish_for_stmt (statement);
}
break;
default:
cp_parser_error (parser, "expected iteration-statement");
statement = error_mark_node;
break;
}
return statement;
}
/* Parse a for-init-statement.
for-init-statement:
expression-statement
simple-declaration */
static void
cp_parser_for_init_statement (cp_parser* parser)
{
  /* If the next token is a `;', then we have an empty
     expression-statement.  Grammatically, this is also a
     simple-declaration, but an invalid one, because it does not
     declare anything.  Therefore, if we did not handle this case
     specially, we would issue an error message about an invalid
     declaration.  */
  if (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
    {
      /* We're going to speculatively look for a declaration, falling back
         to an expression, if necessary.  */
      cp_parser_parse_tentatively (parser);
      /* Parse the declaration.  */
      cp_parser_simple_declaration (parser,
                                    /*function_definition_allowed_p=*/false);
      /* If the tentative parse failed, then we shall need to look for an
         expression-statement.  */
      if (cp_parser_parse_definitely (parser))
        return;
    }
  /* Not a declaration: parse an expression-statement.  This is not
     part of a statement-expression, so pass NULL_TREE for the tree
     parameter (the previous code passed `false', a bool, where a
     tree was expected).  */
  cp_parser_expression_statement (parser, NULL_TREE);
}
/* Parse a jump-statement.
jump-statement:
break ;
continue ;
return expression [opt] ;
goto identifier ;
GNU extension:
jump-statement:
goto * expression ;
Returns the new BREAK_STMT, CONTINUE_STMT, RETURN_EXPR, or GOTO_EXPR. */
static tree
cp_parser_jump_statement (cp_parser* parser)
{
tree statement = error_mark_node;
cp_token *token;
enum rid keyword;
/* Peek at the next token. */
token = cp_parser_require (parser, CPP_KEYWORD, "jump-statement");
if (!token)
return error_mark_node;
/* See what kind of keyword it is. */
keyword = token->keyword;
switch (keyword)
{
case RID_BREAK:
/* Note the case ordering below: `default' appears between the other
cases.  Case label order in a switch is irrelevant; `default'
handles IN_ITERATION_STMT and any value with IN_SWITCH_STMT set,
as the assertion verifies, while the exact values IN_OMP_BLOCK
and IN_OMP_FOR get their own diagnostics. */
switch (parser->in_statement)
{
case 0:
error ("break statement not within loop or switch");
break;
default:
gcc_assert ((parser->in_statement & IN_SWITCH_STMT)
|| parser->in_statement == IN_ITERATION_STMT);
statement = finish_break_stmt ();
break;
case IN_OMP_BLOCK:
error ("invalid exit from OpenMP structured block");
break;
case IN_OMP_FOR:
error ("break statement used with OpenMP for loop");
break;
}
cp_parser_require (parser, CPP_SEMICOLON, "%<;%>");
break;
case RID_CONTINUE:
/* A switch does not capture `continue', so the IN_SWITCH_STMT bit
is masked off before dispatching. */
switch (parser->in_statement & ~IN_SWITCH_STMT)
{
case 0:
error ("continue statement not within a loop");
break;
case IN_ITERATION_STMT:
case IN_OMP_FOR:
statement = finish_continue_stmt ();
break;
case IN_OMP_BLOCK:
error ("invalid exit from OpenMP structured block");
break;
default:
gcc_unreachable ();
}
cp_parser_require (parser, CPP_SEMICOLON, "%<;%>");
break;
case RID_RETURN:
{
tree expr;
/* If the next token is a `;', then there is no
expression. */
if (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
expr = cp_parser_expression (parser, /*cast_p=*/false);
else
expr = NULL_TREE;
/* Build the return-statement. */
statement = finish_return_stmt (expr);
/* Look for the final `;'. */
cp_parser_require (parser, CPP_SEMICOLON, "%<;%>");
}
break;
case RID_GOTO:
/* Create the goto-statement. */
if (cp_lexer_next_token_is (parser->lexer, CPP_MULT))
{
/* `goto *expr' -- issue a warning about this use of a GNU
extension. */
if (pedantic)
pedwarn ("ISO C++ forbids computed gotos");
/* Consume the '*' token. */
cp_lexer_consume_token (parser->lexer);
/* Parse the dependent expression. */
finish_goto_stmt (cp_parser_expression (parser, /*cast_p=*/false));
}
else
finish_goto_stmt (cp_parser_identifier (parser));
/* Look for the final `;'. */
cp_parser_require (parser, CPP_SEMICOLON, "%<;%>");
break;
default:
cp_parser_error (parser, "expected jump-statement");
break;
}
return statement;
}
/* Parse a declaration-statement.
declaration-statement:
block-declaration */
static void
cp_parser_declaration_statement (cp_parser* parser)
{
  void *obstack_mark;

  /* Record the declarator obstack's high-water mark so that all
     declarators built while parsing this declaration can be
     released at once.  */
  obstack_mark = obstack_alloc (&declarator_obstack, 0);
  /* Parse the block-declaration, in statement context.  */
  cp_parser_block_declaration (parser, /*statement_p=*/true);
  /* Release the declarators allocated above.  */
  obstack_free (&declarator_obstack, obstack_mark);
  /* Finish off the statement.  */
  finish_stmt ();
}
/* Some dependent statements (like `if (cond) statement'), are
implicitly in their own scope. In other words, if the statement is
a single statement (as opposed to a compound-statement), it is
none-the-less treated as if it were enclosed in braces. Any
declarations appearing in the dependent statement are out of scope
after control passes that point. This function parses a statement,
but ensures that is in its own scope, even if it is not a
compound-statement.
Returns the new statement. */
static tree
cp_parser_implicitly_scoped_statement (cp_parser* parser)
{
  tree stmt;

  if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
    {
      /* A bare `;' dependent statement: represent it explicitly with
         an empty statement (marked via a special NOP_EXPR).  */
      cp_lexer_consume_token (parser->lexer);
      stmt = add_stmt (build_empty_stmt ());
    }
  else if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
    /* A braced statement provides its own scope; parse it directly.  */
    stmt = cp_parser_compound_statement (parser, NULL, false);
  else
    {
      /* A single unbraced statement: wrap it in a synthetic
         compound-statement so any declarations it makes go out of
         scope afterwards.  */
      stmt = begin_compound_stmt (0);
      cp_parser_statement (parser, NULL_TREE, false);
      finish_compound_stmt (stmt);
    }

  /* Return the statement.  */
  return stmt;
}
/* For some dependent statements (like `while (cond) statement'), we
have already created a scope. Therefore, even if the dependent
statement is a compound-statement, we do not want to create another
scope. */
static void
cp_parser_already_scoped_statement (cp_parser* parser)
{
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
    {
      /* Parse the braces by hand rather than calling
         cp_parser_compound_statement, which would open a second
         scope on top of the one the caller already created.  */
      cp_parser_require (parser, CPP_OPEN_BRACE, "`{'");
      cp_parser_statement_seq_opt (parser, NULL_TREE);
      cp_parser_require (parser, CPP_CLOSE_BRACE, "`}'");
    }
  else
    /* A single statement needs no special handling.  */
    cp_parser_statement (parser, NULL_TREE, false);
}
/* Declarations [gram.dcl.dcl] */
/* Parse an optional declaration-sequence.
declaration-seq:
declaration
declaration-seq declaration */
static void
cp_parser_declaration_seq_opt (cp_parser* parser)
{
  for (;;)
    {
      cp_token *next = cp_lexer_peek_token (parser->lexer);

      /* A `}', end of input, or end of a pragma line terminates the
         declaration sequence.  */
      if (next->type == CPP_CLOSE_BRACE
          || next->type == CPP_EOF
          || next->type == CPP_PRAGMA_EOL)
        break;

      /* A declaration consisting of a single semicolon is invalid;
         accept it anyway, warning when pedantic outside of system
         headers.  */
      if (next->type == CPP_SEMICOLON)
        {
          cp_lexer_consume_token (parser->lexer);
          if (pedantic && !in_system_header)
            pedwarn ("extra %<;%>");
          continue;
        }

      /* Track transitions into and out of regions that are
         implicitly extern "C", keeping the language context in step
         with the token stream.  */
      if (!parser->implicit_extern_c && next->implicit_extern_c)
        {
          push_lang_context (lang_name_c);
          parser->implicit_extern_c = true;
        }
      else if (parser->implicit_extern_c && !next->implicit_extern_c)
        {
          pop_lang_context ();
          parser->implicit_extern_c = false;
        }

      /* A top-level declaration can consist solely of a #pragma.  A
         nested declaration cannot, so this is done here and not in
         cp_parser_declaration.  (A #pragma at block scope is handled
         in cp_parser_statement.)  */
      if (next->type == CPP_PRAGMA)
        {
          cp_parser_pragma (parser, pragma_external);
          continue;
        }

      /* Parse the declaration itself.  */
      cp_parser_declaration (parser);
    }
}
/* Parse a declaration.
declaration:
block-declaration
function-definition
template-declaration
explicit-instantiation
explicit-specialization
linkage-specification
namespace-definition
GNU extension:
declaration:
__extension__ declaration */
static void
cp_parser_declaration (cp_parser* parser)
{
cp_token token1;
cp_token token2;
int saved_pedantic;
void *p;
/* Check for the `__extension__' keyword. */
if (cp_parser_extension_opt (parser, &saved_pedantic))
{
/* Parse the qualified declaration. */
cp_parser_declaration (parser);
/* Restore the PEDANTIC flag. */
pedantic = saved_pedantic;
return;
}
/* Try to figure out what kind of declaration is present.  Note that
the two lookahead tokens are copied by value, presumably because the
parsing below may invalidate peeked token pointers -- TODO confirm. */
token1 = *cp_lexer_peek_token (parser->lexer);
if (token1.type != CPP_EOF)
token2 = *cp_lexer_peek_nth_token (parser->lexer, 2);
else
{
/* Synthesize an EOF token so the dispatch below can examine
token2 unconditionally. */
token2.type = CPP_EOF;
token2.keyword = RID_MAX;
}
/* Get the high-water mark for the DECLARATOR_OBSTACK. */
p = obstack_alloc (&declarator_obstack, 0);
/* If the next token is `extern' and the following token is a string
literal, then we have a linkage specification. */
if (token1.keyword == RID_EXTERN
&& cp_parser_is_string_literal (&token2))
cp_parser_linkage_specification (parser);
/* If the next token is `template', then we have either a template
declaration, an explicit instantiation, or an explicit
specialization. */
else if (token1.keyword == RID_TEMPLATE)
{
/* `template <>' indicates a template specialization. */
if (token2.type == CPP_LESS
&& cp_lexer_peek_nth_token (parser->lexer, 3)->type == CPP_GREATER)
cp_parser_explicit_specialization (parser);
/* `template <' indicates a template declaration. */
else if (token2.type == CPP_LESS)
cp_parser_template_declaration (parser, /*member_p=*/false);
/* Anything else must be an explicit instantiation. */
else
cp_parser_explicit_instantiation (parser);
}
/* If the next token is `export', then we have a template
declaration. */
else if (token1.keyword == RID_EXPORT)
cp_parser_template_declaration (parser, /*member_p=*/false);
/* If the next token is `extern', 'static' or 'inline' and the one
after that is `template', we have a GNU extended explicit
instantiation directive. */
else if (cp_parser_allow_gnu_extensions_p (parser)
&& (token1.keyword == RID_EXTERN
|| token1.keyword == RID_STATIC
|| token1.keyword == RID_INLINE)
&& token2.keyword == RID_TEMPLATE)
cp_parser_explicit_instantiation (parser);
/* If the next token is `namespace', check for a named or unnamed
namespace definition.  (`namespace N = ...' is a namespace-alias
and is excluded here by the third-token `=' check.) */
else if (token1.keyword == RID_NAMESPACE
&& (/* A named namespace definition. */
(token2.type == CPP_NAME
&& (cp_lexer_peek_nth_token (parser->lexer, 3)->type
!= CPP_EQ))
/* An unnamed namespace definition. */
|| token2.type == CPP_OPEN_BRACE
|| token2.keyword == RID_ATTRIBUTE))
cp_parser_namespace_definition (parser);
/* Objective-C++ declaration/definition. */
else if (c_dialect_objc () && OBJC_IS_AT_KEYWORD (token1.keyword))
cp_parser_objc_declaration (parser);
/* We must have either a block declaration or a function
definition. */
else
/* Try to parse a block-declaration, or a function-definition. */
cp_parser_block_declaration (parser, /*statement_p=*/false);
/* Free any declarators allocated. */
obstack_free (&declarator_obstack, p);
}
/* Parse a block-declaration.
block-declaration:
simple-declaration
asm-definition
namespace-alias-definition
using-declaration
using-directive
GNU Extension:
block-declaration:
__extension__ block-declaration
label-declaration
If STATEMENT_P is TRUE, then this block-declaration is occurring as
part of a declaration-statement. */
static void
cp_parser_block_declaration (cp_parser *parser,
                             bool statement_p)
{
  cp_token *first;
  int saved_pedantic;

  /* `__extension__' simply wraps another block-declaration.  */
  if (cp_parser_extension_opt (parser, &saved_pedantic))
    {
      cp_parser_block_declaration (parser, statement_p);
      /* Restore the PEDANTIC flag.  */
      pedantic = saved_pedantic;
      return;
    }

  /* Dispatch on the first token of the declaration.  */
  first = cp_lexer_peek_token (parser->lexer);
  switch (first->keyword)
    {
    case RID_ASM:
      /* An asm-definition.  */
      if (statement_p)
        cp_parser_commit_to_tentative_parse (parser);
      cp_parser_asm_definition (parser);
      break;

    case RID_NAMESPACE:
      /* At block-declaration level, `namespace' can only begin a
         namespace-alias-definition.  */
      cp_parser_namespace_alias_definition (parser);
      break;

    case RID_USING:
      {
        cp_token *second;

        if (statement_p)
          cp_parser_commit_to_tentative_parse (parser);
        /* `using namespace' is a using-directive; any other `using'
           is a using-declaration.  */
        second = cp_lexer_peek_nth_token (parser->lexer, 2);
        if (second->keyword == RID_NAMESPACE)
          cp_parser_using_directive (parser);
        else
          cp_parser_using_declaration (parser,
                                       /*access_declaration_p=*/false);
      }
      break;

    case RID_LABEL:
      /* GNU `__label__' label declaration.  */
      if (statement_p)
        cp_parser_commit_to_tentative_parse (parser);
      cp_parser_label_declaration (parser);
      break;

    default:
      /* Anything else must be a simple-declaration.  */
      cp_parser_simple_declaration (parser, !statement_p);
      break;
    }
}
/* Parse a simple-declaration.
simple-declaration:
decl-specifier-seq [opt] init-declarator-list [opt] ;
init-declarator-list:
init-declarator
init-declarator-list , init-declarator
If FUNCTION_DEFINITION_ALLOWED_P is TRUE, then we also recognize a
function-definition as a simple-declaration. */
static void
cp_parser_simple_declaration (cp_parser* parser,
bool function_definition_allowed_p)
{
cp_decl_specifier_seq decl_specifiers;
int declares_class_or_enum;
bool saw_declarator;
/* Defer access checks until we know what is being declared; the
checks for names appearing in the decl-specifier-seq should be
done as if we were in the scope of the thing being declared.
Every exit from this function must balance this push: the normal
paths go through the pop at `done', and the function-definition
path pops explicitly before returning. */
push_deferring_access_checks (dk_deferred);
/* Parse the decl-specifier-seq. We have to keep track of whether
or not the decl-specifier-seq declares a named class or
enumeration type, since that is the only case in which the
init-declarator-list is allowed to be empty.
[dcl.dcl]
In a simple-declaration, the optional init-declarator-list can be
omitted only when declaring a class or enumeration, that is when
the decl-specifier-seq contains either a class-specifier, an
elaborated-type-specifier, or an enum-specifier. */
cp_parser_decl_specifier_seq (parser,
CP_PARSER_FLAGS_OPTIONAL,
&decl_specifiers,
&declares_class_or_enum);
/* We no longer need to defer access checks. */
stop_deferring_access_checks ();
/* In a block scope, a valid declaration must always have a
decl-specifier-seq. By not trying to parse declarators, we can
resolve the declaration/expression ambiguity more quickly. */
if (!function_definition_allowed_p
&& !decl_specifiers.any_specifiers_p)
{
cp_parser_error (parser, "expected declaration");
goto done;
}
/* If the next two tokens are both identifiers, the code is
erroneous. The usual cause of this situation is code like:
T t;
where "T" should name a type -- but does not. */
if (!decl_specifiers.type
&& cp_parser_parse_and_diagnose_invalid_type_name (parser))
{
/* If parsing tentatively, we should commit; we really are
looking at a declaration. */
cp_parser_commit_to_tentative_parse (parser);
/* Give up. */
goto done;
}
/* If we have seen at least one decl-specifier, and the next token
is not a parenthesis, then we must be looking at a declaration.
(After "int (" we might be looking at a functional cast.) */
if (decl_specifiers.any_specifiers_p
&& cp_lexer_next_token_is_not (parser->lexer, CPP_OPEN_PAREN))
cp_parser_commit_to_tentative_parse (parser);
/* Keep going until we hit the `;' at the end of the simple
declaration. */
saw_declarator = false;
while (cp_lexer_next_token_is_not (parser->lexer,
CPP_SEMICOLON))
{
cp_token *token;
bool function_definition_p;
tree decl;
if (saw_declarator)
{
/* If we are processing next declarator, comma is expected */
token = cp_lexer_peek_token (parser->lexer);
gcc_assert (token->type == CPP_COMMA);
cp_lexer_consume_token (parser->lexer);
}
else
saw_declarator = true;
/* Parse the init-declarator. */
decl = cp_parser_init_declarator (parser, &decl_specifiers,
/*checks=*/NULL,
function_definition_allowed_p,
/*member_p=*/false,
declares_class_or_enum,
&function_definition_p);
/* If an error occurred while parsing tentatively, exit quickly.
(That usually happens when in the body of a function; each
statement is treated as a declaration-statement until proven
otherwise.) */
if (cp_parser_error_occurred (parser))
goto done;
/* Handle function definitions specially. */
if (function_definition_p)
{
/* If the next token is a `,', then we are probably
processing something like:
void f() {}, *p;
which is erroneous. */
if (cp_lexer_next_token_is (parser->lexer, CPP_COMMA))
error ("mixing declarations and function-definitions is forbidden");
/* Otherwise, we're done with the list of declarators.  Note
that this path returns without consuming a `;' -- a function
definition is not terminated by one. */
else
{
pop_deferring_access_checks ();
return;
}
}
/* The next token should be either a `,' or a `;'. */
token = cp_lexer_peek_token (parser->lexer);
/* If it's a `,', there are more declarators to come. */
if (token->type == CPP_COMMA)
/* will be consumed next time around */;
/* If it's a `;', we are done. */
else if (token->type == CPP_SEMICOLON)
break;
/* Anything else is an error. */
else
{
/* If we have already issued an error message we don't need
to issue another one. */
if (decl != error_mark_node
|| cp_parser_uncommitted_to_tentative_parse_p (parser))
cp_parser_error (parser, "expected %<,%> or %<;%>");
/* Skip tokens until we reach the end of the statement. */
cp_parser_skip_to_end_of_statement (parser);
/* If the next token is now a `;', consume it. */
if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
cp_lexer_consume_token (parser->lexer);
goto done;
}
/* After the first time around, a function-definition is not
allowed -- even if it was OK at first. For example:
int i, f() {}
is not valid. */
function_definition_allowed_p = false;
}
/* Issue an error message if no declarators are present, and the
decl-specifier-seq does not itself declare a class or
enumeration. */
if (!saw_declarator)
{
if (cp_parser_declares_only_class_p (parser))
shadow_tag (&decl_specifiers);
/* Perform any deferred access checks. */
perform_deferred_access_checks ();
}
/* Consume the `;'. */
cp_parser_require (parser, CPP_SEMICOLON, "`;'");
done:
pop_deferring_access_checks ();
}
/* Parse a decl-specifier-seq.
decl-specifier-seq:
decl-specifier-seq [opt] decl-specifier
decl-specifier:
storage-class-specifier
type-specifier
function-specifier
friend
typedef
GNU Extension:
decl-specifier:
attributes
Set *DECL_SPECS to a representation of the decl-specifier-seq.
The parser flags FLAGS is used to control type-specifier parsing.
*DECLARES_CLASS_OR_ENUM is set to the bitwise or of the following
flags:
1: one of the decl-specifiers is an elaborated-type-specifier
(i.e., a type declaration)
2: one of the decl-specifiers is an enum-specifier or a
class-specifier (i.e., a type definition)
*/
static void
cp_parser_decl_specifier_seq (cp_parser* parser,
cp_parser_flags flags,
cp_decl_specifier_seq *decl_specs,
int* declares_class_or_enum)
{
/* A constructor declarator can only begin the declaration when we are
not already inside a declarator. */
bool constructor_possible_p = !parser->in_declarator_p;
/* Clear DECL_SPECS. */
clear_decl_specs (decl_specs);
/* Assume no class or enumeration type is declared. */
*declares_class_or_enum = 0;
/* Keep reading specifiers until there are no more to read. */
while (true)
{
bool constructor_p;
bool found_decl_spec;
cp_token *token;
/* Peek at the next token. */
token = cp_lexer_peek_token (parser->lexer);
/* Handle attributes. */
if (token->keyword == RID_ATTRIBUTE)
{
/* Parse the attributes and chain them onto any already seen. */
decl_specs->attributes
= chainon (decl_specs->attributes,
cp_parser_attributes_opt (parser));
continue;
}
/* Assume we will find a decl-specifier keyword. */
found_decl_spec = true;
/* If the next token is an appropriate keyword, we can simply
add it to the list. */
switch (token->keyword)
{
/* decl-specifier:
friend */
case RID_FRIEND:
if (!at_class_scope_p ())
{
error ("%<friend%> used outside of class");
/* Purge the bogus token so it is not seen again. */
cp_lexer_purge_token (parser->lexer);
}
else
{
/* The specs[] counters count occurrences so that duplicates
can be diagnosed later (presumably in
cp_parser_check_decl_spec -- TODO confirm). */
++decl_specs->specs[(int) ds_friend];
/* Consume the token. */
cp_lexer_consume_token (parser->lexer);
}
break;
/* function-specifier:
inline
virtual
explicit */
case RID_INLINE:
case RID_VIRTUAL:
case RID_EXPLICIT:
cp_parser_function_specifier_opt (parser, decl_specs);
break;
/* decl-specifier:
typedef */
case RID_TYPEDEF:
++decl_specs->specs[(int) ds_typedef];
/* Consume the token. */
cp_lexer_consume_token (parser->lexer);
/* A constructor declarator cannot appear in a typedef. */
constructor_possible_p = false;
/* The "typedef" keyword can only occur in a declaration; we
may as well commit at this point. */
cp_parser_commit_to_tentative_parse (parser);
/* `typedef' may not be combined with a storage class. */
if (decl_specs->storage_class != sc_none)
decl_specs->conflicting_specifiers_p = true;
break;
/* storage-class-specifier:
auto
register
static
extern
mutable
GNU Extension:
thread */
case RID_AUTO:
case RID_REGISTER:
case RID_STATIC:
case RID_EXTERN:
case RID_MUTABLE:
/* Consume the token. */
cp_lexer_consume_token (parser->lexer);
cp_parser_set_storage_class (parser, decl_specs, token->keyword);
break;
case RID_THREAD:
/* Consume the token. */
cp_lexer_consume_token (parser->lexer);
++decl_specs->specs[(int) ds_thread];
break;
default:
/* We did not find a decl-specifier keyword. */
found_decl_spec = false;
break;
}
/* Constructors are a special case. The `S' in `S()' is not a
decl-specifier; it is the beginning of the declarator. */
constructor_p
= (!found_decl_spec
&& constructor_possible_p
&& (cp_parser_constructor_declarator_p
(parser, decl_specs->specs[(int) ds_friend] != 0)));
/* If we don't have a DECL_SPEC yet, then we must be looking at
a type-specifier. */
if (!found_decl_spec && !constructor_p)
{
int decl_spec_declares_class_or_enum;
bool is_cv_qualifier;
tree type_spec;
type_spec
= cp_parser_type_specifier (parser, flags,
decl_specs,
/*is_declaration=*/true,
&decl_spec_declares_class_or_enum,
&is_cv_qualifier);
*declares_class_or_enum |= decl_spec_declares_class_or_enum;
/* If this type-specifier referenced a user-defined type
(a typedef, class-name, etc.), then we can't allow any
more such type-specifiers henceforth.
[dcl.spec]
The longest sequence of decl-specifiers that could
possibly be a type name is taken as the
decl-specifier-seq of a declaration. The sequence shall
be self-consistent as described below.
[dcl.type]
As a general rule, at most one type-specifier is allowed
in the complete decl-specifier-seq of a declaration. The
only exceptions are the following:
-- const or volatile can be combined with any other
type-specifier.
-- signed or unsigned can be combined with char, long,
short, or int.
-- ..
Example:
typedef char* Pc;
void g (const int Pc);
Here, Pc is *not* part of the decl-specifier seq; it's
the declarator. Therefore, once we see a type-specifier
(other than a cv-qualifier), we forbid any additional
user-defined types. We *do* still allow things like `int
int' to be considered a decl-specifier-seq, and issue the
error message later. */
if (type_spec && !is_cv_qualifier)
flags |= CP_PARSER_FLAGS_NO_USER_DEFINED_TYPES;
/* A constructor declarator cannot follow a type-specifier. */
if (type_spec)
{
constructor_possible_p = false;
found_decl_spec = true;
}
}
/* If we still do not have a DECL_SPEC, then there are no more
decl-specifiers. */
if (!found_decl_spec)
break;
decl_specs->any_specifiers_p = true;
/* After we see one decl-specifier, further decl-specifiers are
always optional. */
flags |= CP_PARSER_FLAGS_OPTIONAL;
}
/* Check for repeated or otherwise invalid specifier combinations. */
cp_parser_check_decl_spec (decl_specs);
/* Don't allow a friend specifier with a class definition. */
if (decl_specs->specs[(int) ds_friend] != 0
&& (*declares_class_or_enum & 2))
error ("class definition may not be declared a friend");
}
/* Parse an (optional) storage-class-specifier.
   storage-class-specifier:
     auto
     register
     static
     extern
     mutable
   GNU Extension:
   storage-class-specifier:
     thread
   Returns an IDENTIFIER_NODE corresponding to the keyword used, or
   NULL_TREE (with no token consumed) if no storage-class-specifier
   is present.  */
static tree
cp_parser_storage_class_specifier_opt (cp_parser* parser)
{
  enum rid kw = cp_lexer_peek_token (parser->lexer)->keyword;

  /* Only these keywords introduce a storage-class-specifier.  */
  if (kw == RID_AUTO || kw == RID_REGISTER || kw == RID_STATIC
      || kw == RID_EXTERN || kw == RID_MUTABLE || kw == RID_THREAD)
    /* Consume the keyword and hand back its identifier.  */
    return cp_lexer_consume_token (parser->lexer)->u.value;
  return NULL_TREE;
}
/* Parse an (optional) function-specifier.
   function-specifier:
     inline
     virtual
     explicit
   Returns an IDENTIFIER_NODE corresponding to the keyword used, or
   NULL_TREE (consuming nothing) when none is present.
   Updates DECL_SPECS, if it is non-NULL.  */
static tree
cp_parser_function_specifier_opt (cp_parser* parser,
				  cp_decl_specifier_seq *decl_specs)
{
  enum rid kw = cp_lexer_peek_token (parser->lexer)->keyword;

  if (kw == RID_INLINE)
    {
      if (decl_specs)
	++decl_specs->specs[(int) ds_inline];
    }
  else if (kw == RID_VIRTUAL)
    {
      /* 14.5.2.3 [temp.mem]
	 A member function template shall not be virtual.  */
      if (PROCESSING_REAL_TEMPLATE_DECL_P ())
	error ("templates may not be %<virtual%>");
      else if (decl_specs)
	++decl_specs->specs[(int) ds_virtual];
    }
  else if (kw == RID_EXPLICIT)
    {
      if (decl_specs)
	++decl_specs->specs[(int) ds_explicit];
    }
  else
    return NULL_TREE;
  /* A function-specifier was recognized; consume its token.  */
  return cp_lexer_consume_token (parser->lexer)->u.value;
}
/* Parse a linkage-specification.
   linkage-specification:
     extern string-literal { declaration-seq [opt] }
     extern string-literal declaration  */
static void
cp_parser_linkage_specification (cp_parser* parser)
{
  tree lang;

  /* The specification begins with `extern'.  */
  cp_parser_require_keyword (parser, RID_EXTERN, "`extern'");
  /* The language name follows as a string-literal.  */
  lang = cp_parser_string_literal (parser, false, false);
  /* Turn the literal into an identifier.  If the literal is a
     wide-character string, or contains embedded NULs, then we can't
     handle it as the user wants; in either case strlen disagrees
     with TREE_STRING_LENGTH - 1.  */
  if (strlen (TREE_STRING_POINTER (lang))
      != (size_t) (TREE_STRING_LENGTH (lang) - 1))
    {
      cp_parser_error (parser, "invalid linkage-specification");
      /* Fall back on C++ linkage.  */
      lang = lang_name_cplusplus;
    }
  else
    lang = get_identifier (TREE_STRING_POINTER (lang));
  /* Enter the new linkage.  */
  push_lang_context (lang);
  /* A `{' introduces the braced form: a declaration-seq.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
    {
      /* Eat the `{', parse the declarations, and match the `}'.  */
      cp_lexer_consume_token (parser->lexer);
      cp_parser_declaration_seq_opt (parser);
      cp_parser_require (parser, CPP_CLOSE_BRACE, "`}'");
    }
  else
    {
      /* The unbraced form: exactly one declaration, parsed with the
	 in_unbraced_linkage_specification_p flag set.  */
      bool saved_flag;

      saved_flag = parser->in_unbraced_linkage_specification_p;
      parser->in_unbraced_linkage_specification_p = true;
      cp_parser_declaration (parser);
      parser->in_unbraced_linkage_specification_p = saved_flag;
    }
  /* Leave the linkage we entered above.  */
  pop_lang_context ();
}
/* Special member functions [gram.special] */

/* Parse a conversion-function-id.
   conversion-function-id:
     operator conversion-type-id
   Returns an IDENTIFIER_NODE representing the operator.  */
static tree
cp_parser_conversion_function_id (cp_parser* parser)
{
  tree type;
  tree old_scope;
  tree old_qualifying_scope;
  tree old_object_scope;
  tree entered_scope = NULL_TREE;

  /* The id must begin with the `operator' keyword.  */
  if (!cp_parser_require_keyword (parser, RID_OPERATOR, "`operator'"))
    return error_mark_node;
  /* Parsing the conversion-type-id resets the parser's scope state,
     but that state is needed afterwards to look up the conversion
     function, so stash it first.  */
  old_scope = parser->scope;
  old_qualifying_scope = parser->qualifying_scope;
  old_object_scope = parser->object_scope;
  /* Enter the scope of the class so that names declared within it
     are visible in the conversion-type-id.  For example, given:
       struct S {
	 typedef int I;
	 operator I();
       };
       S::operator I() { ... }
     we must be inside `S' to see that `I' names a type.  */
  if (old_scope)
    entered_scope = push_scope (old_scope);
  /* Parse the conversion-type-id.  */
  type = cp_parser_conversion_type_id (parser);
  /* Leave the class scope again, if we entered one.  */
  if (entered_scope)
    pop_scope (entered_scope);
  /* Put the stashed scope state back.  */
  parser->scope = old_scope;
  parser->qualifying_scope = old_qualifying_scope;
  parser->object_scope = old_object_scope;
  /* An invalid type means failure.  */
  if (type == error_mark_node)
    return error_mark_node;
  /* Produce the mangled `operator TYPE' name.  */
  return mangle_conv_op_name_for_type (type);
}
/* Parse a conversion-type-id:
   conversion-type-id:
     type-specifier-seq conversion-declarator [opt]
   Returns the TYPE specified.  */
static tree
cp_parser_conversion_type_id (cp_parser* parser)
{
  tree attrs;
  cp_decl_specifier_seq specs;
  cp_declarator *decl;
  tree type;

  /* Attributes, if present, come first.  */
  attrs = cp_parser_attributes_opt (parser);
  /* Then the type-specifier-seq.  */
  cp_parser_type_specifier_seq (parser, /*is_condition=*/false,
				&specs);
  /* Bail out if the specifiers were bad.  */
  if (specs.type == error_mark_node)
    return error_mark_node;
  /* Finally, the optional conversion-declarator.  */
  decl = cp_parser_conversion_declarator_opt (parser);
  type = grokdeclarator (decl, &specs, TYPENAME,
			 /*initialized=*/0, &attrs);
  /* Apply any attributes to the resulting type.  */
  if (attrs)
    cplus_decl_attributes (&type, attrs, /*flags=*/0);
  return type;
}
/* Parse an (optional) conversion-declarator.
   conversion-declarator:
     ptr-operator conversion-declarator [opt]
   Returns the declarator, or NULL when no ptr-operator is present.  */
static cp_declarator *
cp_parser_conversion_declarator_opt (cp_parser* parser)
{
  enum tree_code code;
  tree class_type;
  cp_cv_quals quals;
  cp_declarator *inner;

  /* A ptr-operator may or may not follow; parse tentatively.  */
  cp_parser_parse_tentatively (parser);
  code = cp_parser_ptr_operator (parser, &class_type, &quals);
  /* No ptr-operator means the conversion-declarator is absent.  */
  if (!cp_parser_parse_definitely (parser))
    return NULL;
  /* Recurse for any further conversion-declarators.  */
  inner = cp_parser_conversion_declarator_opt (parser);
  /* Wrap the inner declarator according to the ptr-operator seen:
     pointer-to-member, plain pointer, or reference.  */
  if (class_type)
    return make_ptrmem_declarator (quals, class_type, inner);
  if (code == INDIRECT_REF)
    return make_pointer_declarator (quals, inner);
  return make_reference_declarator (quals, inner);
}
/* Parse an (optional) ctor-initializer.
   ctor-initializer:
     : mem-initializer-list
   Returns TRUE iff the ctor-initializer was actually present.  */
static bool
cp_parser_ctor_initializer_opt (cp_parser* parser)
{
  /* A ctor-initializer is introduced by a `:'.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_COLON))
    {
      /* Eat the `:' and parse the mem-initializer-list.  */
      cp_lexer_consume_token (parser->lexer);
      cp_parser_mem_initializer_list (parser);
      return true;
    }
  /* No initializer list.  A constructor still default-initializes
     its bases and members.  */
  if (DECL_CONSTRUCTOR_P (current_function_decl))
    finish_mem_initializers (NULL_TREE);
  return false;
}
/* Parse a mem-initializer-list.
   mem-initializer-list:
     mem-initializer
     mem-initializer , mem-initializer-list  */
static void
cp_parser_mem_initializer_list (cp_parser* parser)
{
  tree initializers = NULL_TREE;

  /* Base/member initializers are only valid on constructors; tell
     the user so, but keep parsing to recover.  */
  if (!DECL_CONSTRUCTOR_P (current_function_decl))
    error ("only constructors take base initializers");
  /* Walk the comma-separated list.  */
  do
    {
      tree init;

      /* Parse one mem-initializer.  */
      init = cp_parser_mem_initializer (parser);
      /* Prepend it to the list unless it was erroneous; the list is
	 therefore built in reverse parse order.  */
      if (init != error_mark_node)
	{
	  TREE_CHAIN (init) = initializers;
	  initializers = init;
	}
      /* Anything other than a `,' ends the list.  */
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA))
	break;
      /* Consume the `,' and go round again.  */
      cp_lexer_consume_token (parser->lexer);
    }
  while (true);
  /* Hand the list off for semantic analysis.  */
  if (DECL_CONSTRUCTOR_P (current_function_decl))
    finish_mem_initializers (initializers);
}
/* Parse a mem-initializer.
   mem-initializer:
     mem-initializer-id ( expression-list [opt] )
   GNU extension:
   mem-initializer:
     ( expression-list [opt] )
   Returns a TREE_LIST.  The TREE_PURPOSE is the TYPE (for a base
   class) or FIELD_DECL (for a non-static data member) to initialize;
   the TREE_VALUE is the expression-list.  */
static tree
cp_parser_mem_initializer (cp_parser* parser)
{
  tree id;
  tree args;
  tree member;

  /* A bare `(' is the anachronistic GNU form with no
     mem-initializer-id.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN))
    {
      pedwarn ("anachronistic old-style base class initializer");
      id = NULL_TREE;
    }
  else
    id = cp_parser_mem_initializer_id (parser);
  /* Resolve what is being initialized.  */
  member = expand_member_init (id);
  /* While parsing the arguments of a base (non-DECL) initializer,
     raise the in_base_initializer flag.  */
  if (member && !DECL_P (member))
    in_base_initializer = 1;
  args = cp_parser_parenthesized_expression_list (parser, false,
						  /*cast_p=*/false,
						  /*non_constant_p=*/NULL);
  if (args == error_mark_node)
    return error_mark_node;
  /* An absent argument list is represented by void_type_node.  */
  if (!args)
    args = void_type_node;
  in_base_initializer = 0;
  /* Without something to initialize, there is nothing to build.  */
  return member ? build_tree_list (member, args) : error_mark_node;
}
/* Parse a mem-initializer-id.
   mem-initializer-id:
     :: [opt] nested-name-specifier [opt] class-name
     identifier
   Returns a TYPE indicating the class to be initialized for the first
   production.  Returns an IDENTIFIER_NODE indicating the data member
   to be initialized for the second production.  */
static tree
cp_parser_mem_initializer_id (cp_parser* parser)
{
  bool saw_global_scope;
  bool saw_nested_name;
  bool saw_template = false;
  tree name;

  /* `typename' is not allowed in this context ([temp.res]); complain
     and skip it.  */
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_TYPENAME))
    {
      error ("keyword %<typename%> not allowed in this context (a qualified "
	     "member initializer is implicitly a type)");
      cp_lexer_consume_token (parser->lexer);
    }
  /* An optional `::' may begin the id.  */
  saw_global_scope
    = (cp_parser_global_scope_opt (parser,
				   /*current_scope_valid_p=*/false)
       != NULL_TREE);
  /* Likewise an optional nested-name-specifier.  Per [temp.res], the
     keyword `typename' is not permitted in a base-specifier or
     mem-initializer; in these contexts a qualified name that depends
     on a template-parameter is implicitly assumed to be a type name.
     The simplest way to implement that is to behave as though
     `typename' had been seen here.  */
  saw_nested_name
    = (cp_parser_nested_name_specifier_opt (parser,
					    /*typename_keyword_p=*/true,
					    /*check_dependency_p=*/true,
					    /*type_p=*/true,
					    /*is_declaration=*/true)
       != NULL_TREE);
  if (saw_nested_name)
    saw_template = cp_parser_optional_template_keyword (parser);
  /* With a `::' or a nested-name-specifier present, only a
     class-name can follow.  */
  if (saw_global_scope || saw_nested_name)
    return cp_parser_class_name (parser,
				 /*typename_keyword_p=*/true,
				 /*template_keyword_p=*/saw_template,
				 none_type,
				 /*check_dependency_p=*/true,
				 /*class_head_p=*/false,
				 /*is_declaration=*/true);
  /* Otherwise, first try a class-name, tentatively.  */
  cp_parser_parse_tentatively (parser);
  name = cp_parser_class_name (parser,
			       /*typename_keyword_p=*/true,
			       /*template_keyword_p=*/false,
			       none_type,
			       /*check_dependency_p=*/true,
			       /*class_head_p=*/false,
			       /*is_declaration=*/true);
  if (cp_parser_parse_definitely (parser))
    return name;
  /* Fall back to a plain identifier naming a data member.  */
  return cp_parser_identifier (parser);
}
/* Overloading [gram.over] */

/* Parse an operator-function-id.
   operator-function-id:
     operator operator
   Returns an IDENTIFIER_NODE for the operator which is a
   human-readable spelling of the identifier, e.g., `operator +'.  */
static tree
cp_parser_operator_function_id (cp_parser* parser)
{
  /* Without the `operator' keyword this cannot be an
     operator-function-id.  */
  if (!cp_parser_require_keyword (parser, RID_OPERATOR, "`operator'"))
    return error_mark_node;
  /* The operator itself follows.  */
  return cp_parser_operator (parser);
}
/* Parse an operator.
   operator:
     new delete new[] delete[] + - * / % ^ & | ~ ! = < >
     += -= *= /= %= ^= &= |= << >> >>= <<= == != <= >= &&
     || ++ -- , ->* -> () []
   GNU Extensions:
   operator:
     <? >? <?= >?=
   Returns an IDENTIFIER_NODE for the operator which is a
   human-readable spelling of the identifier, e.g., `operator +'.
   On failure, reports "expected operator" and returns
   error_mark_node.  */
static tree
cp_parser_operator (cp_parser* parser)
{
  tree id = NULL_TREE;
  cp_token *token;

  /* Peek at the next token; it is only consumed once we know which
     operator (if any) it denotes.  */
  token = cp_lexer_peek_token (parser->lexer);
  /* Figure out which operator we have.  */
  switch (token->type)
    {
    case CPP_KEYWORD:
      {
	enum tree_code op;

	/* The keyword should be either `new' or `delete'; anything
	   else falls through to the "expected operator" error.  */
	if (token->keyword == RID_NEW)
	  op = NEW_EXPR;
	else if (token->keyword == RID_DELETE)
	  op = DELETE_EXPR;
	else
	  break;
	/* Consume the `new' or `delete' token.  */
	cp_lexer_consume_token (parser->lexer);
	/* Peek at the next token.  */
	token = cp_lexer_peek_token (parser->lexer);
	/* If it's a `[' token then this is the array variant of the
	   operator (operator new[] / operator delete[]).  */
	if (token->type == CPP_OPEN_SQUARE)
	  {
	    /* Consume the `[' token.  */
	    cp_lexer_consume_token (parser->lexer);
	    /* Look for the `]' token.  */
	    cp_parser_require (parser, CPP_CLOSE_SQUARE, "`]'");
	    id = ansi_opname (op == NEW_EXPR
			      ? VEC_NEW_EXPR : VEC_DELETE_EXPR);
	  }
	/* Otherwise, we have the non-array variant.  */
	else
	  id = ansi_opname (op);
	/* The keyword tokens were already consumed above; return
	   directly rather than falling out of the switch.  */
	return id;
      }

    /* Unary/binary operators: map the token directly onto a tree
       code and look up the operator name via ansi_opname.  */
    case CPP_PLUS:
      id = ansi_opname (PLUS_EXPR);
      break;

    case CPP_MINUS:
      id = ansi_opname (MINUS_EXPR);
      break;

    case CPP_MULT:
      id = ansi_opname (MULT_EXPR);
      break;

    case CPP_DIV:
      id = ansi_opname (TRUNC_DIV_EXPR);
      break;

    case CPP_MOD:
      id = ansi_opname (TRUNC_MOD_EXPR);
      break;

    case CPP_XOR:
      id = ansi_opname (BIT_XOR_EXPR);
      break;

    case CPP_AND:
      id = ansi_opname (BIT_AND_EXPR);
      break;

    case CPP_OR:
      id = ansi_opname (BIT_IOR_EXPR);
      break;

    case CPP_COMPL:
      id = ansi_opname (BIT_NOT_EXPR);
      break;

    case CPP_NOT:
      id = ansi_opname (TRUTH_NOT_EXPR);
      break;

    /* Plain assignment uses the assignment-operator table with
       NOP_EXPR; compound assignments below use their arithmetic
       codes with ansi_assopname.  */
    case CPP_EQ:
      id = ansi_assopname (NOP_EXPR);
      break;

    case CPP_LESS:
      id = ansi_opname (LT_EXPR);
      break;

    case CPP_GREATER:
      id = ansi_opname (GT_EXPR);
      break;

    case CPP_PLUS_EQ:
      id = ansi_assopname (PLUS_EXPR);
      break;

    case CPP_MINUS_EQ:
      id = ansi_assopname (MINUS_EXPR);
      break;

    case CPP_MULT_EQ:
      id = ansi_assopname (MULT_EXPR);
      break;

    case CPP_DIV_EQ:
      id = ansi_assopname (TRUNC_DIV_EXPR);
      break;

    case CPP_MOD_EQ:
      id = ansi_assopname (TRUNC_MOD_EXPR);
      break;

    case CPP_XOR_EQ:
      id = ansi_assopname (BIT_XOR_EXPR);
      break;

    case CPP_AND_EQ:
      id = ansi_assopname (BIT_AND_EXPR);
      break;

    case CPP_OR_EQ:
      id = ansi_assopname (BIT_IOR_EXPR);
      break;

    case CPP_LSHIFT:
      id = ansi_opname (LSHIFT_EXPR);
      break;

    case CPP_RSHIFT:
      id = ansi_opname (RSHIFT_EXPR);
      break;

    case CPP_LSHIFT_EQ:
      id = ansi_assopname (LSHIFT_EXPR);
      break;

    case CPP_RSHIFT_EQ:
      id = ansi_assopname (RSHIFT_EXPR);
      break;

    case CPP_EQ_EQ:
      id = ansi_opname (EQ_EXPR);
      break;

    case CPP_NOT_EQ:
      id = ansi_opname (NE_EXPR);
      break;

    case CPP_LESS_EQ:
      id = ansi_opname (LE_EXPR);
      break;

    case CPP_GREATER_EQ:
      id = ansi_opname (GE_EXPR);
      break;

    case CPP_AND_AND:
      id = ansi_opname (TRUTH_ANDIF_EXPR);
      break;

    case CPP_OR_OR:
      id = ansi_opname (TRUTH_ORIF_EXPR);
      break;

    /* NOTE(review): `++' is registered under POSTINCREMENT_EXPR while
       `--' uses PREDECREMENT_EXPR.  The asymmetry presumably matches
       how ansi_opname indexes these operators -- confirm against the
       operator table before changing.  */
    case CPP_PLUS_PLUS:
      id = ansi_opname (POSTINCREMENT_EXPR);
      break;

    case CPP_MINUS_MINUS:
      id = ansi_opname (PREDECREMENT_EXPR);
      break;

    case CPP_COMMA:
      id = ansi_opname (COMPOUND_EXPR);
      break;

    case CPP_DEREF_STAR:
      id = ansi_opname (MEMBER_REF);
      break;

    case CPP_DEREF:
      id = ansi_opname (COMPONENT_REF);
      break;

    /* operator() and operator[] span two tokens, so they consume
       and return directly instead of using the shared tail.  */
    case CPP_OPEN_PAREN:
      /* Consume the `('.  */
      cp_lexer_consume_token (parser->lexer);
      /* Look for the matching `)'.  */
      cp_parser_require (parser, CPP_CLOSE_PAREN, "`)'");
      return ansi_opname (CALL_EXPR);

    case CPP_OPEN_SQUARE:
      /* Consume the `['.  */
      cp_lexer_consume_token (parser->lexer);
      /* Look for the matching `]'.  */
      cp_parser_require (parser, CPP_CLOSE_SQUARE, "`]'");
      return ansi_opname (ARRAY_REF);

    default:
      /* Anything else is an error.  */
      break;
    }

  /* If we have selected an identifier, we need to consume the
     operator token.  */
  if (id)
    cp_lexer_consume_token (parser->lexer);
  /* Otherwise, no valid operator name was present.  */
  else
    {
      cp_parser_error (parser, "expected operator");
      id = error_mark_node;
    }
  return id;
}
/* Parse a template-declaration.
   template-declaration:
     export [opt] template < template-parameter-list > declaration
   If MEMBER_P is TRUE, this template-declaration occurs within a
   class-specifier.
   The grammar rule given by the standard isn't correct.  What
   is really meant is:
   template-declaration:
     export [opt] template-parameter-list-seq
       decl-specifier-seq [opt] init-declarator [opt] ;
     export [opt] template-parameter-list-seq
       function-definition
   template-parameter-list-seq:
     template-parameter-list-seq [opt]
     template < template-parameter-list >  */
static void
cp_parser_template_declaration (cp_parser* parser, bool member_p)
{
  /* An optional `export' may come first; it is accepted, warned
     about, and otherwise ignored.  */
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_EXPORT))
    {
      cp_lexer_consume_token (parser->lexer);
      warning (0, "keyword %<export%> not implemented, and will be ignored");
    }
  /* The rest of the declaration is handled by the shared helper.  */
  cp_parser_template_declaration_after_export (parser, member_p);
}
/* Parse a template-parameter-list.
   template-parameter-list:
     template-parameter
     template-parameter-list , template-parameter
   Returns a TREE_LIST.  Each node represents a template parameter.
   The nodes are connected via their TREE_CHAINs.  */
static tree
cp_parser_template_parameter_list (cp_parser* parser)
{
  tree parm_list = NULL_TREE;

  begin_template_parm_list ();
  /* Walk the comma-separated parameters.  */
  do
    {
      tree parm;
      bool is_non_type;

      /* Parse one template-parameter.  */
      parm = cp_parser_template_parameter (parser, &is_non_type);
      if (parm != error_mark_node)
	/* Register the parameter with the template machinery.  */
	parm_list = process_template_parm (parm_list, parm, is_non_type);
      else
	{
	  /* Chain an error placeholder onto the list instead.  */
	  tree err_parm = build_tree_list (parm, parm);

	  TREE_VALUE (err_parm) = error_mark_node;
	  parm_list = chainon (parm_list, err_parm);
	}
      /* Anything other than a `,' ends the list.  */
      if (cp_lexer_peek_token (parser->lexer)->type != CPP_COMMA)
	break;
      /* Consume the `,' and parse the next parameter.  */
      cp_lexer_consume_token (parser->lexer);
    }
  while (true);
  return end_template_parm_list (parm_list);
}
/* Parse a template-parameter.
   template-parameter:
     type-parameter
     parameter-declaration
   If all goes well, returns a TREE_LIST.  The TREE_VALUE represents
   the parameter.  The TREE_PURPOSE is the default value, if any.
   Returns ERROR_MARK_NODE on failure.  *IS_NON_TYPE is set to true
   iff this parameter is a non-type parameter.  */
static tree
cp_parser_template_parameter (cp_parser* parser, bool *is_non_type)
{
  cp_token *token;
  cp_parameter_declarator *parameter_declarator;
  tree parm;

  /* Assume it is a type parameter or a template parameter.  */
  *is_non_type = false;
  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);
  /* If it is `template', we have a type-parameter.  */
  if (token->keyword == RID_TEMPLATE)
    return cp_parser_type_parameter (parser);
  /* If it is `class' or `typename' we do not know yet whether it is a
     type parameter or a non-type parameter.  Consider:
       template <typename T, typename T::X X> ...
     or:
       template <class C, class D*> ...
     Here, the first parameter is a type parameter, and the second is
     a non-type parameter.  We can tell by looking at the token after
     the identifier -- if it is a `,', `=', or `>' then we have a type
     parameter.  */
  if (token->keyword == RID_TYPENAME || token->keyword == RID_CLASS)
    {
      /* Peek at the token after `class' or `typename'.  */
      token = cp_lexer_peek_nth_token (parser->lexer, 2);
      /* If it's an identifier, skip it.  */
      if (token->type == CPP_NAME)
	token = cp_lexer_peek_nth_token (parser->lexer, 3);
      /* Now, see if the token looks like the end of a template
	 parameter.  */
      if (token->type == CPP_COMMA
	  || token->type == CPP_EQ
	  || token->type == CPP_GREATER)
	return cp_parser_type_parameter (parser);
    }
  /* Otherwise, it is a non-type parameter.
     [temp.param]
     When parsing a default template-argument for a non-type
     template-parameter, the first non-nested `>' is taken as the end
     of the template parameter-list rather than a greater-than
     operator.  */
  *is_non_type = true;
  parameter_declarator
    = cp_parser_parameter_declaration (parser, /*template_parm_p=*/true,
				       /*parenthesized_p=*/NULL);
  /* Fixed: the `&para' of `&parameter_declarator' had been corrupted
     into a pilcrow character (mojibake), which does not compile.  */
  parm = grokdeclarator (parameter_declarator->declarator,
			 &parameter_declarator->decl_specifiers,
			 PARM, /*initialized=*/0,
			 /*attrlist=*/NULL);
  if (parm == error_mark_node)
    return error_mark_node;
  /* TREE_PURPOSE is the default argument; TREE_VALUE the parameter.  */
  return build_tree_list (parameter_declarator->default_argument, parm);
}
/* Parse a type-parameter.
   type-parameter:
     class identifier [opt]
     class identifier [opt] = type-id
     typename identifier [opt]
     typename identifier [opt] = type-id
     template < template-parameter-list > class identifier [opt]
     template < template-parameter-list > class identifier [opt]
       = id-expression
   Returns a TREE_LIST.  The TREE_VALUE is itself a TREE_LIST.  The
   TREE_PURPOSE is the default-argument, if any.  The TREE_VALUE is
   the declaration of the parameter.  */
static tree
cp_parser_type_parameter (cp_parser* parser)
{
  cp_token *token;
  tree parameter;

  /* Look for a keyword to tell us what kind of parameter this is.
     cp_parser_require consumes the token and returns NULL on
     failure.  */
  token = cp_parser_require (parser, CPP_KEYWORD,
			     "`class', `typename', or `template'");
  if (!token)
    return error_mark_node;
  switch (token->keyword)
    {
    case RID_CLASS:
    case RID_TYPENAME:
      {
	tree identifier;
	tree default_argument;

	/* If the next token is an identifier, then it names the
	   parameter.  */
	if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
	  identifier = cp_parser_identifier (parser);
	else
	  identifier = NULL_TREE;
	/* Create the parameter.  */
	parameter = finish_template_type_parm (class_type_node, identifier);
	/* If the next token is an `=', we have a default argument.  */
	if (cp_lexer_next_token_is (parser->lexer, CPP_EQ))
	  {
	    /* Consume the `=' token.  */
	    cp_lexer_consume_token (parser->lexer);
	    /* Parse the default-argument, with access checks
	       suppressed while doing so.  */
	    push_deferring_access_checks (dk_no_deferred);
	    default_argument = cp_parser_type_id (parser);
	    pop_deferring_access_checks ();
	  }
	else
	  default_argument = NULL_TREE;
	/* Create the combined representation of the parameter and the
	   default argument.  */
	parameter = build_tree_list (default_argument, parameter);
      }
      break;

    case RID_TEMPLATE:
      {
	tree parameter_list;
	tree identifier;
	tree default_argument;

	/* Look for the `<'.  */
	cp_parser_require (parser, CPP_LESS, "`<'");
	/* Parse the template-parameter-list.  */
	parameter_list = cp_parser_template_parameter_list (parser);
	/* Look for the `>'.  */
	cp_parser_require (parser, CPP_GREATER, "`>'");
	/* Look for the `class' keyword.  */
	cp_parser_require_keyword (parser, RID_CLASS, "`class'");
	/* If the next token is an `=', then there is a
	   default-argument.  If the next token is a `>', we are at
	   the end of the parameter-list.  If the next token is a `,',
	   then we are at the end of this parameter.  Only in the
	   remaining case is an identifier expected.  */
	if (cp_lexer_next_token_is_not (parser->lexer, CPP_EQ)
	    && cp_lexer_next_token_is_not (parser->lexer, CPP_GREATER)
	    && cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA))
	  {
	    identifier = cp_parser_identifier (parser);
	    /* Treat invalid names as if the parameter were nameless.  */
	    if (identifier == error_mark_node)
	      identifier = NULL_TREE;
	  }
	else
	  identifier = NULL_TREE;
	/* Create the template parameter.  */
	parameter = finish_template_template_parm (class_type_node,
						   identifier);
	/* If the next token is an `=', then there is a
	   default-argument.  */
	if (cp_lexer_next_token_is (parser->lexer, CPP_EQ))
	  {
	    bool is_template;

	    /* Consume the `='.  */
	    cp_lexer_consume_token (parser->lexer);
	    /* Parse the id-expression, with access checks
	       suppressed.  */
	    push_deferring_access_checks (dk_no_deferred);
	    default_argument
	      = cp_parser_id_expression (parser,
					 /*template_keyword_p=*/false,
					 /*check_dependency_p=*/true,
					 /*template_p=*/&is_template,
					 /*declarator_p=*/false,
					 /*optional_p=*/false);
	    if (TREE_CODE (default_argument) == TYPE_DECL)
	      /* If the id-expression was a template-id that refers to
		 a template-class, we already have the declaration here,
		 so no further lookup is needed.  */
		 ;
	    else
	      /* Look up the name.  */
	      default_argument
		= cp_parser_lookup_name (parser, default_argument,
					 none_type,
					 /*is_template=*/is_template,
					 /*is_namespace=*/false,
					 /*check_dependency=*/true,
					 /*ambiguous_decls=*/NULL);
	    /* See if the default argument is valid.  */
	    default_argument
	      = check_template_template_default_arg (default_argument);
	    pop_deferring_access_checks ();
	  }
	else
	  default_argument = NULL_TREE;
	/* Create the combined representation of the parameter and the
	   default argument.  */
	parameter = build_tree_list (default_argument, parameter);
      }
      break;

    default:
      /* cp_parser_require above guarantees one of the three keywords;
	 anything else here is an internal error.  */
      gcc_unreachable ();
      break;
    }
  return parameter;
}
/* Parse a template-id.
template-id:
template-name < template-argument-list [opt] >
If TEMPLATE_KEYWORD_P is TRUE, then we have just seen the
`template' keyword. In this case, a TEMPLATE_ID_EXPR will be
returned. Otherwise, if the template-name names a function, or set
of functions, returns a TEMPLATE_ID_EXPR. If the template-name
names a class, returns a TYPE_DECL for the specialization.
If CHECK_DEPENDENCY_P is FALSE, names are looked up in
uninstantiated templates. */
static tree
cp_parser_template_id (cp_parser *parser,
bool template_keyword_p,
bool check_dependency_p,
bool is_declaration)
{
int i;
tree template;
tree arguments;
tree template_id;
cp_token_position start_of_id = 0;
deferred_access_check *chk;
VEC (deferred_access_check,gc) *access_check;
cp_token *next_token, *next_token_2;
bool is_identifier;
/* If the next token corresponds to a template-id, there is no need
to reparse it. */
next_token = cp_lexer_peek_token (parser->lexer);
if (next_token->type == CPP_TEMPLATE_ID)
{
struct tree_check *check_value;
/* Get the stored value. */
check_value = cp_lexer_consume_token (parser->lexer)->u.tree_check_value;
/* Perform any access checks that were deferred. */
access_check = check_value->checks;
if (access_check)
{
for (i = 0 ;
VEC_iterate (deferred_access_check, access_check, i, chk) ;
++i)
{
perform_or_defer_access_check (chk->binfo,
chk->decl,
chk->diag_decl);
}
}
/* Return the stored value. */
return check_value->value;
}
/* Avoid performing name lookup if there is no possibility of
finding a template-id. */
if ((next_token->type != CPP_NAME && next_token->keyword != RID_OPERATOR)
|| (next_token->type == CPP_NAME
&& !cp_parser_nth_token_starts_template_argument_list_p
(parser, 2)))
{
cp_parser_error (parser, "expected template-id");
return error_mark_node;
}
/* Remember where the template-id starts. */
if (cp_parser_uncommitted_to_tentative_parse_p (parser))
start_of_id = cp_lexer_token_position (parser->lexer, false);
push_deferring_access_checks (dk_deferred);
/* Parse the template-name. */
is_identifier = false;
template = cp_parser_template_name (parser, template_keyword_p,
check_dependency_p,
is_declaration,
&is_identifier);
if (template == error_mark_node || is_identifier)
{
pop_deferring_access_checks ();
return template;
}
/* If we find the sequence `[:' after a template-name, it's probably
a digraph-typo for `< ::'. Substitute the tokens and check if we can
parse correctly the argument list. */
next_token = cp_lexer_peek_token (parser->lexer);
next_token_2 = cp_lexer_peek_nth_token (parser->lexer, 2);
if (next_token->type == CPP_OPEN_SQUARE
&& next_token->flags & DIGRAPH
&& next_token_2->type == CPP_COLON
&& !(next_token_2->flags & PREV_WHITE))
{
cp_parser_parse_tentatively (parser);
/* Change `:' into `::'. */
next_token_2->type = CPP_SCOPE;
/* Consume the first token (CPP_OPEN_SQUARE - which we pretend it is
CPP_LESS. */
cp_lexer_consume_token (parser->lexer);
/* Parse the arguments. */
arguments = cp_parser_enclosed_template_argument_list (parser);
if (!cp_parser_parse_definitely (parser))
{
/* If we couldn't parse an argument list, then we revert our changes
and return simply an error. Maybe this is not a template-id
after all. */
next_token_2->type = CPP_COLON;
cp_parser_error (parser, "expected %<<%>");
pop_deferring_access_checks ();
return error_mark_node;
}
/* Otherwise, emit an error about the invalid digraph, but continue
parsing because we got our argument list. */
pedwarn ("%<<::%> cannot begin a template-argument list");
inform ("%<<:%> is an alternate spelling for %<[%>. Insert whitespace "
"between %<<%> and %<::%>");
if (!flag_permissive)
{
static bool hint;
if (!hint)
{
inform ("(if you use -fpermissive G++ will accept your code)");
hint = true;
}
}
}
else
{
/* Look for the `<' that starts the template-argument-list. */
if (!cp_parser_require (parser, CPP_LESS, "`<'"))
{
pop_deferring_access_checks ();
return error_mark_node;
}
/* Parse the arguments. */
arguments = cp_parser_enclosed_template_argument_list (parser);
}
/* Build a representation of the specialization. */
if (TREE_CODE (template) == IDENTIFIER_NODE)
template_id = build_min_nt (TEMPLATE_ID_EXPR, template, arguments);
else if (DECL_CLASS_TEMPLATE_P (template)
|| DECL_TEMPLATE_TEMPLATE_PARM_P (template))
{
bool entering_scope;
/* In "template <typename T> ... A<T>::", A<T> is the abstract A
template (rather than some instantiation thereof) only if
is not nested within some other construct. For example, in
"template <typename T> void f(T) { A<T>::", A<T> is just an
instantiation of A. */
entering_scope = (template_parm_scope_p ()
&& cp_lexer_next_token_is (parser->lexer,
CPP_SCOPE));
template_id
= finish_template_type (template, arguments, entering_scope);
}
else
{
/* If it's not a class-template or a template-template, it should be
a function-template. */
gcc_assert ((DECL_FUNCTION_TEMPLATE_P (template)
|| TREE_CODE (template) == OVERLOAD
|| BASELINK_P (template)));
template_id = lookup_template_function (template, arguments);
}
/* If parsing tentatively, replace the sequence of tokens that makes
up the template-id with a CPP_TEMPLATE_ID token. That way,
should we re-parse the token stream, we will not have to repeat
the effort required to do the parse, nor will we issue duplicate
error messages about problems during instantiation of the
template. */
if (start_of_id)
{
cp_token *token = cp_lexer_token_at (parser->lexer, start_of_id);
/* Reset the contents of the START_OF_ID token. */
token->type = CPP_TEMPLATE_ID;
/* Retrieve any deferred checks. Do not pop these access checks yet
so the memory will not be reclaimed during token replacing below. */
token->u.tree_check_value = GGC_CNEW (struct tree_check);
token->u.tree_check_value->value = template_id;
token->u.tree_check_value->checks = get_deferred_access_checks ();
token->keyword = RID_MAX;
/* Purge all subsequent tokens. */
cp_lexer_purge_tokens_after (parser->lexer, start_of_id);
/* ??? Can we actually assume that, if template_id ==
error_mark_node, we will have issued a diagnostic to the
user, as opposed to simply marking the tentative parse as
failed? */
if (cp_parser_error_occurred (parser) && template_id != error_mark_node)
error ("parse error in template argument list");
}
pop_deferring_access_checks ();
return template_id;
}
/* Parse a template-name.
template-name:
identifier
The standard should actually say:
template-name:
identifier
operator-function-id
A defect report has been filed about this issue.
A conversion-function-id cannot be a template name because they cannot
be part of a template-id. In fact, looking at this code:
a.operator K<int>()
the conversion-function-id is "operator K<int>", and K<int> is a type-id.
It is impossible to call a templated conversion-function-id with an
explicit argument list, since the only allowed template parameter is
the type to which it is converting.
If TEMPLATE_KEYWORD_P is true, then we have just seen the
`template' keyword, in a construction like:
T::template f<3>()
In that case `f' is taken to be a template-name, even though there
is no way of knowing for sure.
Returns the TEMPLATE_DECL for the template, or an OVERLOAD if the
name refers to a set of overloaded functions, at least one of which
is a template, or an IDENTIFIER_NODE with the name of the template,
if TEMPLATE_KEYWORD_P is true. If CHECK_DEPENDENCY_P is FALSE,
names are looked up inside uninstantiated templates. */
static tree
cp_parser_template_name (cp_parser* parser,
			 bool template_keyword_p,
			 bool check_dependency_p,
			 bool is_declaration,
			 bool *is_identifier)
{
  tree identifier;
  tree decl;
  tree fns;

  /* If the next token is `operator', then we have either an
     operator-function-id or a conversion-function-id.  */
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_OPERATOR))
    {
      /* We don't know whether we're looking at an
	 operator-function-id or a conversion-function-id.  */
      cp_parser_parse_tentatively (parser);
      /* Try an operator-function-id.  */
      identifier = cp_parser_operator_function_id (parser);
      /* If that didn't work, this is not a template-name; a
	 conversion-function-id cannot be a template-name (see the
	 comment preceding this function).  */
      if (!cp_parser_parse_definitely (parser))
	{
	  cp_parser_error (parser, "expected template-name");
	  return error_mark_node;
	}
    }
  /* Look for the identifier.  */
  else
    identifier = cp_parser_identifier (parser);

  /* If we didn't find an identifier, we don't have a template-id.  */
  if (identifier == error_mark_node)
    return error_mark_node;

  /* If the name immediately followed the `template' keyword, then it
     is a template-name.  However, if the next token is not `<', then
     we do not treat it as a template-name, since it is not being used
     as part of a template-id.  This enables us to handle constructs
     like:

       template <typename T> struct S { S(); };
       template <typename T> S<T>::S();

     correctly.  We would treat `S' as a template -- if it were `S<T>'
     -- but we do not if there is no `<'.  */
  if (processing_template_decl
      && cp_parser_nth_token_starts_template_argument_list_p (parser, 1))
    {
      /* In a declaration, in a dependent context, we pretend that the
	 "template" keyword was present in order to improve error
	 recovery.  For example, given:

	   template <typename T> void f(T::X<int>);

	 we want to treat "X<int>" as a template-id.  */
      if (is_declaration
	  && !template_keyword_p
	  && parser->scope && TYPE_P (parser->scope)
	  && check_dependency_p
	  && dependent_type_p (parser->scope)
	  /* Do not do this for dtors (or ctors), since they never
	     need the template keyword before their name.  */
	  && !constructor_name_p (identifier, parser->scope))
	{
	  cp_token_position start = 0;

	  /* Explain what went wrong.  */
	  error ("non-template %qD used as template", identifier);
	  inform ("use %<%T::template %D%> to indicate that it is a template",
		  parser->scope, identifier);
	  /* If parsing tentatively, find the location of the "<" token.
	     A nonzero START means we may purge tokens below.  */
	  if (cp_parser_simulate_error (parser))
	    start = cp_lexer_token_position (parser->lexer, true);
	  /* Parse the template arguments so that we can issue error
	     messages about them.  */
	  cp_lexer_consume_token (parser->lexer);
	  cp_parser_enclosed_template_argument_list (parser);
	  /* Skip tokens until we find a good place from which to
	     continue parsing.  */
	  cp_parser_skip_to_closing_parenthesis (parser,
						 /*recovering=*/true,
						 /*or_comma=*/true,
						 /*consume_paren=*/false);
	  /* If parsing tentatively, permanently remove the
	     template argument list.  That will prevent duplicate
	     error messages from being issued about the missing
	     "template" keyword.  */
	  if (start)
	    cp_lexer_purge_tokens_after (parser->lexer, start);
	  if (is_identifier)
	    *is_identifier = true;
	  return identifier;
	}

      /* If the "template" keyword is present, then there is generally
	 no point in doing name-lookup, so we just return IDENTIFIER.
	 But, if the qualifying scope is non-dependent then we can
	 (and must) do name-lookup normally.  */
      if (template_keyword_p
	  && (!parser->scope
	      || (TYPE_P (parser->scope)
		  && dependent_type_p (parser->scope))))
	return identifier;
    }

  /* Look up the name.  */
  decl = cp_parser_lookup_name (parser, identifier,
				none_type,
				/*is_template=*/false,
				/*is_namespace=*/false,
				check_dependency_p,
				/*ambiguous_decls=*/NULL);
  /* The lookup may have produced a TYPE_DECL; presumably this maps it
     back to the corresponding TEMPLATE_DECL where one exists -- TODO
     confirm against maybe_get_template_decl_from_type_decl.  */
  decl = maybe_get_template_decl_from_type_decl (decl);

  /* If DECL is a template, then the name was a template-name.  */
  if (TREE_CODE (decl) == TEMPLATE_DECL)
    ;
  else
    {
      tree fn = NULL_TREE;

      /* The standard does not explicitly indicate whether a name that
	 names a set of overloaded declarations, some of which are
	 templates, is a template-name.  However, such a name should
	 be a template-name; otherwise, there is no way to form a
	 template-id for the overloaded templates.  */
      fns = BASELINK_P (decl) ? BASELINK_FUNCTIONS (decl) : decl;
      if (TREE_CODE (fns) == OVERLOAD)
	/* Scan the overload set for at least one template.  */
	for (fn = fns; fn; fn = OVL_NEXT (fn))
	  if (TREE_CODE (OVL_CURRENT (fn)) == TEMPLATE_DECL)
	    break;

      if (!fn)
	{
	  /* The name does not name a template.  */
	  cp_parser_error (parser, "expected template-name");
	  return error_mark_node;
	}
    }

  /* If DECL is dependent, and refers to a function, then just return
     its name; we will look it up again during template instantiation.  */
  if (DECL_FUNCTION_TEMPLATE_P (decl) || !DECL_P (decl))
    {
      tree scope = CP_DECL_CONTEXT (get_first_fn (decl));
      if (TYPE_P (scope) && dependent_type_p (scope))
	return identifier;
    }

  return decl;
}
/* Parse a template-argument-list.
template-argument-list:
template-argument
template-argument-list , template-argument
Returns a TREE_VEC containing the arguments. */
/* Parse the comma-separated arguments of a template-argument-list into
   a TREE_VEC.  Arguments are accumulated in a small on-stack buffer
   that is promoted to a heap allocation only if more than ten
   arguments appear.  */

static tree
cp_parser_template_argument_list (cp_parser* parser)
{
  tree stack_args[10];
  tree *args = stack_args;
  unsigned int count = 0;
  unsigned int capacity = 10;
  tree result;
  bool outer_in_list_p = parser->in_template_argument_list_p;
  bool outer_ice_p = parser->integral_constant_expression_p;
  bool outer_non_ice_p = parser->non_integral_constant_expression_p;

  parser->in_template_argument_list_p = true;
  /* Even if the template-id appears in an integral
     constant-expression, the contents of the argument list do
     not.  */
  parser->integral_constant_expression_p = false;
  parser->non_integral_constant_expression_p = false;

  /* Parse the arguments, one per iteration.  */
  while (true)
    {
      tree arg;

      /* Every argument after the first is preceded by a `,'.  */
      if (count)
	cp_lexer_consume_token (parser->lexer);

      arg = cp_parser_template_argument (parser);

      /* Grow the buffer before storing if it is full.  */
      if (count == capacity)
	{
	  capacity *= 2;
	  if (args == stack_args)
	    {
	      args = XNEWVEC (tree, capacity);
	      memcpy (args, stack_args, sizeof (tree) * count);
	    }
	  else
	    args = XRESIZEVEC (tree, args, capacity);
	}
      args[count++] = arg;

      if (!cp_lexer_next_token_is (parser->lexer, CPP_COMMA))
	break;
    }

  /* Copy the accumulated arguments into a TREE_VEC.  */
  result = make_tree_vec (count);
  for (unsigned int i = 0; i < count; ++i)
    TREE_VEC_ELT (result, i) = args[i];

  if (args != stack_args)
    free (args);

  /* Restore the saved constant-expression context.  */
  parser->non_integral_constant_expression_p = outer_non_ice_p;
  parser->integral_constant_expression_p = outer_ice_p;
  parser->in_template_argument_list_p = outer_in_list_p;

  return result;
}
/* Parse a template-argument.
template-argument:
assignment-expression
type-id
id-expression
The representation is that of an assignment-expression, type-id, or
id-expression -- except that the qualified id-expression is
evaluated, so that the value returned is either a DECL or an
OVERLOAD.
Although the standard says "assignment-expression", it forbids
throw-expressions or assignments in the template argument.
Therefore, we use "conditional-expression" instead. */
static tree
cp_parser_template_argument (cp_parser* parser)
{
  tree argument;
  bool template_p;
  bool address_p;
  bool maybe_type_id = false;
  cp_token *token;
  cp_id_kind idk;

  /* There's really no way to know what we're looking at, so we just
     try each alternative in order.

       [temp.arg]

       In a template-argument, an ambiguity between a type-id and an
       expression is resolved to a type-id, regardless of the form of
       the corresponding template-parameter.

     Therefore, we try a type-id first.  */
  cp_parser_parse_tentatively (parser);
  argument = cp_parser_type_id (parser);
  /* If there was no error parsing the type-id but the next token is a '>>',
     we probably found a typo for '> >'. But there are type-id which are
     also valid expressions. For instance:

       struct X { int operator >> (int); };
       template <int V> struct Foo {};
       Foo<X () >> 5> r;

     Here 'X()' is a valid type-id of a function type, but the user just
     wanted to write the expression "X() >> 5". Thus, we remember that we
     found a valid type-id, but we still try to parse the argument as an
     expression to see what happens.  */
  if (!cp_parser_error_occurred (parser)
      && cp_lexer_next_token_is (parser->lexer, CPP_RSHIFT))
    {
      maybe_type_id = true;
      cp_parser_abort_tentative_parse (parser);
    }
  else
    {
      /* If the next token isn't a `,' or a `>', then this argument wasn't
	 really finished.  This means that the argument is not a valid
	 type-id.  */
      if (!cp_parser_next_token_ends_template_argument_p (parser))
	cp_parser_error (parser, "expected template-argument");
      /* If that worked, we're done.  */
      if (cp_parser_parse_definitely (parser))
	return argument;
    }

  /* We're still not sure what the argument will be.  */
  cp_parser_parse_tentatively (parser);
  /* Try a template.  */
  argument = cp_parser_id_expression (parser,
				      /*template_keyword_p=*/false,
				      /*check_dependency_p=*/true,
				      &template_p,
				      /*declarator_p=*/false,
				      /*optional_p=*/false);
  /* If the next token isn't a `,' or a `>', then this argument wasn't
     really finished.  */
  if (!cp_parser_next_token_ends_template_argument_p (parser))
    cp_parser_error (parser, "expected template-argument");
  if (!cp_parser_error_occurred (parser))
    {
      /* Figure out what is being referred to.  If the id-expression
	 was for a class template specialization, then we will have a
	 TYPE_DECL at this point.  There is no need to do name lookup
	 at this point in that case.  */
      if (TREE_CODE (argument) != TYPE_DECL)
	argument = cp_parser_lookup_name (parser, argument,
					  none_type,
					  /*is_template=*/template_p,
					  /*is_namespace=*/false,
					  /*check_dependency=*/true,
					  /*ambiguous_decls=*/NULL);
      if (TREE_CODE (argument) != TEMPLATE_DECL
	  && TREE_CODE (argument) != UNBOUND_CLASS_TEMPLATE)
	cp_parser_error (parser, "expected template-name");
    }
  if (cp_parser_parse_definitely (parser))
    return argument;

  /* It must be a non-type argument.  There permitted cases are given
     in [temp.arg.nontype]:

       -- an integral constant-expression of integral or enumeration
	  type; or

       -- the name of a non-type template-parameter; or

       -- the name of an object or function with external linkage...

       -- the address of an object or function with external linkage...

       -- a pointer to member...  */
  /* Look for a non-type template parameter.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
    {
      cp_parser_parse_tentatively (parser);
      argument = cp_parser_primary_expression (parser,
					       /*address_p=*/false,
					       /*cast_p=*/false,
					       /*template_arg_p=*/true,
					       &idk);
      /* Only a bare template-parameter reference is accepted here;
	 anything else falls through to the alternatives below.  */
      if (TREE_CODE (argument) != TEMPLATE_PARM_INDEX
	  || !cp_parser_next_token_ends_template_argument_p (parser))
	cp_parser_simulate_error (parser);
      if (cp_parser_parse_definitely (parser))
	return argument;
    }

  /* If the next token is "&", the argument must be the address of an
     object or function with external linkage.  */
  address_p = cp_lexer_next_token_is (parser->lexer, CPP_AND);
  if (address_p)
    cp_lexer_consume_token (parser->lexer);
  /* See if we might have an id-expression.  */
  token = cp_lexer_peek_token (parser->lexer);
  if (token->type == CPP_NAME
      || token->keyword == RID_OPERATOR
      || token->type == CPP_SCOPE
      || token->type == CPP_TEMPLATE_ID
      || token->type == CPP_NESTED_NAME_SPECIFIER)
    {
      cp_parser_parse_tentatively (parser);
      argument = cp_parser_primary_expression (parser,
					       address_p,
					       /*cast_p=*/false,
					       /*template_arg_p=*/true,
					       &idk);
      if (cp_parser_error_occurred (parser)
	  || !cp_parser_next_token_ends_template_argument_p (parser))
	cp_parser_abort_tentative_parse (parser);
      else
	{
	  /* Strip the implicit dereference of a reference so the
	     checks below see the underlying entity.  */
	  if (TREE_CODE (argument) == INDIRECT_REF)
	    {
	      gcc_assert (REFERENCE_REF_P (argument));
	      argument = TREE_OPERAND (argument, 0);
	    }

	  if (TREE_CODE (argument) == VAR_DECL)
	    {
	      /* A variable without external linkage might still be a
		 valid constant-expression, so no error is issued here
		 if the external-linkage check fails.  */
	      if (!address_p && !DECL_EXTERNAL_LINKAGE_P (argument))
		cp_parser_simulate_error (parser);
	    }
	  else if (is_overloaded_fn (argument))
	    /* All overloaded functions are allowed; if the external
	       linkage test does not pass, an error will be issued
	       later.  */
	    ;
	  else if (address_p
		   && (TREE_CODE (argument) == OFFSET_REF
		       || TREE_CODE (argument) == SCOPE_REF))
	    /* A pointer-to-member.  */
	    ;
	  else if (TREE_CODE (argument) == TEMPLATE_PARM_INDEX)
	    ;
	  else
	    cp_parser_simulate_error (parser);

	  if (cp_parser_parse_definitely (parser))
	    {
	      /* Re-apply the `&' that was consumed above.  */
	      if (address_p)
		argument = build_x_unary_op (ADDR_EXPR, argument);
	      return argument;
	    }
	}
    }
  /* If the argument started with "&", there are no other valid
     alternatives at this point.  */
  if (address_p)
    {
      cp_parser_error (parser, "invalid non-type template argument");
      return error_mark_node;
    }

  /* If the argument wasn't successfully parsed as a type-id followed
     by '>>', the argument can only be a constant expression now.
     Otherwise, we try parsing the constant-expression tentatively,
     because the argument could really be a type-id.  */
  if (maybe_type_id)
    cp_parser_parse_tentatively (parser);
  argument = cp_parser_constant_expression (parser,
					    /*allow_non_constant_p=*/false,
					    /*non_constant_p=*/NULL);
  argument = fold_non_dependent_expr (argument);
  if (!maybe_type_id)
    return argument;
  if (!cp_parser_next_token_ends_template_argument_p (parser))
    cp_parser_error (parser, "expected template-argument");
  if (cp_parser_parse_definitely (parser))
    return argument;
  /* We did our best to parse the argument as a non type-id, but that
     was the only alternative that matched (albeit with a '>' after
     it).  We can assume it's just a typo from the user, and a
     diagnostic will then be issued.  */
  return cp_parser_type_id (parser);
}
/* Parse an explicit-instantiation.
explicit-instantiation:
template declaration
Although the standard says `declaration', what it really means is:
explicit-instantiation:
template decl-specifier-seq [opt] declarator [opt] ;
Things like `template int S<int>::i = 5, int S<double>::j;' are not
supposed to be allowed. A defect report has been filed about this
issue.
GNU Extension:
explicit-instantiation:
storage-class-specifier template
decl-specifier-seq [opt] declarator [opt] ;
function-specifier template
decl-specifier-seq [opt] declarator [opt] ; */
static void
cp_parser_explicit_instantiation (cp_parser* parser)
{
  int declares_class_or_enum;
  cp_decl_specifier_seq decl_specifiers;
  tree extension_specifier = NULL_TREE;

  /* Look for an (optional) storage-class-specifier or
     function-specifier.  */
  if (cp_parser_allow_gnu_extensions_p (parser))
    {
      extension_specifier
	= cp_parser_storage_class_specifier_opt (parser);
      if (!extension_specifier)
	extension_specifier
	  = cp_parser_function_specifier_opt (parser,
					      /*decl_specs=*/NULL);
    }

  /* Look for the `template' keyword.  */
  cp_parser_require_keyword (parser, RID_TEMPLATE, "`template'");
  /* Let the front end know that we are processing an explicit
     instantiation.  */
  begin_explicit_instantiation ();
  /* [temp.explicit] says that we are supposed to ignore access
     control while processing explicit instantiation directives.
     NOTE: every path below must pop this before finishing.  */
  push_deferring_access_checks (dk_no_check);
  /* Parse a decl-specifier-seq.  */
  cp_parser_decl_specifier_seq (parser,
				CP_PARSER_FLAGS_OPTIONAL,
				&decl_specifiers,
				&declares_class_or_enum);
  /* If there was exactly one decl-specifier, and it declared a class,
     and there's no declarator, then we have an explicit type
     instantiation.  */
  if (declares_class_or_enum && cp_parser_declares_only_class_p (parser))
    {
      tree type;

      type = check_tag_decl (&decl_specifiers);
      /* Turn access control back on for names used during
	 template instantiation.  */
      pop_deferring_access_checks ();
      if (type)
	do_type_instantiation (type, extension_specifier,
			       /*complain=*/tf_error);
    }
  else
    {
      cp_declarator *declarator;
      tree decl;

      /* Parse the declarator.  */
      declarator
	= cp_parser_declarator (parser, CP_PARSER_DECLARATOR_NAMED,
				/*ctor_dtor_or_conv_p=*/NULL,
				/*parenthesized_p=*/NULL,
				/*member_p=*/false);
      /* The `2' bit means the type-specifier was a class/enum
	 definition, which is invalid inside a return type.  */
      if (declares_class_or_enum & 2)
	cp_parser_check_for_definition_in_return_type (declarator,
						       decl_specifiers.type);
      if (declarator != cp_error_declarator)
	{
	  decl = grokdeclarator (declarator, &decl_specifiers,
				 NORMAL, 0, &decl_specifiers.attributes);
	  /* Turn access control back on for names used during
	     template instantiation.  */
	  pop_deferring_access_checks ();
	  /* Do the explicit instantiation.  */
	  do_decl_instantiation (decl, extension_specifier);
	}
      else
	{
	  pop_deferring_access_checks ();
	  /* Skip the body of the explicit instantiation.  */
	  cp_parser_skip_to_end_of_statement (parser);
	}
    }
  /* We're done with the instantiation.  */
  end_explicit_instantiation ();

  cp_parser_consume_semicolon_at_end_of_statement (parser);
}
/* Parse an explicit-specialization.
explicit-specialization:
template < > declaration
Although the standard says `declaration', what it really means is:
explicit-specialization:
template <> decl-specifier [opt] init-declarator [opt] ;
template <> function-definition
template <> explicit-specialization
template <> template-declaration */
static void
cp_parser_explicit_specialization (cp_parser* parser)
{
  bool need_lang_pop;
  /* Look for the `template' keyword.  */
  cp_parser_require_keyword (parser, RID_TEMPLATE, "`template'");
  /* Look for the `<'.  */
  cp_parser_require (parser, CPP_LESS, "`<'");
  /* Look for the `>'.  */
  cp_parser_require (parser, CPP_GREATER, "`>'");
  /* We have processed another parameter list.  */
  ++parser->num_template_parameter_lists;
  /* [temp]

     A template ... explicit specialization ... shall not have C
     linkage.  */
  if (current_lang_name == lang_name_c)
    {
      error ("template specialization with C linkage");
      /* Give it C++ linkage to avoid confusing other parts of the
	 front end.  */
      push_lang_context (lang_name_cplusplus);
      need_lang_pop = true;
    }
  else
    need_lang_pop = false;
  /* Let the front end know that we are beginning a specialization.  */
  if (!begin_specialization ())
    {
      end_specialization ();
      cp_parser_skip_to_end_of_block_or_statement (parser);
      return;
    }

  /* If the next keyword is `template', we need to figure out whether
     or not we're looking a template-declaration.  */
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_TEMPLATE))
    {
      /* "template <...>" (with a non-empty parameter list) starts a
	 template-declaration; "template <>" is a further (nested)
	 explicit specialization, handled by recursing.  */
      if (cp_lexer_peek_nth_token (parser->lexer, 2)->type == CPP_LESS
	  && cp_lexer_peek_nth_token (parser->lexer, 3)->type != CPP_GREATER)
	cp_parser_template_declaration_after_export (parser,
						     /*member_p=*/false);
      else
	cp_parser_explicit_specialization (parser);
    }
  else
    /* Parse the dependent declaration.  */
    cp_parser_single_declaration (parser,
				  /*checks=*/NULL,
				  /*member_p=*/false,
				  /*friend_p=*/NULL);
  /* We're done with the specialization.  */
  end_specialization ();
  /* For the erroneous case of a template with C linkage, we pushed an
     implicit C++ linkage scope; exit that scope now.  */
  if (need_lang_pop)
    pop_lang_context ();
  /* We're done with this parameter list.  */
  --parser->num_template_parameter_lists;
}
/* Parse a type-specifier.
type-specifier:
simple-type-specifier
class-specifier
enum-specifier
elaborated-type-specifier
cv-qualifier
GNU Extension:
type-specifier:
__complex__
Returns a representation of the type-specifier. For a
class-specifier, enum-specifier, or elaborated-type-specifier, a
TREE_TYPE is returned; otherwise, a TYPE_DECL is returned.
The parser flags FLAGS is used to control type-specifier parsing.
If IS_DECLARATION is TRUE, then this type-specifier is appearing
in a decl-specifier-seq.
If DECLARES_CLASS_OR_ENUM is non-NULL, and the type-specifier is a
class-specifier, enum-specifier, or elaborated-type-specifier, then
*DECLARES_CLASS_OR_ENUM is set to a nonzero value. The value is 1
if a type is declared; 2 if it is defined. Otherwise, it is set to
zero.
If IS_CV_QUALIFIER is non-NULL, and the type-specifier is a
cv-qualifier, then IS_CV_QUALIFIER is set to TRUE. Otherwise, it
is set to FALSE. */
static tree
cp_parser_type_specifier (cp_parser* parser,
			  cp_parser_flags flags,
			  cp_decl_specifier_seq *decl_specs,
			  bool is_declaration,
			  int* declares_class_or_enum,
			  bool* is_cv_qualifier)
{
  tree type_spec = NULL_TREE;
  cp_token *token;
  enum rid keyword;
  /* DS stays ds_last unless a simple one-keyword specifier
     (cv-qualifier or __complex__) is recognized below.  */
  cp_decl_spec ds = ds_last;

  /* Assume this type-specifier does not declare a new type.  */
  if (declares_class_or_enum)
    *declares_class_or_enum = 0;
  /* And that it does not specify a cv-qualifier.  */
  if (is_cv_qualifier)
    *is_cv_qualifier = false;
  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);

  /* If we're looking at a keyword, we can use that to guide the
     production we choose.  */
  keyword = token->keyword;
  switch (keyword)
    {
    case RID_ENUM:
      /* Look for the enum-specifier.  */
      type_spec = cp_parser_enum_specifier (parser);
      /* If that worked, we're done.  */
      if (type_spec)
	{
	  if (declares_class_or_enum)
	    *declares_class_or_enum = 2;
	  if (decl_specs)
	    cp_parser_set_decl_spec_type (decl_specs,
					  type_spec,
					  /*user_defined_p=*/true);
	  return type_spec;
	}
      else
	goto elaborated_type_specifier;

      /* Any of these indicate either a class-specifier, or an
	 elaborated-type-specifier.  */
    case RID_CLASS:
    case RID_STRUCT:
    case RID_UNION:
      /* Parse tentatively so that we can back up if we don't find a
	 class-specifier.  */
      cp_parser_parse_tentatively (parser);
      /* Look for the class-specifier.  */
      type_spec = cp_parser_class_specifier (parser);
      /* If that worked, we're done.  */
      if (cp_parser_parse_definitely (parser))
	{
	  if (declares_class_or_enum)
	    *declares_class_or_enum = 2;
	  if (decl_specs)
	    cp_parser_set_decl_spec_type (decl_specs,
					  type_spec,
					  /*user_defined_p=*/true);
	  return type_spec;
	}

      /* Fall through.  */
    elaborated_type_specifier:
      /* We're declaring (not defining) a class or enum.  */
      if (declares_class_or_enum)
	*declares_class_or_enum = 1;

      /* Fall through.  */
    case RID_TYPENAME:
      /* Look for an elaborated-type-specifier.  */
      type_spec
	= (cp_parser_elaborated_type_specifier
	   (parser,
	    decl_specs && decl_specs->specs[(int) ds_friend],
	    is_declaration));
      if (decl_specs)
	cp_parser_set_decl_spec_type (decl_specs,
				      type_spec,
				      /*user_defined_p=*/true);
      return type_spec;

    case RID_CONST:
      ds = ds_const;
      if (is_cv_qualifier)
	*is_cv_qualifier = true;
      break;

    case RID_VOLATILE:
      ds = ds_volatile;
      if (is_cv_qualifier)
	*is_cv_qualifier = true;
      break;

    case RID_RESTRICT:
      ds = ds_restrict;
      if (is_cv_qualifier)
	*is_cv_qualifier = true;
      break;

    case RID_COMPLEX:
      /* The `__complex__' keyword is a GNU extension.  */
      ds = ds_complex;
      break;

    default:
      break;
    }

  /* Handle simple keywords.  */
  if (ds != ds_last)
    {
      if (decl_specs)
	{
	  /* Count repetitions so duplicates can be diagnosed later.  */
	  ++decl_specs->specs[(int)ds];
	  decl_specs->any_specifiers_p = true;
	}
      return cp_lexer_consume_token (parser->lexer)->u.value;
    }

  /* If we do not already have a type-specifier, assume we are looking
     at a simple-type-specifier.  */
  type_spec = cp_parser_simple_type_specifier (parser,
					       decl_specs,
					       flags);

  /* If we didn't find a type-specifier, and a type-specifier was not
     optional in this context, issue an error message.  */
  if (!type_spec && !(flags & CP_PARSER_FLAGS_OPTIONAL))
    {
      cp_parser_error (parser, "expected type specifier");
      return error_mark_node;
    }

  return type_spec;
}
/* Parse a simple-type-specifier.
simple-type-specifier:
:: [opt] nested-name-specifier [opt] type-name
:: [opt] nested-name-specifier template template-id
char
wchar_t
bool
short
int
long
signed
unsigned
float
double
void
GNU Extension:
simple-type-specifier:
__typeof__ unary-expression
__typeof__ ( type-id )
Returns the indicated TYPE_DECL. If DECL_SPECS is not NULL, it is
appropriately updated. */
static tree
cp_parser_simple_type_specifier (cp_parser* parser,
				 cp_decl_specifier_seq *decl_specs,
				 cp_parser_flags flags)
{
  tree type = NULL_TREE;
  cp_token *token;

  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);

  /* If we're looking at a keyword, things are easy.  */
  switch (token->keyword)
    {
    case RID_CHAR:
      if (decl_specs)
	decl_specs->explicit_char_p = true;
      type = char_type_node;
      break;
    case RID_WCHAR:
      type = wchar_type_node;
      break;
    case RID_BOOL:
      type = boolean_type_node;
      break;
    case RID_SHORT:
      if (decl_specs)
	++decl_specs->specs[(int) ds_short];
      type = short_integer_type_node;
      break;
    case RID_INT:
      if (decl_specs)
	decl_specs->explicit_int_p = true;
      type = integer_type_node;
      break;
    case RID_LONG:
      if (decl_specs)
	++decl_specs->specs[(int) ds_long];
      type = long_integer_type_node;
      break;
    case RID_SIGNED:
      if (decl_specs)
	++decl_specs->specs[(int) ds_signed];
      type = integer_type_node;
      break;
    case RID_UNSIGNED:
      if (decl_specs)
	++decl_specs->specs[(int) ds_unsigned];
      type = unsigned_type_node;
      break;
    case RID_FLOAT:
      type = float_type_node;
      break;
    case RID_DOUBLE:
      type = double_type_node;
      break;
    case RID_VOID:
      type = void_type_node;
      break;

    case RID_TYPEOF:
      /* Consume the `typeof' token.  */
      cp_lexer_consume_token (parser->lexer);
      /* Parse the operand to `typeof'.  */
      type = cp_parser_sizeof_operand (parser, RID_TYPEOF);
      /* If it is not already a TYPE, take its type.  */
      if (!TYPE_P (type))
	type = finish_typeof (type);

      if (decl_specs)
	cp_parser_set_decl_spec_type (decl_specs, type,
				      /*user_defined_p=*/true);

      return type;

    default:
      break;
    }

  /* If the type-specifier was for a built-in type, we're done.  */
  if (type)
    {
      tree id;

      /* Record the type.  signed/unsigned/short/long are recorded
	 only as modifier counts above, not as the type itself, since
	 they may combine with a later keyword (e.g. "unsigned int").  */
      if (decl_specs
	  && (token->keyword != RID_SIGNED
	      && token->keyword != RID_UNSIGNED
	      && token->keyword != RID_SHORT
	      && token->keyword != RID_LONG))
	cp_parser_set_decl_spec_type (decl_specs,
				      type,
				      /*user_defined=*/false);
      if (decl_specs)
	decl_specs->any_specifiers_p = true;

      /* Consume the token.  */
      id = cp_lexer_consume_token (parser->lexer)->u.value;

      /* There is no valid C++ program where a non-template type is
	 followed by a "<".  That usually indicates that the user thought
	 that the type was a template.  */
      cp_parser_check_for_invalid_template_id (parser, type);

      return TYPE_NAME (type);
    }

  /* The type-specifier must be a user-defined type.  */
  if (!(flags & CP_PARSER_FLAGS_NO_USER_DEFINED_TYPES))
    {
      bool qualified_p;
      bool global_p;

      /* Don't gobble tokens or issue error messages if this is an
	 optional type-specifier.  */
      if (flags & CP_PARSER_FLAGS_OPTIONAL)
	cp_parser_parse_tentatively (parser);

      /* Look for the optional `::' operator.  */
      global_p
	= (cp_parser_global_scope_opt (parser,
				       /*current_scope_valid_p=*/false)
	   != NULL_TREE);
      /* Look for the nested-name specifier.  */
      qualified_p
	= (cp_parser_nested_name_specifier_opt (parser,
						/*typename_keyword_p=*/false,
						/*check_dependency_p=*/true,
						/*type_p=*/false,
						/*is_declaration=*/false)
	   != NULL_TREE);
      /* If we have seen a nested-name-specifier, and the next token
	 is `template', then we are using the template-id production.  */
      if (parser->scope
	  && cp_parser_optional_template_keyword (parser))
	{
	  /* Look for the template-id.  */
	  type = cp_parser_template_id (parser,
					/*template_keyword_p=*/true,
					/*check_dependency_p=*/true,
					/*is_declaration=*/false);
	  /* If the template-id did not name a type, we are out of
	     luck.  */
	  if (TREE_CODE (type) != TYPE_DECL)
	    {
	      cp_parser_error (parser, "expected template-id for type");
	      type = NULL_TREE;
	    }
	}
      /* Otherwise, look for a type-name.  */
      else
	type = cp_parser_type_name (parser);
      /* Keep track of all name-lookups performed in class scopes.  */
      if (type
	  && !global_p
	  && !qualified_p
	  && TREE_CODE (type) == TYPE_DECL
	  && TREE_CODE (DECL_NAME (type)) == IDENTIFIER_NODE)
	maybe_note_name_used_in_class (DECL_NAME (type), type);
      /* If it didn't work out, we don't have a TYPE.  */
      if ((flags & CP_PARSER_FLAGS_OPTIONAL)
	  && !cp_parser_parse_definitely (parser))
	type = NULL_TREE;
      if (type && decl_specs)
	cp_parser_set_decl_spec_type (decl_specs, type,
				      /*user_defined=*/true);
    }

  /* If we didn't get a type-name, issue an error message.  */
  if (!type && !(flags & CP_PARSER_FLAGS_OPTIONAL))
    {
      cp_parser_error (parser, "expected type-name");
      return error_mark_node;
    }

  /* There is no valid C++ program where a non-template type is
     followed by a "<".  That usually indicates that the user thought
     that the type was a template.  */
  if (type && type != error_mark_node)
    {
      /* As a last-ditch effort, see if TYPE is an Objective-C type.
	 If it is, then the '<'...'>' enclose protocol names rather than
	 template arguments, and so everything is fine.  */
      if (c_dialect_objc ()
	  && (objc_is_id (type) || objc_is_class_name (type)))
	{
	  tree protos = cp_parser_objc_protocol_refs_opt (parser);
	  tree qual_type = objc_get_protocol_qualified_type (type, protos);

	  /* Clobber the "unqualified" type previously entered into
	     DECL_SPECS with the new, improved protocol-qualified version.  */
	  if (decl_specs)
	    decl_specs->type = qual_type;

	  return qual_type;
	}

      cp_parser_check_for_invalid_template_id (parser, TREE_TYPE (type));
    }

  return type;
}
/* Parse a type-name.
type-name:
class-name
enum-name
typedef-name
enum-name:
identifier
typedef-name:
identifier
Returns a TYPE_DECL for the type. */
/* Parse a type-name: first try a class-name tentatively; failing
   that, expect an identifier naming a typedef or enumeration (with an
   Objective-C fallback).  Returns the TYPE_DECL, or error_mark_node.  */

static tree
cp_parser_type_name (cp_parser* parser)
{
  tree decl;
  tree name;

  /* A class-name is one alternative; try it first, tentatively, so we
     can back out if it doesn't pan out.  */
  cp_parser_parse_tentatively (parser);
  decl = cp_parser_class_name (parser,
			       /*typename_keyword_p=*/false,
			       /*template_keyword_p=*/false,
			       none_type,
			       /*check_dependency_p=*/true,
			       /*class_head_p=*/false,
			       /*is_declaration=*/false);
  if (cp_parser_parse_definitely (parser))
    return decl;

  /* Not a class-name, so it must be a typedef-name or an enum-name.  */
  name = cp_parser_identifier (parser);
  if (name == error_mark_node)
    return error_mark_node;

  /* Look up the type-name.  */
  decl = cp_parser_lookup_name_simple (parser, name);

  /* The lookup may not yield a TYPE_DECL if NAME is really an
     Objective-C type; check for that before complaining.  */
  if (TREE_CODE (decl) != TYPE_DECL
      && (objc_is_id (name) || objc_is_class_name (name)))
    {
      tree protos = cp_parser_objc_protocol_refs_opt (parser);
      tree objc_type = objc_get_protocol_qualified_type (name, protos);

      if (objc_type)
	decl = TYPE_NAME (objc_type);
    }

  if (TREE_CODE (decl) != TYPE_DECL)
    {
      /* Not a type-name after all; diagnose unless we are merely
	 parsing tentatively.  */
      if (!cp_parser_simulate_error (parser))
	cp_parser_name_lookup_error (parser, name, decl,
				     "is not a type");
      decl = error_mark_node;
    }
  /* Remember that the name was used in the definition of the current
     class so that we can check later to see if the meaning would have
     been different after the class was entirely defined.  */
  else if (decl != error_mark_node
	   && !parser->scope)
    maybe_note_name_used_in_class (name, decl);

  return decl;
}
/* Parse an elaborated-type-specifier. Note that the grammar given
here incorporates the resolution to DR68.
elaborated-type-specifier:
class-key :: [opt] nested-name-specifier [opt] identifier
class-key :: [opt] nested-name-specifier [opt] template [opt] template-id
enum :: [opt] nested-name-specifier [opt] identifier
typename :: [opt] nested-name-specifier identifier
typename :: [opt] nested-name-specifier template [opt]
template-id
GNU extension:
elaborated-type-specifier:
class-key attributes :: [opt] nested-name-specifier [opt] identifier
class-key attributes :: [opt] nested-name-specifier [opt]
template [opt] template-id
enum attributes :: [opt] nested-name-specifier [opt] identifier
If IS_FRIEND is TRUE, then this elaborated-type-specifier is being
declared `friend'. If IS_DECLARATION is TRUE, then this
elaborated-type-specifier appears in a decl-specifiers-seq, i.e.,
something is being declared.
Returns the TYPE specified. */
static tree
cp_parser_elaborated_type_specifier (cp_parser* parser,
				     bool is_friend,
				     bool is_declaration)
{
  /* Which kind of tag introduced this specifier: enum_type,
     typename_type, or a class-key value.  */
  enum tag_types tag_type;
  tree identifier;
  tree type = NULL_TREE;
  tree attributes = NULL_TREE;

  /* See if we're looking at the `enum' keyword. */
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_ENUM))
    {
      /* Consume the `enum' token. */
      cp_lexer_consume_token (parser->lexer);
      /* Remember that it's an enumeration type. */
      tag_type = enum_type;
      /* Parse the attributes. */
      attributes = cp_parser_attributes_opt (parser);
    }
  /* Or, it might be `typename'. */
  else if (cp_lexer_next_token_is_keyword (parser->lexer,
					   RID_TYPENAME))
    {
      /* Consume the `typename' token. */
      cp_lexer_consume_token (parser->lexer);
      /* Remember that it's a `typename' type. */
      tag_type = typename_type;
      /* The `typename' keyword is only allowed in templates. */
      if (!processing_template_decl)
	pedwarn ("using %<typename%> outside of template");
    }
  /* Otherwise it must be a class-key. */
  else
    {
      tag_type = cp_parser_class_key (parser);
      if (tag_type == none_type)
	return error_mark_node;
      /* Parse the attributes. */
      attributes = cp_parser_attributes_opt (parser);
    }
  /* Look for the `::' operator. */
  cp_parser_global_scope_opt (parser,
			      /*current_scope_valid_p=*/false);
  /* Look for the nested-name-specifier.  For `typename' it is
     mandatory; otherwise it is optional.  */
  if (tag_type == typename_type)
    {
      if (!cp_parser_nested_name_specifier (parser,
					    /*typename_keyword_p=*/true,
					    /*check_dependency_p=*/true,
					    /*type_p=*/true,
					    is_declaration))
	return error_mark_node;
    }
  else
    /* Even though `typename' is not present, the proposed resolution
       to Core Issue 180 says that in `class A<T>::B', `B' should be
       considered a type-name, even if `A<T>' is dependent. */
    cp_parser_nested_name_specifier_opt (parser,
					 /*typename_keyword_p=*/true,
					 /*check_dependency_p=*/true,
					 /*type_p=*/true,
					 is_declaration);
  /* For everything but enumeration types, consider a template-id.
     For an enumeration type, consider only a plain identifier. */
  if (tag_type != enum_type)
    {
      bool template_p = false;
      tree decl;
      /* Allow the `template' keyword. */
      template_p = cp_parser_optional_template_keyword (parser);
      /* If we didn't see `template', we don't know if there's a
	 template-id or not. */
      if (!template_p)
	cp_parser_parse_tentatively (parser);
      /* Parse the template-id. */
      decl = cp_parser_template_id (parser, template_p,
				    /*check_dependency_p=*/true,
				    is_declaration);
      /* If we didn't find a template-id, look for an ordinary
	 identifier.  (TYPE stays NULL_TREE, so the identifier branch
	 below runs.)  */
      if (!template_p && !cp_parser_parse_definitely (parser))
	;
      /* If DECL is a TEMPLATE_ID_EXPR, and the `typename' keyword is
	 in effect, then we must assume that, upon instantiation, the
	 template will correspond to a class. */
      else if (TREE_CODE (decl) == TEMPLATE_ID_EXPR
	       && tag_type == typename_type)
	type = make_typename_type (parser->scope, decl,
				   typename_type,
				   /*complain=*/tf_error);
      else
	type = TREE_TYPE (decl);
    }
  /* No template-id was found (or this is an enum); parse a plain
     identifier and resolve it.  */
  if (!type)
    {
      identifier = cp_parser_identifier (parser);
      if (identifier == error_mark_node)
	{
	  parser->scope = NULL_TREE;
	  return error_mark_node;
	}
      /* For a `typename', we needn't call xref_tag. */
      if (tag_type == typename_type
	  && TREE_CODE (parser->scope) != NAMESPACE_DECL)
	return cp_parser_make_typename_type (parser, parser->scope,
					     identifier);
      /* Look up a qualified name in the usual way. */
      if (parser->scope)
	{
	  tree decl;
	  decl = cp_parser_lookup_name (parser, identifier,
					tag_type,
					/*is_template=*/false,
					/*is_namespace=*/false,
					/*check_dependency=*/true,
					/*ambiguous_decls=*/NULL);
	  /* If we are parsing friend declaration, DECL may be a
	     TEMPLATE_DECL tree node here. However, we need to check
	     whether this TEMPLATE_DECL results in valid code. Consider
	     the following example:
	     namespace N {
	     template <class T> class C {};
	     }
	     class X {
	     template <class T> friend class N::C; // #1, valid code
	     };
	     template <class T> class Y {
	     friend class N::C; // #2, invalid code
	     };
	     For both case #1 and #2, we arrive at a TEMPLATE_DECL after
	     name lookup of `N::C'. We see that friend declaration must
	     be template for the code to be valid. Note that
	     processing_template_decl does not work here since it is
	     always 1 for the above two cases. */
	  decl = (cp_parser_maybe_treat_template_as_class
		  (decl, /*tag_name_p=*/is_friend
			 && parser->num_template_parameter_lists));
	  if (TREE_CODE (decl) != TYPE_DECL)
	    {
	      cp_parser_diagnose_invalid_type_name (parser,
						    parser->scope,
						    identifier);
	      return error_mark_node;
	    }
	  if (TREE_CODE (TREE_TYPE (decl)) != TYPENAME_TYPE)
	    {
	      /* Verify that the tag matches the declared type; a
		 mismatch yields error_mark_node and we bail out.  */
	      bool allow_template = (parser->num_template_parameter_lists
				     || DECL_SELF_REFERENCE_P (decl));
	      type = check_elaborated_type_specifier (tag_type, decl,
						      allow_template);
	      if (type == error_mark_node)
		return error_mark_node;
	    }
	  type = TREE_TYPE (decl);
	}
      else
	{
	  /* An elaborated-type-specifier sometimes introduces a new type and
	     sometimes names an existing type. Normally, the rule is that it
	     introduces a new type only if there is not an existing type of
	     the same name already in scope. For example, given:
	     struct S {};
	     void f() { struct S s; }
	     the `struct S' in the body of `f' is the same `struct S' as in
	     the global scope; the existing definition is used. However, if
	     there were no global declaration, this would introduce a new
	     local class named `S'.
	     An exception to this rule applies to the following code:
	     namespace N { struct S; }
	     Here, the elaborated-type-specifier names a new type
	     unconditionally; even if there is already an `S' in the
	     containing scope this declaration names a new type.
	     This exception only applies if the elaborated-type-specifier
	     forms the complete declaration:
	     [class.name]
	     A declaration consisting solely of `class-key identifier ;' is
	     either a redeclaration of the name in the current scope or a
	     forward declaration of the identifier as a class name. It
	     introduces the name into the current scope.
	     We are in this situation precisely when the next token is a `;'.
	     An exception to the exception is that a `friend' declaration does
	     *not* name a new type; i.e., given:
	     struct S { friend struct T; };
	     `T' is not a new type in the scope of `S'.
	     Also, `new struct S' or `sizeof (struct S)' never results in the
	     definition of a new type; a new type can only be declared in a
	     declaration context. */
	  tag_scope ts;
	  bool template_p;
	  if (is_friend)
	    /* Friends have special name lookup rules. */
	    ts = ts_within_enclosing_non_class;
	  else if (is_declaration
		   && cp_lexer_next_token_is (parser->lexer,
					      CPP_SEMICOLON))
	    /* This is a `class-key identifier ;' */
	    ts = ts_current;
	  else
	    ts = ts_global;
	  template_p =
	    (parser->num_template_parameter_lists
	     && (cp_parser_next_token_starts_class_definition_p (parser)
		 || cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON)));
	  /* An unqualified name was used to reference this type, so
	     there were no qualifying templates. */
	  if (!cp_parser_check_template_parameters (parser,
						    /*num_templates=*/0))
	    return error_mark_node;
	  type = xref_tag (tag_type, identifier, ts, template_p);
	}
    }
  if (type == error_mark_node)
    return error_mark_node;
  /* Allow attributes on forward declarations of classes. */
  if (attributes)
    {
      if (TREE_CODE (type) == TYPENAME_TYPE)
	warning (OPT_Wattributes,
		 "attributes ignored on uninstantiated type");
      else if (tag_type != enum_type && CLASSTYPE_TEMPLATE_INSTANTIATION (type)
	       && ! processing_explicit_instantiation)
	warning (OPT_Wattributes,
		 "attributes ignored on template instantiation");
      else if (is_declaration && cp_parser_declares_only_class_p (parser))
	cplus_decl_attributes (&type, attributes, (int) ATTR_FLAG_TYPE_IN_PLACE);
      else
	warning (OPT_Wattributes,
		 "attributes ignored on elaborated-type-specifier that is not a forward declaration");
    }
  /* Diagnose a class-key that disagrees with the type (skipped for
     enums).  */
  if (tag_type != enum_type)
    cp_parser_check_class_key (tag_type, type);
  /* A "<" cannot follow an elaborated type specifier. If that
     happens, the user was probably trying to form a template-id. */
  cp_parser_check_for_invalid_template_id (parser, type);
  return type;
}
/* Parse an enum-specifier.
enum-specifier:
enum identifier [opt] { enumerator-list [opt] }
GNU Extensions:
enum attributes[opt] identifier [opt] { enumerator-list [opt] }
attributes[opt]
Returns an ENUM_TYPE representing the enumeration, or NULL_TREE
if the token stream isn't an enum-specifier after all. */
static tree
cp_parser_enum_specifier (cp_parser* parser)
{
  tree identifier;
  tree type;
  tree attributes;

  /* Parse tentatively so that we can back up if we don't find a
     enum-specifier. */
  cp_parser_parse_tentatively (parser);
  /* Caller guarantees that the current token is 'enum', an identifier
     possibly follows, and the token after that is an opening brace.
     If we don't have an identifier, fabricate an anonymous name for
     the enumeration being defined. */
  cp_lexer_consume_token (parser->lexer);
  attributes = cp_parser_attributes_opt (parser);
  if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
    identifier = cp_parser_identifier (parser);
  else
    identifier = make_anon_name ();
  /* Look for the `{' but don't consume it yet. */
  if (!cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
    cp_parser_simulate_error (parser);
  /* If no `{' follows, this was not an enum-specifier after all;
     back out of the tentative parse and let the caller retry.  */
  if (!cp_parser_parse_definitely (parser))
    return NULL_TREE;
  /* Issue an error message if type-definitions are forbidden here. */
  if (!cp_parser_check_type_definition (parser))
    type = error_mark_node;
  else
    /* Create the new type. We do this before consuming the opening
       brace so the enum will be recorded as being on the line of its
       tag (or the 'enum' keyword, if there is no tag). */
    type = start_enum (identifier);
  /* Consume the opening brace. */
  cp_lexer_consume_token (parser->lexer);
  /* On error, skip past the body to recover, then give up.  */
  if (type == error_mark_node)
    {
      cp_parser_skip_to_end_of_block_or_statement (parser);
      return error_mark_node;
    }
  /* If the next token is not '}', then there are some enumerators. */
  if (cp_lexer_next_token_is_not (parser->lexer, CPP_CLOSE_BRACE))
    cp_parser_enumerator_list (parser, type);
  /* Consume the final '}'. */
  cp_parser_require (parser, CPP_CLOSE_BRACE, "`}'");
  /* Look for trailing attributes to apply to this enumeration, and
     apply them if appropriate. */
  if (cp_parser_allow_gnu_extensions_p (parser))
    {
      tree trailing_attr = cp_parser_attributes_opt (parser);
      cplus_decl_attributes (&type,
			     trailing_attr,
			     (int) ATTR_FLAG_TYPE_IN_PLACE);
    }
  /* Finish up the enumeration.  NOTE(review): the leading ATTRIBUTES
     parsed after `enum' are not applied here -- only the trailing
     ones are; confirm whether that is intentional.  */
  finish_enum (type);
  return type;
}
/* Parse an enumerator-list. The enumerators all have the indicated
TYPE.
enumerator-list:
enumerator-definition
enumerator-list , enumerator-definition */
static void
cp_parser_enumerator_list (cp_parser* parser, tree type)
{
  bool done = false;

  /* Keep parsing enumerator-definitions until no `,' follows, or
     until a trailing `,' turns out to precede the closing `}'.  */
  while (!done)
    {
      /* Parse one enumerator-definition into TYPE.  */
      cp_parser_enumerator_definition (parser, type);
      if (cp_lexer_next_token_is (parser->lexer, CPP_COMMA))
	{
	  /* Eat the `,' separating this enumerator from the next.  */
	  cp_lexer_consume_token (parser->lexer);
	  /* A `}' right after the comma means the list ended with a
	     trailing comma; diagnose it pedantically and stop.  */
	  if (cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_BRACE))
	    {
	      if (pedantic && !in_system_header)
		pedwarn ("comma at end of enumerator list");
	      done = true;
	    }
	}
      else
	/* No `,' -- the enumerator-list is complete.  */
	done = true;
    }
}
/* Parse an enumerator-definition. The enumerator has the indicated
TYPE.
enumerator-definition:
enumerator
enumerator = constant-expression
enumerator:
identifier */
static void
cp_parser_enumerator_definition (cp_parser* parser, tree type)
{
  tree name;
  tree init_value = NULL_TREE;

  /* An enumerator-definition always begins with an identifier.  */
  name = cp_parser_identifier (parser);
  if (name == error_mark_node)
    return;

  /* An optional `= constant-expression' supplies an explicit value;
     otherwise INIT_VALUE stays NULL_TREE.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_EQ))
    {
      /* Skip over the `='.  */
      cp_lexer_consume_token (parser->lexer);
      /* Parse the enumerator's value.  */
      init_value = cp_parser_constant_expression (parser,
						  /*allow_non_constant_p=*/false,
						  NULL);
    }

  /* Enter the enumerator into TYPE.  */
  build_enumerator (name, init_value, type);
}
/* Parse a namespace-name.
namespace-name:
original-namespace-name
namespace-alias
Returns the NAMESPACE_DECL for the namespace. */
static tree
cp_parser_namespace_name (cp_parser* parser)
{
  tree name;
  tree result;

  /* A namespace-name is syntactically just an identifier.  */
  name = cp_parser_identifier (parser);
  if (name == error_mark_node)
    return error_mark_node;

  /* Look up the identifier in the currently active scope. Look only
     for namespaces, due to:
     [basic.lookup.udir]
     When looking up a namespace-name in a using-directive or alias
     definition, only namespace names are considered.
     And:
     [basic.lookup.qual]
     During the lookup of a name preceding the :: scope resolution
     operator, object, function, and enumerator names are ignored.
     (Note that cp_parser_class_or_namespace_name only calls this
     function if the token after the name is the scope resolution
     operator.) */
  result = cp_parser_lookup_name (parser, name,
				  none_type,
				  /*is_template=*/false,
				  /*is_namespace=*/true,
				  /*check_dependency=*/true,
				  /*ambiguous_decls=*/NULL);

  /* The lookup succeeded only if it produced a NAMESPACE_DECL.  */
  if (result != error_mark_node
      && TREE_CODE (result) == NAMESPACE_DECL)
    return result;

  /* Otherwise, diagnose and fail.  The hard error is suppressed
     during an uncommitted tentative parse.  */
  if (!cp_parser_uncommitted_to_tentative_parse_p (parser))
    error ("%qD is not a namespace-name", name);
  cp_parser_error (parser, "expected namespace-name");
  return error_mark_node;
}
/* Parse a namespace-definition.
namespace-definition:
named-namespace-definition
unnamed-namespace-definition
named-namespace-definition:
original-namespace-definition
extension-namespace-definition
original-namespace-definition:
namespace identifier { namespace-body }
extension-namespace-definition:
namespace original-namespace-name { namespace-body }
unnamed-namespace-definition:
namespace { namespace-body } */
static void
cp_parser_namespace_definition (cp_parser* parser)
{
  tree name;
  tree attributes;

  /* Every namespace-definition begins with `namespace'.  */
  cp_parser_require_keyword (parser, RID_NAMESPACE, "`namespace'");

  /* An identifier is optional (an unnamed-namespace-definition omits
     it).  We do not attempt to distinguish between an
     original-namespace-definition and an
     extension-namespace-definition at this point; the semantic
     analysis routines are responsible for that.  */
  name = (cp_lexer_next_token_is (parser->lexer, CPP_NAME)
	  ? cp_parser_identifier (parser)
	  : NULL_TREE);

  /* Parse any specified attributes.  */
  attributes = cp_parser_attributes_opt (parser);

  /* The body is brace-enclosed: open the namespace, parse its
     declarations, then close it again.  */
  cp_parser_require (parser, CPP_OPEN_BRACE, "`{'");
  push_namespace_with_attribs (name, attributes);
  cp_parser_namespace_body (parser);
  pop_namespace ();
  cp_parser_require (parser, CPP_CLOSE_BRACE, "`}'");
}
/* Parse a namespace-body.
namespace-body:
declaration-seq [opt] */
static void
cp_parser_namespace_body (cp_parser* parser)
{
  /* A namespace-body is just an optional declaration-seq.  */
  cp_parser_declaration_seq_opt (parser);
}
/* Parse a namespace-alias-definition.
namespace-alias-definition:
namespace identifier = qualified-namespace-specifier ; */
static void
cp_parser_namespace_alias_definition (cp_parser* parser)
{
  tree alias_name;
  tree target;

  /* The definition begins with the `namespace' keyword.  */
  cp_parser_require_keyword (parser, RID_NAMESPACE, "`namespace'");
  /* Next comes the identifier being introduced as the alias.  */
  alias_name = cp_parser_identifier (parser);
  if (alias_name == error_mark_node)
    return;
  /* The alias and its target are separated by `='.  */
  cp_parser_require (parser, CPP_EQ, "`='");
  /* Parse the qualified-namespace-specifier the alias refers to.  */
  target = cp_parser_qualified_namespace_specifier (parser);
  /* The definition is terminated by `;'.  */
  cp_parser_require (parser, CPP_SEMICOLON, "`;'");
  /* Register the alias in the symbol table.  */
  do_namespace_alias (alias_name, target);
}
/* Parse a qualified-namespace-specifier.
qualified-namespace-specifier:
:: [opt] nested-name-specifier [opt] namespace-name
Returns a NAMESPACE_DECL corresponding to the specified
namespace. */
static tree
cp_parser_qualified_namespace_specifier (cp_parser* parser)
{
  /* Look for the optional `::'. */
  cp_parser_global_scope_opt (parser,
			      /*current_scope_valid_p=*/false);
  /* Look for the optional nested-name-specifier. */
  cp_parser_nested_name_specifier_opt (parser,
				       /*typename_keyword_p=*/false,
				       /*check_dependency_p=*/true,
				       /*type_p=*/false,
				       /*is_declaration=*/true);
  /* Finally, the namespace-name itself; this resolves the whole
     specifier to a NAMESPACE_DECL (or error_mark_node).  */
  return cp_parser_namespace_name (parser);
}
/* Parse a using-declaration, or, if ACCESS_DECLARATION_P is true, an
access declaration.
using-declaration:
using typename [opt] :: [opt] nested-name-specifier unqualified-id ;
using :: unqualified-id ;
access-declaration:
qualified-id ;
*/
static bool
cp_parser_using_declaration (cp_parser* parser,
			     bool access_declaration_p)
{
  cp_token *token;
  bool typename_p = false;
  bool global_scope_p;
  tree decl;
  tree identifier;
  tree qscope;

  /* An access-declaration has no `using' keyword, and might turn out
     not to be one at all -- so parse it tentatively.  */
  if (access_declaration_p)
    cp_parser_parse_tentatively (parser);
  else
    {
      /* Look for the `using' keyword. */
      cp_parser_require_keyword (parser, RID_USING, "`using'");
      /* Peek at the next token. */
      token = cp_lexer_peek_token (parser->lexer);
      /* See if it's `typename'. */
      if (token->keyword == RID_TYPENAME)
	{
	  /* Remember that we've seen it. */
	  typename_p = true;
	  /* Consume the `typename' token. */
	  cp_lexer_consume_token (parser->lexer);
	}
    }
  /* Look for the optional global scope qualification. */
  global_scope_p
    = (cp_parser_global_scope_opt (parser,
				   /*current_scope_valid_p=*/false)
       != NULL_TREE);
  /* If we saw `typename', or didn't see `::', then there must be a
     nested-name-specifier present. */
  if (typename_p || !global_scope_p)
    qscope = cp_parser_nested_name_specifier (parser, typename_p,
					      /*check_dependency_p=*/true,
					      /*type_p=*/false,
					      /*is_declaration=*/true);
  /* Otherwise, we could be in either of the two productions. In that
     case, treat the nested-name-specifier as optional. */
  else
    qscope = cp_parser_nested_name_specifier_opt (parser,
						  /*typename_keyword_p=*/false,
						  /*check_dependency_p=*/true,
						  /*type_p=*/false,
						  /*is_declaration=*/true);
  if (!qscope)
    qscope = global_namespace;
  if (access_declaration_p && cp_parser_error_occurred (parser))
    /* Something has already gone wrong; there's no need to parse
       further. Since an error has occurred, the return value of
       cp_parser_parse_definitely will be false, as required. */
    return cp_parser_parse_definitely (parser);
  /* Parse the unqualified-id. */
  identifier = cp_parser_unqualified_id (parser,
					 /*template_keyword_p=*/false,
					 /*check_dependency_p=*/true,
					 /*declarator_p=*/true,
					 /*optional_p=*/false);
  /* For an access-declaration, commit only if the next token is the
     terminating `;'; otherwise back the tentative parse out.  */
  if (access_declaration_p)
    {
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
	cp_parser_simulate_error (parser);
      if (!cp_parser_parse_definitely (parser))
	return false;
    }
  /* The function we call to handle a using-declaration is different
     depending on what scope we are in.  On errors we still fall
     through to consume the `;' below.  */
  if (qscope == error_mark_node || identifier == error_mark_node)
    ;
  else if (TREE_CODE (identifier) != IDENTIFIER_NODE
	   && TREE_CODE (identifier) != BIT_NOT_EXPR)
    /* [namespace.udecl]
       A using declaration shall not name a template-id. */
    error ("a template-id may not appear in a using-declaration");
  else
    {
      if (at_class_scope_p ())
	{
	  /* Create the USING_DECL. */
	  decl = do_class_using_decl (parser->scope, identifier);
	  /* Add it to the list of members in this class. */
	  finish_member_declaration (decl);
	}
      else
	{
	  decl = cp_parser_lookup_name_simple (parser, identifier);
	  if (decl == error_mark_node)
	    cp_parser_name_lookup_error (parser, identifier, decl, NULL);
	  else if (!at_namespace_scope_p ())
	    do_local_using_decl (decl, qscope, identifier);
	  else
	    do_toplevel_using_decl (decl, qscope, identifier);
	}
    }
  /* Look for the final `;'. */
  cp_parser_require (parser, CPP_SEMICOLON, "`;'");
  return true;
}
/* Parse a using-directive.
using-directive:
using namespace :: [opt] nested-name-specifier [opt]
namespace-name ; */
static void
cp_parser_using_directive (cp_parser* parser)
{
  tree used_namespace;
  tree attrs;

  /* A using-directive always starts with `using namespace'.  */
  cp_parser_require_keyword (parser, RID_USING, "`using'");
  cp_parser_require_keyword (parser, RID_NAMESPACE, "`namespace'");

  /* The namespace-name may be qualified: an optional `::' followed by
     an optional nested-name-specifier.  */
  cp_parser_global_scope_opt (parser, /*current_scope_valid_p=*/false);
  cp_parser_nested_name_specifier_opt (parser,
				       /*typename_keyword_p=*/false,
				       /*check_dependency_p=*/true,
				       /*type_p=*/false,
				       /*is_declaration=*/true);

  /* Resolve the namespace being used.  */
  used_namespace = cp_parser_namespace_name (parser);
  /* Parse any trailing attributes.  */
  attrs = cp_parser_attributes_opt (parser);
  /* Record the directive in the symbol table.  */
  parse_using_directive (used_namespace, attrs);
  /* The directive ends with `;'.  */
  cp_parser_require (parser, CPP_SEMICOLON, "`;'");
}
/* Parse an asm-definition.
asm-definition:
asm ( string-literal ) ;
GNU Extension:
asm-definition:
asm volatile [opt] ( string-literal ) ;
asm volatile [opt] ( string-literal : asm-operand-list [opt] ) ;
asm volatile [opt] ( string-literal : asm-operand-list [opt]
: asm-operand-list [opt] ) ;
asm volatile [opt] ( string-literal : asm-operand-list [opt]
: asm-operand-list [opt]
: asm-operand-list [opt] ) ; */
static void
cp_parser_asm_definition (cp_parser* parser)
{
  tree string;
  tree outputs = NULL_TREE;
  tree inputs = NULL_TREE;
  tree clobbers = NULL_TREE;
  tree asm_stmt;
  bool volatile_p = false;
  bool extended_p = false;

  /* Look for the `asm' keyword. */
  cp_parser_require_keyword (parser, RID_ASM, "`asm'");
  /* See if the next token is `volatile' (a GNU extension).  */
  if (cp_parser_allow_gnu_extensions_p (parser)
      && cp_lexer_next_token_is_keyword (parser->lexer, RID_VOLATILE))
    {
      /* Remember that we saw the `volatile' keyword. */
      volatile_p = true;
      /* Consume the token. */
      cp_lexer_consume_token (parser->lexer);
    }
  /* Look for the opening `('. */
  if (!cp_parser_require (parser, CPP_OPEN_PAREN, "`('"))
    return;
  /* Look for the string. */
  string = cp_parser_string_literal (parser, false, false);
  /* On a bad string literal, skip to the matching `)' to recover.  */
  if (string == error_mark_node)
    {
      cp_parser_skip_to_closing_parenthesis (parser, true, false,
					     /*consume_paren=*/true);
      return;
    }
  /* If we're allowing GNU extensions, check for the extended assembly
     syntax. Unfortunately, the `:' tokens need not be separated by
     a space in C, and so, for compatibility, we tolerate that here
     too. Doing that means that we have to treat the `::' operator as
     two `:' tokens. */
  if (cp_parser_allow_gnu_extensions_p (parser)
      && parser->in_function_body
      && (cp_lexer_next_token_is (parser->lexer, CPP_COLON)
	  || cp_lexer_next_token_is (parser->lexer, CPP_SCOPE)))
    {
      bool inputs_p = false;
      bool clobbers_p = false;
      /* The extended syntax was used. */
      extended_p = true;
      /* Look for outputs. */
      if (cp_lexer_next_token_is (parser->lexer, CPP_COLON))
	{
	  /* Consume the `:'. */
	  cp_lexer_consume_token (parser->lexer);
	  /* Parse the output-operands. */
	  if (cp_lexer_next_token_is_not (parser->lexer,
					  CPP_COLON)
	      && cp_lexer_next_token_is_not (parser->lexer,
					     CPP_SCOPE)
	      && cp_lexer_next_token_is_not (parser->lexer,
					     CPP_CLOSE_PAREN))
	    outputs = cp_parser_asm_operand_list (parser);
	}
      /* If the next token is `::', there are no outputs, and the
	 next token is the beginning of the inputs. */
      else if (cp_lexer_next_token_is (parser->lexer, CPP_SCOPE))
	/* The inputs are coming next. */
	inputs_p = true;
      /* Look for inputs. */
      if (inputs_p
	  || cp_lexer_next_token_is (parser->lexer, CPP_COLON))
	{
	  /* Consume the `:' or `::'. */
	  cp_lexer_consume_token (parser->lexer);
	  /* Parse the output-operands. */
	  if (cp_lexer_next_token_is_not (parser->lexer,
					  CPP_COLON)
	      && cp_lexer_next_token_is_not (parser->lexer,
					     CPP_CLOSE_PAREN))
	    inputs = cp_parser_asm_operand_list (parser);
	}
      else if (cp_lexer_next_token_is (parser->lexer, CPP_SCOPE))
	/* The clobbers are coming next. */
	clobbers_p = true;
      /* Look for clobbers. */
      if (clobbers_p
	  || cp_lexer_next_token_is (parser->lexer, CPP_COLON))
	{
	  /* Consume the `:' or `::'. */
	  cp_lexer_consume_token (parser->lexer);
	  /* Parse the clobbers. */
	  if (cp_lexer_next_token_is_not (parser->lexer,
					  CPP_CLOSE_PAREN))
	    clobbers = cp_parser_asm_clobber_list (parser);
	}
    }
  /* Look for the closing `)'.  If it is missing, skip ahead to
     recover.  */
  if (!cp_parser_require (parser, CPP_CLOSE_PAREN, "`)'"))
    cp_parser_skip_to_closing_parenthesis (parser, true, false,
					   /*consume_paren=*/true);
  cp_parser_require (parser, CPP_SEMICOLON, "`;'");
  /* Create the ASM_EXPR. */
  if (parser->in_function_body)
    {
      asm_stmt = finish_asm_stmt (volatile_p, string, outputs,
				  inputs, clobbers);
      /* If the extended syntax was not used, mark the ASM_EXPR. */
      if (!extended_p)
	{
	  tree temp = asm_stmt;
	  /* NOTE(review): finish_asm_stmt apparently may wrap the
	     statement in a CLEANUP_POINT_EXPR; unwrap it before
	     setting the flag -- confirm against finish_asm_stmt.  */
	  if (TREE_CODE (temp) == CLEANUP_POINT_EXPR)
	    temp = TREE_OPERAND (temp, 0);
	  ASM_INPUT_P (temp) = 1;
	}
    }
  else
    /* Outside a function body, hand the string to the callgraph as a
       top-level asm.  */
    cgraph_add_asm_node (string);
}
/* Declarators [gram.dcl.decl] */
/* Parse an init-declarator.
init-declarator:
declarator initializer [opt]
GNU Extension:
init-declarator:
declarator asm-specification [opt] attributes [opt] initializer [opt]
function-definition:
decl-specifier-seq [opt] declarator ctor-initializer [opt]
function-body
decl-specifier-seq [opt] declarator function-try-block
GNU Extension:
function-definition:
__extension__ function-definition
The DECL_SPECIFIERS apply to this declarator. Returns a
representation of the entity declared. If MEMBER_P is TRUE, then
this declarator appears in a class scope. The new DECL created by
this declarator is returned.
The CHECKS are access checks that should be performed once we know
what entity is being declared (and, therefore, what classes have
befriended it).
If FUNCTION_DEFINITION_ALLOWED_P then we handle the declarator and
for a function-definition here as well. If the declarator is a
declarator for a function-definition, *FUNCTION_DEFINITION_P will
be TRUE upon return. By that point, the function-definition will
have been completely parsed.
FUNCTION_DEFINITION_P may be NULL if FUNCTION_DEFINITION_ALLOWED_P
is FALSE. */
static tree
cp_parser_init_declarator (cp_parser* parser,
cp_decl_specifier_seq *decl_specifiers,
VEC (deferred_access_check,gc)* checks,
bool function_definition_allowed_p,
bool member_p,
int declares_class_or_enum,
bool* function_definition_p)
{
cp_token *token;
cp_declarator *declarator;
tree prefix_attributes;
tree attributes;
tree asm_specification;
tree initializer;
tree decl = NULL_TREE;
tree scope;
bool is_initialized;
/* Only valid if IS_INITIALIZED is true. In that case, CPP_EQ if
initialized with "= ..", CPP_OPEN_PAREN if initialized with
"(...)". */
enum cpp_ttype initialization_kind;
bool is_parenthesized_init = false;
bool is_non_constant_init;
int ctor_dtor_or_conv_p;
bool friend_p;
tree pushed_scope = NULL;
/* Gather the attributes that were provided with the
decl-specifiers. */
prefix_attributes = decl_specifiers->attributes;
/* Assume that this is not the declarator for a function
definition. */
if (function_definition_p)
*function_definition_p = false;
/* Defer access checks while parsing the declarator; we cannot know
what names are accessible until we know what is being
declared. */
resume_deferring_access_checks ();
/* Parse the declarator. */
declarator
= cp_parser_declarator (parser, CP_PARSER_DECLARATOR_NAMED,
&ctor_dtor_or_conv_p,
/*parenthesized_p=*/NULL,
/*member_p=*/false);
/* Gather up the deferred checks. */
stop_deferring_access_checks ();
/* If the DECLARATOR was erroneous, there's no need to go
further. */
if (declarator == cp_error_declarator)
return error_mark_node;
/* Check that the number of template-parameter-lists is OK. */
if (!cp_parser_check_declarator_template_parameters (parser, declarator))
return error_mark_node;
if (declares_class_or_enum & 2)
cp_parser_check_for_definition_in_return_type (declarator,
decl_specifiers->type);
/* Figure out what scope the entity declared by the DECLARATOR is
located in. `grokdeclarator' sometimes changes the scope, so
we compute it now. */
scope = get_scope_of_declarator (declarator);
/* If we're allowing GNU extensions, look for an asm-specification
and attributes. */
if (cp_parser_allow_gnu_extensions_p (parser))
{
/* Look for an asm-specification. */
asm_specification = cp_parser_asm_specification_opt (parser);
/* And attributes. */
attributes = cp_parser_attributes_opt (parser);
}
else
{
asm_specification = NULL_TREE;
attributes = NULL_TREE;
}
/* Peek at the next token. */
token = cp_lexer_peek_token (parser->lexer);
/* Check to see if the token indicates the start of a
function-definition. */
if (cp_parser_token_starts_function_definition_p (token))
{
if (!function_definition_allowed_p)
{
/* If a function-definition should not appear here, issue an
error message. */
cp_parser_error (parser,
"a function-definition is not allowed here");
return error_mark_node;
}
else
{
/* Neither attributes nor an asm-specification are allowed
on a function-definition. */
if (asm_specification)
error ("an asm-specification is not allowed on a function-definition");
if (attributes)
error ("attributes are not allowed on a function-definition");
/* This is a function-definition. */
*function_definition_p = true;
/* Parse the function definition. */
if (member_p)
decl = cp_parser_save_member_function_body (parser,
decl_specifiers,
declarator,
prefix_attributes);
else
decl
= (cp_parser_function_definition_from_specifiers_and_declarator
(parser, decl_specifiers, prefix_attributes, declarator));
return decl;
}
}
/* [dcl.dcl]
Only in function declarations for constructors, destructors, and
type conversions can the decl-specifier-seq be omitted.
We explicitly postpone this check past the point where we handle
function-definitions because we tolerate function-definitions
that are missing their return types in some modes. */
if (!decl_specifiers->any_specifiers_p && ctor_dtor_or_conv_p <= 0)
{
cp_parser_error (parser,
"expected constructor, destructor, or type conversion");
return error_mark_node;
}
/* An `=' or an `(' indicates an initializer. */
if (token->type == CPP_EQ
|| token->type == CPP_OPEN_PAREN)
{
is_initialized = true;
initialization_kind = token->type;
}
else
{
/* If the init-declarator isn't initialized and isn't followed by a
`,' or `;', it's not a valid init-declarator. */
if (token->type != CPP_COMMA
&& token->type != CPP_SEMICOLON)
{
cp_parser_error (parser, "expected initializer");
return error_mark_node;
}
is_initialized = false;
initialization_kind = CPP_EOF;
}
/* Because start_decl has side-effects, we should only call it if we
know we're going ahead. By this point, we know that we cannot
possibly be looking at any other construct. */
cp_parser_commit_to_tentative_parse (parser);
/* If the decl specifiers were bad, issue an error now that we're
sure this was intended to be a declarator. Then continue
declaring the variable(s), as int, to try to cut down on further
errors. */
if (decl_specifiers->any_specifiers_p
&& decl_specifiers->type == error_mark_node)
{
cp_parser_error (parser, "invalid type in declaration");
decl_specifiers->type = integer_type_node;
}
/* Check to see whether or not this declaration is a friend. */
friend_p = cp_parser_friend_p (decl_specifiers);
/* Enter the newly declared entry in the symbol table. If we're
processing a declaration in a class-specifier, we wait until
after processing the initializer. */
if (!member_p)
{
if (parser->in_unbraced_linkage_specification_p)
decl_specifiers->storage_class = sc_extern;
decl = start_decl (declarator, decl_specifiers,
is_initialized, attributes, prefix_attributes,
&pushed_scope);
}
else if (scope)
/* Enter the SCOPE. That way unqualified names appearing in the
initializer will be looked up in SCOPE. */
pushed_scope = push_scope (scope);
/* Perform deferred access control checks, now that we know in which
SCOPE the declared entity resides. */
if (!member_p && decl)
{
tree saved_current_function_decl = NULL_TREE;
/* If the entity being declared is a function, pretend that we
are in its scope. If it is a `friend', it may have access to
things that would not otherwise be accessible. */
if (TREE_CODE (decl) == FUNCTION_DECL)
{
saved_current_function_decl = current_function_decl;
current_function_decl = decl;
}
/* Perform access checks for template parameters. */
cp_parser_perform_template_parameter_access_checks (checks);
/* Perform the access control checks for the declarator and the
the decl-specifiers. */
perform_deferred_access_checks ();
/* Restore the saved value. */
if (TREE_CODE (decl) == FUNCTION_DECL)
current_function_decl = saved_current_function_decl;
}
/* Parse the initializer. */
initializer = NULL_TREE;
is_parenthesized_init = false;
is_non_constant_init = true;
if (is_initialized)
{
if (function_declarator_p (declarator))
{
if (initialization_kind == CPP_EQ)
initializer = cp_parser_pure_specifier (parser);
else
{
/* If the declaration was erroneous, we don't really
know what the user intended, so just silently
consume the initializer. */
if (decl != error_mark_node)
error ("initializer provided for function");
cp_parser_skip_to_closing_parenthesis (parser,
/*recovering=*/true,
/*or_comma=*/false,
/*consume_paren=*/true);
}
}
else
initializer = cp_parser_initializer (parser,
&is_parenthesized_init,
&is_non_constant_init);
}
/* The old parser allows attributes to appear after a parenthesized
initializer. Mark Mitchell proposed removing this functionality
on the GCC mailing lists on 2002-08-13. This parser accepts the
attributes -- but ignores them. */
if (cp_parser_allow_gnu_extensions_p (parser) && is_parenthesized_init)
if (cp_parser_attributes_opt (parser))
warning (OPT_Wattributes,
"attributes after parenthesized initializer ignored");
/* For an in-class declaration, use `grokfield' to create the
declaration. */
if (member_p)
{
if (pushed_scope)
{
pop_scope (pushed_scope);
pushed_scope = false;
}
decl = grokfield (declarator, decl_specifiers,
initializer, !is_non_constant_init,
/*asmspec=*/NULL_TREE,
prefix_attributes);
if (decl && TREE_CODE (decl) == FUNCTION_DECL)
cp_parser_save_default_args (parser, decl);
}
/* Finish processing the declaration. But, skip friend
declarations. */
if (!friend_p && decl && decl != error_mark_node)
{
cp_finish_decl (decl,
initializer, !is_non_constant_init,
asm_specification,
/* If the initializer is in parentheses, then this is
a direct-initialization, which means that an
`explicit' constructor is OK. Otherwise, an
`explicit' constructor cannot be used. */
((is_parenthesized_init || !is_initialized)
? 0 : LOOKUP_ONLYCONVERTING));
}
if (!friend_p && pushed_scope)
pop_scope (pushed_scope);
return decl;
}
/* Parse a declarator.

   declarator:
     direct-declarator
     ptr-operator declarator

   abstract-declarator:
     ptr-operator abstract-declarator [opt]
     direct-abstract-declarator

   GNU Extensions:

   declarator:
     attributes [opt] direct-declarator
     attributes [opt] ptr-operator declarator

   abstract-declarator:
     attributes [opt] ptr-operator abstract-declarator [opt]
     attributes [opt] direct-abstract-declarator

   If CTOR_DTOR_OR_CONV_P is not NULL, *CTOR_DTOR_OR_CONV_P is used to
   detect constructor, destructor or conversion operators.  It is set
   to -1 if the declarator is a name, and +1 if it is a
   function.  Otherwise it is set to zero.  Usually you just want to
   test for >0, but internally the negative value is used.

   (The reason for CTOR_DTOR_OR_CONV_P is that a declaration must have
   a decl-specifier-seq unless it declares a constructor, destructor,
   or conversion.  It might seem that we could check this condition in
   semantic analysis, rather than parsing, but that makes it difficult
   to handle something like `f()'.  We want to notice that there are
   no decl-specifiers, and therefore realize that this is an
   expression, not a declaration.)

   If PARENTHESIZED_P is non-NULL, *PARENTHESIZED_P is set to true iff
   the declarator is a direct-declarator of the form "(...)".

   MEMBER_P is true iff this declarator is a member-declarator.  */

static cp_declarator *
cp_parser_declarator (cp_parser* parser,
                      cp_parser_declarator_kind dcl_kind,
                      int* ctor_dtor_or_conv_p,
                      bool* parenthesized_p,
                      bool member_p)
{
  cp_token *token;
  cp_declarator *declarator;
  enum tree_code code;
  cp_cv_quals cv_quals;
  tree class_type;
  tree attributes = NULL_TREE;

  /* Assume this is not a constructor, destructor, or type-conversion
     operator.  */
  if (ctor_dtor_or_conv_p)
    *ctor_dtor_or_conv_p = 0;

  /* GNU attributes may precede the declarator proper.  */
  if (cp_parser_allow_gnu_extensions_p (parser))
    attributes = cp_parser_attributes_opt (parser);

  /* Peek at the next token.  */
  /* NOTE(review): TOKEN is not inspected anywhere below in this
     function; the peek presumably also primes the lexer/source
     position — confirm before removing it.  */
  token = cp_lexer_peek_token (parser->lexer);

  /* Check for the ptr-operator production.  This must be done
     tentatively: a leading `*'/`&'/nested-name `*' may instead belong
     to a direct-declarator.  */
  cp_parser_parse_tentatively (parser);
  /* Parse the ptr-operator.  */
  code = cp_parser_ptr_operator (parser,
                                 &class_type,
                                 &cv_quals);
  /* If that worked, then we have a ptr-operator.  */
  if (cp_parser_parse_definitely (parser))
    {
      /* If a ptr-operator was found, then this declarator was not
         parenthesized.  */
      if (parenthesized_p)
        *parenthesized_p = true;
      /* The dependent declarator is optional if we are parsing an
         abstract-declarator.  */
      if (dcl_kind != CP_PARSER_DECLARATOR_NAMED)
        cp_parser_parse_tentatively (parser);

      /* Parse the dependent declarator (the declarator the
         ptr-operator applies to).  */
      declarator = cp_parser_declarator (parser, dcl_kind,
                                         /*ctor_dtor_or_conv_p=*/NULL,
                                         /*parenthesized_p=*/NULL,
                                         /*member_p=*/false);

      /* If we are parsing an abstract-declarator, we must handle the
         case where the dependent declarator is absent.  */
      if (dcl_kind != CP_PARSER_DECLARATOR_NAMED
          && !cp_parser_parse_definitely (parser))
        declarator = NULL;

      /* Build the representation of the ptr-operator.  CLASS_TYPE is
         non-NULL only for a pointer-to-member.  */
      if (class_type)
        declarator = make_ptrmem_declarator (cv_quals,
                                             class_type,
                                             declarator);
      else if (code == INDIRECT_REF)
        declarator = make_pointer_declarator (cv_quals, declarator);
      else
        declarator = make_reference_declarator (cv_quals, declarator);
    }
  /* Everything else is a direct-declarator.  */
  else
    {
      if (parenthesized_p)
        *parenthesized_p = cp_lexer_next_token_is (parser->lexer,
                                                   CPP_OPEN_PAREN);
      declarator = cp_parser_direct_declarator (parser, dcl_kind,
                                                ctor_dtor_or_conv_p,
                                                member_p);
    }

  /* Attach any leading GNU attributes, unless the declarator is
     erroneous or absent.  */
  if (attributes && declarator && declarator != cp_error_declarator)
    declarator->attributes = attributes;

  return declarator;
}
/* Parse a direct-declarator or direct-abstract-declarator.

   direct-declarator:
     declarator-id
     direct-declarator ( parameter-declaration-clause )
       cv-qualifier-seq [opt]
       exception-specification [opt]
     direct-declarator [ constant-expression [opt] ]
     ( declarator )

   direct-abstract-declarator:
     direct-abstract-declarator [opt]
       ( parameter-declaration-clause )
       cv-qualifier-seq [opt]
       exception-specification [opt]
     direct-abstract-declarator [opt] [ constant-expression [opt] ]
     ( abstract-declarator )

   Returns a representation of the declarator.  DCL_KIND is
   CP_PARSER_DECLARATOR_ABSTRACT, if we are parsing a
   direct-abstract-declarator.  It is CP_PARSER_DECLARATOR_NAMED, if
   we are parsing a direct-declarator.  It is
   CP_PARSER_DECLARATOR_EITHER, if we can accept either - in the case
   of ambiguity we prefer an abstract declarator, as per
   [dcl.ambig.res].  CTOR_DTOR_OR_CONV_P and MEMBER_P are as for
   cp_parser_declarator.  */

static cp_declarator *
cp_parser_direct_declarator (cp_parser* parser,
                             cp_parser_declarator_kind dcl_kind,
                             int* ctor_dtor_or_conv_p,
                             bool member_p)
{
  cp_token *token;
  cp_declarator *declarator = NULL;
  tree scope = NULL_TREE;
  /* Saved so they can be restored on exit; they are clobbered while
     parsing nested constructs below.  */
  bool saved_default_arg_ok_p = parser->default_arg_ok_p;
  bool saved_in_declarator_p = parser->in_declarator_p;
  /* FIRST is true until we have consumed the leading component of the
     declarator; several productions are only valid in that position.  */
  bool first = true;
  tree pushed_scope = NULL_TREE;

  /* Each iteration consumes one trailing `(...)' or `[...]' suffix, or
     the leading declarator-id/parenthesized declarator.  */
  while (true)
    {
      /* Peek at the next token.  */
      token = cp_lexer_peek_token (parser->lexer);
      if (token->type == CPP_OPEN_PAREN)
        {
          /* This is either a parameter-declaration-clause, or a
             parenthesized declarator.  When we know we are parsing a
             named declarator, it must be a parenthesized declarator
             if FIRST is true.  For instance, `(int)' is a
             parameter-declaration-clause, with an omitted
             direct-abstract-declarator.  But `((*))', is a
             parenthesized abstract declarator.  Finally, when T is a
             template parameter `(T)' is a
             parameter-declaration-clause, and not a parenthesized
             named declarator.

             We first try and parse a parameter-declaration-clause,
             and then try a nested declarator (if FIRST is true).

             It is not an error for it not to be a
             parameter-declaration-clause, even when FIRST is
             false.  Consider,

               int i (int);
               int i (3);

             The first is the declaration of a function while the
             second is a the definition of a variable, including its
             initializer.

             Having seen only the parenthesis, we cannot know which of
             these two alternatives should be selected.  Even more
             complex are examples like:

               int i (int (a));
               int i (int (3));

             The former is a function-declaration; the latter is a
             variable initialization.

             Thus again, we try a parameter-declaration-clause, and if
             that fails, we back out and return.  */
          if (!first || dcl_kind != CP_PARSER_DECLARATOR_NAMED)
            {
              cp_parameter_declarator *params;
              unsigned saved_num_template_parameter_lists;

              /* In a member-declarator, the only valid interpretation
                 of a parenthesis is the start of a
                 parameter-declaration-clause.  (It is invalid to
                 initialize a static data member with a parenthesized
                 initializer; only the "=" form of initialization is
                 permitted.)  */
              if (!member_p)
                cp_parser_parse_tentatively (parser);
              /* Consume the `('.  */
              cp_lexer_consume_token (parser->lexer);
              if (first)
                {
                  /* If this is going to be an abstract declarator, we're
                     in a declarator and we can't have default args.  */
                  parser->default_arg_ok_p = false;
                  parser->in_declarator_p = true;
                }

              /* Inside the function parameter list, surrounding
                 template-parameter-lists do not apply.  */
              saved_num_template_parameter_lists
                = parser->num_template_parameter_lists;
              parser->num_template_parameter_lists = 0;

              /* Parse the parameter-declaration-clause.  */
              params = cp_parser_parameter_declaration_clause (parser);

              parser->num_template_parameter_lists
                = saved_num_template_parameter_lists;

              /* If all went well, parse the cv-qualifier-seq and the
                 exception-specification.  */
              if (member_p || cp_parser_parse_definitely (parser))
                {
                  cp_cv_quals cv_quals;
                  tree exception_specification;

                  /* Per the contract documented above: set
                     *CTOR_DTOR_OR_CONV_P to +1 only if it was -1,
                     i.e. only a bare name followed by a parameter list
                     can be a constructor/destructor/conversion.  */
                  if (ctor_dtor_or_conv_p)
                    *ctor_dtor_or_conv_p = *ctor_dtor_or_conv_p < 0;
                  first = false;
                  /* Consume the `)'.  */
                  cp_parser_require (parser, CPP_CLOSE_PAREN, "`)'");

                  /* Parse the cv-qualifier-seq.  */
                  cv_quals = cp_parser_cv_qualifier_seq_opt (parser);
                  /* And the exception-specification.  */
                  exception_specification
                    = cp_parser_exception_specification_opt (parser);

                  /* Create the function-declarator.  */
                  declarator = make_call_declarator (declarator,
                                                     params,
                                                     cv_quals,
                                                     exception_specification);
                  /* Any subsequent parameter lists are to do with
                     return type, so are not those of the declared
                     function.  */
                  parser->default_arg_ok_p = false;

                  /* Repeat the main loop.  */
                  continue;
                }
            }

          /* If this is the first, we can try a parenthesized
             declarator.  */
          if (first)
            {
              bool saved_in_type_id_in_expr_p;

              parser->default_arg_ok_p = saved_default_arg_ok_p;
              parser->in_declarator_p = saved_in_declarator_p;

              /* Consume the `('.  */
              cp_lexer_consume_token (parser->lexer);
              /* Parse the nested declarator.  */
              saved_in_type_id_in_expr_p = parser->in_type_id_in_expr_p;
              parser->in_type_id_in_expr_p = true;
              declarator
                = cp_parser_declarator (parser, dcl_kind, ctor_dtor_or_conv_p,
                                        /*parenthesized_p=*/NULL,
                                        member_p);
              parser->in_type_id_in_expr_p = saved_in_type_id_in_expr_p;
              first = false;
              /* Expect a `)'.  */
              if (!cp_parser_require (parser, CPP_CLOSE_PAREN, "`)'"))
                declarator = cp_error_declarator;
              if (declarator == cp_error_declarator)
                break;

              /* Jump into the declarator-id branch below to enter the
                 declarator's scope and fix up parser state.  */
              goto handle_declarator;
            }
          /* Otherwise, we must be done.  */
          else
            break;
        }
      else if ((!first || dcl_kind != CP_PARSER_DECLARATOR_NAMED)
               && token->type == CPP_OPEN_SQUARE)
        {
          /* Parse an array-declarator.  */
          tree bounds;

          if (ctor_dtor_or_conv_p)
            *ctor_dtor_or_conv_p = 0;

          first = false;
          parser->default_arg_ok_p = false;
          parser->in_declarator_p = true;
          /* Consume the `['.  */
          cp_lexer_consume_token (parser->lexer);
          /* Peek at the next token.  */
          token = cp_lexer_peek_token (parser->lexer);
          /* If the next token is `]', then there is no
             constant-expression.  */
          if (token->type != CPP_CLOSE_SQUARE)
            {
              bool non_constant_p;

              bounds
                = cp_parser_constant_expression (parser,
                                                 /*allow_non_constant=*/true,
                                                 &non_constant_p);
              if (!non_constant_p)
                bounds = fold_non_dependent_expr (bounds);
              /* Normally, the array bound must be an integral constant
                 expression.  However, as an extension, we allow VLAs
                 in function scopes.  */
              else if (!parser->in_function_body)
                {
                  error ("array bound is not an integer constant");
                  bounds = error_mark_node;
                }
            }
          else
            bounds = NULL_TREE;

          /* Look for the closing `]'.  */
          if (!cp_parser_require (parser, CPP_CLOSE_SQUARE, "`]'"))
            {
              declarator = cp_error_declarator;
              break;
            }

          declarator = make_array_declarator (declarator, bounds);
        }
      else if (first && dcl_kind != CP_PARSER_DECLARATOR_ABSTRACT)
        {
          tree qualifying_scope;
          tree unqualified_name;
          special_function_kind sfk;
          bool abstract_ok;

          /* Parse a declarator-id */
          abstract_ok = (dcl_kind == CP_PARSER_DECLARATOR_EITHER);
          if (abstract_ok)
            cp_parser_parse_tentatively (parser);
          unqualified_name
            = cp_parser_declarator_id (parser, /*optional_p=*/abstract_ok);
          qualifying_scope = parser->scope;
          if (abstract_ok)
            {
              if (!cp_parser_parse_definitely (parser))
                unqualified_name = error_mark_node;
              else if (unqualified_name
                       && (qualifying_scope
                           || (TREE_CODE (unqualified_name)
                               != IDENTIFIER_NODE)))
                {
                  cp_parser_error (parser, "expected unqualified-id");
                  unqualified_name = error_mark_node;
                }
            }

          if (!unqualified_name)
            return NULL;
          if (unqualified_name == error_mark_node)
            {
              declarator = cp_error_declarator;
              break;
            }

          if (qualifying_scope && at_namespace_scope_p ()
              && TREE_CODE (qualifying_scope) == TYPENAME_TYPE)
            {
              /* In the declaration of a member of a template class
                 outside of the class itself, the SCOPE will sometimes
                 be a TYPENAME_TYPE.  For example, given:

                   template <typename T>
                   int S<T>::R::i = 3;

                 the SCOPE will be a TYPENAME_TYPE for `S<T>::R'.  In
                 this context, we must resolve S<T>::R to an ordinary
                 type, rather than a typename type.

                 The reason we normally avoid resolving TYPENAME_TYPEs
                 is that a specialization of `S' might render
                 `S<T>::R' not a type.  However, if `S' is
                 specialized, then this `i' will not be used, so there
                 is no harm in resolving the types here.  */
              tree type;

              /* Resolve the TYPENAME_TYPE.  */
              type = resolve_typename_type (qualifying_scope,
                                            /*only_current_p=*/false);
              /* If that failed, the declarator is invalid.  */
              if (type == error_mark_node)
                error ("%<%T::%D%> is not a type",
                       TYPE_CONTEXT (qualifying_scope),
                       TYPE_IDENTIFIER (qualifying_scope));
              qualifying_scope = type;
            }

          /* Decide whether this declarator-id names a special member
             function (constructor, destructor, or conversion).  */
          sfk = sfk_none;
          if (unqualified_name)
            {
              tree class_type;

              if (qualifying_scope
                  && CLASS_TYPE_P (qualifying_scope))
                class_type = qualifying_scope;
              else
                class_type = current_class_type;

              if (TREE_CODE (unqualified_name) == TYPE_DECL)
                {
                  tree name_type = TREE_TYPE (unqualified_name);
                  if (class_type && same_type_p (name_type, class_type))
                    {
                      if (qualifying_scope
                          && CLASSTYPE_USE_TEMPLATE (name_type))
                        {
                          error ("invalid use of constructor as a template");
                          inform ("use %<%T::%D%> instead of %<%T::%D%> to "
                                  "name the constructor in a qualified name",
                                  class_type,
                                  DECL_NAME (TYPE_TI_TEMPLATE (class_type)),
                                  class_type, name_type);
                          declarator = cp_error_declarator;
                          break;
                        }
                      else
                        unqualified_name = constructor_name (class_type);
                    }
                  else
                    {
                      /* We do not attempt to print the declarator
                         here because we do not have enough
                         information about its original syntactic
                         form.  */
                      cp_parser_error (parser, "invalid declarator");
                      declarator = cp_error_declarator;
                      break;
                    }
                }

              if (class_type)
                {
                  if (TREE_CODE (unqualified_name) == BIT_NOT_EXPR)
                    sfk = sfk_destructor;
                  else if (IDENTIFIER_TYPENAME_P (unqualified_name))
                    sfk = sfk_conversion;
                  else if (/* There's no way to declare a constructor
                              for an anonymous type, even if the type
                              got a name for linkage purposes.  */
                           !TYPE_WAS_ANONYMOUS (class_type)
                           && constructor_name_p (unqualified_name,
                                                  class_type))
                    {
                      unqualified_name = constructor_name (class_type);
                      sfk = sfk_constructor;
                    }

                  /* A special member: record it as -1 ("is a name"),
                     per this function's documented contract.  */
                  if (ctor_dtor_or_conv_p && sfk != sfk_none)
                    *ctor_dtor_or_conv_p = -1;
                }
            }
          declarator = make_id_declarator (qualifying_scope,
                                           unqualified_name,
                                           sfk);
          declarator->id_loc = token->location;

        handle_declarator:;
          scope = get_scope_of_declarator (declarator);
          if (scope)
            /* Any names that appear after the declarator-id for a
               member are looked up in the containing scope.  */
            pushed_scope = push_scope (scope);
          parser->in_declarator_p = true;
          if ((ctor_dtor_or_conv_p && *ctor_dtor_or_conv_p)
              || (declarator && declarator->kind == cdk_id))
            /* Default args are only allowed on function
               declarations.  */
            parser->default_arg_ok_p = saved_default_arg_ok_p;
          else
            parser->default_arg_ok_p = false;

          first = false;
        }
      /* We're done.  */
      else
        break;
    }

  /* For an abstract declarator, we might wind up with nothing at this
     point.  That's an error; the declarator is not optional.  */
  if (!declarator)
    cp_parser_error (parser, "expected declarator");

  /* If we entered a scope, we must exit it now.  */
  if (pushed_scope)
    pop_scope (pushed_scope);

  /* Restore the parser state we saved on entry.  */
  parser->default_arg_ok_p = saved_default_arg_ok_p;
  parser->in_declarator_p = saved_in_declarator_p;

  return declarator;
}
/* Parse a ptr-operator.

   ptr-operator:
     * cv-qualifier-seq [opt]
     &
     :: [opt] nested-name-specifier * cv-qualifier-seq [opt]

   GNU Extension:

   ptr-operator:
     & cv-qualifier-seq [opt]

   Returns INDIRECT_REF if a pointer, or pointer-to-member, was used.
   Returns ADDR_EXPR if a reference was used.  In the case of a
   pointer-to-member, *TYPE is filled in with the TYPE containing the
   member.  *CV_QUALS is filled in with the cv-qualifier-seq, or
   TYPE_UNQUALIFIED, if there are no cv-qualifiers.  Returns
   ERROR_MARK if an error occurred.  */

static enum tree_code
cp_parser_ptr_operator (cp_parser* parser,
                        tree* type,
                        cp_cv_quals *cv_quals)
{
  enum tree_code code = ERROR_MARK;
  cp_token *token;

  /* Assume that it's not a pointer-to-member.  */
  *type = NULL_TREE;
  /* And that there are no cv-qualifiers.  */
  *cv_quals = TYPE_UNQUALIFIED;

  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);
  /* If it's a `*' or `&' we have a pointer or reference.  */
  if (token->type == CPP_MULT || token->type == CPP_AND)
    {
      /* Remember which ptr-operator we were processing.  */
      code = (token->type == CPP_AND ? ADDR_EXPR : INDIRECT_REF);

      /* Consume the `*' or `&'.  */
      cp_lexer_consume_token (parser->lexer);

      /* A `*' can be followed by a cv-qualifier-seq, and so can a
         `&', if we are allowing GNU extensions.  (The only qualifier
         that can legally appear after `&' is `restrict', but that is
         enforced during semantic analysis.  */
      if (code == INDIRECT_REF
          || cp_parser_allow_gnu_extensions_p (parser))
        *cv_quals = cp_parser_cv_qualifier_seq_opt (parser);
    }
  else
    {
      /* Try the pointer-to-member case (`X::*'); this must be
         tentative because a qualified-id that is not followed by `*'
         is something else entirely.  */
      cp_parser_parse_tentatively (parser);
      /* Look for the optional `::' operator.  */
      cp_parser_global_scope_opt (parser,
                                  /*current_scope_valid_p=*/false);
      /* Look for the nested-name specifier.  */
      cp_parser_nested_name_specifier (parser,
                                       /*typename_keyword_p=*/false,
                                       /*check_dependency_p=*/true,
                                       /*type_p=*/false,
                                       /*is_declaration=*/false);
      /* If we found it, and the next token is a `*', then we are
         indeed looking at a pointer-to-member operator.  */
      if (!cp_parser_error_occurred (parser)
          && cp_parser_require (parser, CPP_MULT, "`*'"))
        {
          /* Indicate that the `*' operator was used.  */
          code = INDIRECT_REF;

          /* A pointer-to-member of a namespace makes no sense.  */
          if (TREE_CODE (parser->scope) == NAMESPACE_DECL)
            error ("%qD is a namespace", parser->scope);
          else
            {
              /* The type of which the member is a member is given by the
                 current SCOPE.  */
              *type = parser->scope;
              /* The next name will not be qualified.  */
              parser->scope = NULL_TREE;
              parser->qualifying_scope = NULL_TREE;
              parser->object_scope = NULL_TREE;

              /* Look for the optional cv-qualifier-seq.  */
              *cv_quals = cp_parser_cv_qualifier_seq_opt (parser);
            }
        }

      /* If that didn't work we don't have a ptr-operator.  */
      if (!cp_parser_parse_definitely (parser))
        cp_parser_error (parser, "expected ptr-operator");
    }

  return code;
}
/* Parse an (optional) cv-qualifier-seq.

   cv-qualifier-seq:
     cv-qualifier cv-qualifier-seq [opt]

   cv-qualifier:
     const
     volatile

   GNU Extension:

   cv-qualifier:
     __restrict__

   Returns a bitmask representing the cv-qualifiers seen; the empty
   sequence yields TYPE_UNQUALIFIED.  A repeated qualifier is
   diagnosed and discarded.  */

static cp_cv_quals
cp_parser_cv_qualifier_seq_opt (cp_parser* parser)
{
  cp_cv_quals quals = TYPE_UNQUALIFIED;

  for (;;)
    {
      cp_cv_quals this_qual;
      cp_token *tok;

      /* Look at the next token and map its keyword, if any, onto the
         corresponding qualifier bit.  */
      tok = cp_lexer_peek_token (parser->lexer);
      if (tok->keyword == RID_CONST)
        this_qual = TYPE_QUAL_CONST;
      else if (tok->keyword == RID_VOLATILE)
        this_qual = TYPE_QUAL_VOLATILE;
      else if (tok->keyword == RID_RESTRICT)
        this_qual = TYPE_QUAL_RESTRICT;
      else
        this_qual = TYPE_UNQUALIFIED;

      /* Anything that is not a cv-qualifier ends the sequence.  */
      if (this_qual == TYPE_UNQUALIFIED)
        break;

      if (quals & this_qual)
        {
          /* The same qualifier appeared twice; complain and drop the
             redundant token so parsing can continue.  */
          error ("duplicate cv-qualifier");
          cp_lexer_purge_token (parser->lexer);
        }
      else
        {
          cp_lexer_consume_token (parser->lexer);
          quals |= this_qual;
        }
    }

  return quals;
}
/* Parse a declarator-id.

   declarator-id:
     id-expression
     :: [opt] nested-name-specifier [opt] type-name

   In the `id-expression' case, the value returned is as for
   cp_parser_id_expression if the id-expression was an unqualified-id.
   If the id-expression was a qualified-id, then a SCOPE_REF is
   returned.  The first operand is the scope (either a NAMESPACE_DECL
   or TREE_TYPE), but the second is still just a representation of an
   unqualified-id.  */

static tree
cp_parser_declarator_id (cp_parser* parser, bool optional_p)
{
  /* The expression must be an id-expression.  Assume that qualified
     names are the names of types so that:

       template <class T>
       int S<T>::R::i = 3;

     will work; we must treat `S<T>::R' as the name of a type.
     Similarly, assume that qualified names are templates, where
     required, so that:

       template <class T>
       int S<T>::R<T>::i = 3;

     will work, too.  */
  tree id = cp_parser_id_expression (parser,
                                     /*template_keyword_p=*/false,
                                     /*check_dependency_p=*/false,
                                     /*template_p=*/NULL,
                                     /*declarator_p=*/true,
                                     optional_p);
  /* A BASELINK wrapper is stripped down to the underlying set of
     functions; everything else is returned as-is.  */
  return (id && BASELINK_P (id)) ? BASELINK_FUNCTIONS (id) : id;
}
/* Parse a type-id.

   type-id:
     type-specifier-seq abstract-declarator [opt]

   Returns the TYPE specified.  */

static tree
cp_parser_type_id (cp_parser* parser)
{
  cp_decl_specifier_seq specs;
  cp_declarator *abs_decl;

  /* A type-id always begins with a type-specifier-seq.  */
  cp_parser_type_specifier_seq (parser, /*is_condition=*/false,
                                &specs);
  if (specs.type == error_mark_node)
    return error_mark_node;

  /* The abstract-declarator is optional, so attempt it tentatively
     and back out if nothing declarator-like is present.  */
  cp_parser_parse_tentatively (parser);
  abs_decl
    = cp_parser_declarator (parser, CP_PARSER_DECLARATOR_ABSTRACT, NULL,
                            /*parenthesized_p=*/NULL,
                            /*member_p=*/false);
  if (!cp_parser_parse_definitely (parser))
    abs_decl = NULL;

  /* Combine the specifiers and the (possibly absent) declarator into
     the final type.  */
  return groktypename (&specs, abs_decl);
}
/* Parse a type-specifier-seq.

   type-specifier-seq:
     type-specifier type-specifier-seq [opt]

   GNU extension:

   type-specifier-seq:
     attributes type-specifier-seq [opt]

   If IS_CONDITION is true, we are at the start of a "condition",
   e.g., we've just seen "if (".

   Sets *TYPE_SPECIFIER_SEQ to represent the sequence.  On error,
   TYPE_SPECIFIER_SEQ->type is set to error_mark_node.  */

static void
cp_parser_type_specifier_seq (cp_parser* parser,
                              bool is_condition,
                              cp_decl_specifier_seq *type_specifier_seq)
{
  bool seen_type_specifier = false;
  cp_parser_flags flags = CP_PARSER_FLAGS_OPTIONAL;

  /* Clear the TYPE_SPECIFIER_SEQ.  */
  clear_decl_specs (type_specifier_seq);

  /* Parse the type-specifiers and attributes.  */
  while (true)
    {
      tree type_specifier;
      bool is_cv_qualifier;

      /* Check for attributes first.  */
      if (cp_lexer_next_token_is_keyword (parser->lexer, RID_ATTRIBUTE))
        {
          type_specifier_seq->attributes =
            chainon (type_specifier_seq->attributes,
                     cp_parser_attributes_opt (parser));
          continue;
        }

      /* Look for the type-specifier.  */
      type_specifier = cp_parser_type_specifier (parser,
                                                 flags,
                                                 type_specifier_seq,
                                                 /*is_declaration=*/false,
                                                 NULL,
                                                 &is_cv_qualifier);
      if (!type_specifier)
        {
          /* If the first type-specifier could not be found, this is not a
             type-specifier-seq at all.  */
          if (!seen_type_specifier)
            {
              cp_parser_error (parser, "expected type-specifier");
              type_specifier_seq->type = error_mark_node;
              return;
            }
          /* If subsequent type-specifiers could not be found, the
             type-specifier-seq is complete.  */
          break;
        }

      seen_type_specifier = true;
      /* The standard says that a condition can be:

            type-specifier-seq declarator = assignment-expression

         However, given:

           struct S {};
           if (int S = ...)

         we should treat the "S" as a declarator, not as a
         type-specifier.  The standard doesn't say that explicitly for
         type-specifier-seq, but it does say that for
         decl-specifier-seq in an ordinary declaration.  Perhaps it
         would be clearer just to allow a decl-specifier-seq here, and
         then add a semantic restriction that if any decl-specifiers
         that are not type-specifiers appear, the program is invalid.  */
      /* After the first non-cv type-specifier of a condition, stop
         accepting user-defined type names as further specifiers.  */
      if (is_condition && !is_cv_qualifier)
        flags |= CP_PARSER_FLAGS_NO_USER_DEFINED_TYPES;
    }

  cp_parser_check_decl_spec (type_specifier_seq);
}
/* Parse a parameter-declaration-clause.

   parameter-declaration-clause:
     parameter-declaration-list [opt] ... [opt]
     parameter-declaration-list , ...

   Returns a representation for the parameter declarations.  A return
   value of NULL indicates a parameter-declaration-clause consisting
   only of an ellipsis.  */

static cp_parameter_declarator *
cp_parser_parameter_declaration_clause (cp_parser* parser)
{
  cp_parameter_declarator *parameters;
  cp_token *token;
  bool ellipsis_p;
  bool is_error;

  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);
  /* Check for trivial parameter-declaration-clauses.  */
  if (token->type == CPP_ELLIPSIS)
    {
      /* Consume the `...' token.  */
      cp_lexer_consume_token (parser->lexer);
      /* NULL means "just an ellipsis", per the contract above.  */
      return NULL;
    }
  else if (token->type == CPP_CLOSE_PAREN)
    /* There are no parameters.  */
    {
#ifndef NO_IMPLICIT_EXTERN_C
      /* In implicit extern "C" system headers, `()' means an
         unspecified parameter list (as in C), not `(void)'.  */
      if (in_system_header && current_class_type == NULL
          && current_lang_name == lang_name_c)
        return NULL;
      else
#endif
        return no_parameters;
    }
  /* Check for `(void)', too, which is a special case.  */
  else if (token->keyword == RID_VOID
           && (cp_lexer_peek_nth_token (parser->lexer, 2)->type
               == CPP_CLOSE_PAREN))
    {
      /* Consume the `void' token.  */
      cp_lexer_consume_token (parser->lexer);
      /* There are no parameters.  */
      return no_parameters;
    }

  /* Parse the parameter-declaration-list.  */
  parameters = cp_parser_parameter_declaration_list (parser, &is_error);
  /* If a parse error occurred while parsing the
     parameter-declaration-list, then the entire
     parameter-declaration-clause is erroneous.  */
  if (is_error)
    return NULL;

  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);
  /* If it's a `,', the clause should terminate with an ellipsis.  */
  if (token->type == CPP_COMMA)
    {
      /* Consume the `,'.  */
      cp_lexer_consume_token (parser->lexer);
      /* Expect an ellipsis.  */
      ellipsis_p
        = (cp_parser_require (parser, CPP_ELLIPSIS, "`...'") != NULL);
    }
  /* It might also be `...' if the optional trailing `,' was
     omitted.  */
  else if (token->type == CPP_ELLIPSIS)
    {
      /* Consume the `...' token.  */
      cp_lexer_consume_token (parser->lexer);
      /* And remember that we saw it.  */
      ellipsis_p = true;
    }
  else
    ellipsis_p = false;

  /* Finish the parameter list.  */
  if (parameters && ellipsis_p)
    parameters->ellipsis_p = true;

  return parameters;
}
/* Parse a parameter-declaration-list.

   parameter-declaration-list:
     parameter-declaration
     parameter-declaration-list , parameter-declaration

   Returns a representation of the parameter-declaration-list, as for
   cp_parser_parameter_declaration_clause.  However, the
   `void_list_node' is never appended to the list.  Upon return,
   *IS_ERROR will be true iff an error occurred.  */

static cp_parameter_declarator *
cp_parser_parameter_declaration_list (cp_parser* parser, bool *is_error)
{
  cp_parameter_declarator *parameters = NULL;
  /* TAIL always points at the `next' slot where the next parameter
     should be appended.  (The original text here read `¶meters'
     and `¶meter->next' — mojibake for `&parameters' and
     `&parameter->next'; fixed.)  */
  cp_parameter_declarator **tail = &parameters;
  bool saved_in_unbraced_linkage_specification_p;

  /* Assume all will go well.  */
  *is_error = false;

  /* The special considerations that apply to a function within an
     unbraced linkage specifications do not apply to the parameters
     to the function.  */
  saved_in_unbraced_linkage_specification_p
    = parser->in_unbraced_linkage_specification_p;
  parser->in_unbraced_linkage_specification_p = false;

  /* Look for more parameters.  */
  while (true)
    {
      cp_parameter_declarator *parameter;
      bool parenthesized_p;

      /* Parse the parameter.  */
      parameter
        = cp_parser_parameter_declaration (parser,
                                           /*template_parm_p=*/false,
                                           &parenthesized_p);

      /* If a parse error occurred parsing the parameter declaration,
         then the entire parameter-declaration-list is erroneous.  */
      if (!parameter)
        {
          *is_error = true;
          parameters = NULL;
          break;
        }
      /* Add the new parameter to the list.  */
      *tail = parameter;
      tail = &parameter->next;

      /* Peek at the next token.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_PAREN)
          || cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS)
          /* These are for Objective-C++ */
          || cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON)
          || cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
        /* The parameter-declaration-list is complete.  */
        break;
      else if (cp_lexer_next_token_is (parser->lexer, CPP_COMMA))
        {
          cp_token *token;

          /* Peek at the next token.  */
          token = cp_lexer_peek_nth_token (parser->lexer, 2);
          /* If it's an ellipsis, then the list is complete.  */
          if (token->type == CPP_ELLIPSIS)
            break;
          /* Otherwise, there must be more parameters.  Consume the
             `,'.  */
          cp_lexer_consume_token (parser->lexer);
          /* When parsing something like:

                int i(float f, double d)

             we can tell after seeing the declaration for "f" that we
             are not looking at an initialization of a variable "i",
             but rather at the declaration of a function "i".

             Due to the fact that the parsing of template arguments
             (as specified to a template-id) requires backtracking we
             cannot use this technique when inside a template argument
             list.  */
          if (!parser->in_template_argument_list_p
              && !parser->in_type_id_in_expr_p
              && cp_parser_uncommitted_to_tentative_parse_p (parser)
              /* However, a parameter-declaration of the form
                 "float(f)" (which is a valid declaration of a
                 parameter "f") can also be interpreted as an
                 expression (the conversion of "f" to "float").  */
              && !parenthesized_p)
            cp_parser_commit_to_tentative_parse (parser);
        }
      else
        {
          cp_parser_error (parser, "expected %<,%> or %<...%>");
          if (!cp_parser_uncommitted_to_tentative_parse_p (parser))
            cp_parser_skip_to_closing_parenthesis (parser,
                                                   /*recovering=*/true,
                                                   /*or_comma=*/false,
                                                   /*consume_paren=*/false);
          break;
        }
    }

  /* Restore the linkage-specification flag saved on entry.  */
  parser->in_unbraced_linkage_specification_p
    = saved_in_unbraced_linkage_specification_p;

  return parameters;
}
/* Parse a parameter declaration.

   parameter-declaration:
     decl-specifier-seq declarator
     decl-specifier-seq declarator = assignment-expression
     decl-specifier-seq abstract-declarator [opt]
     decl-specifier-seq abstract-declarator [opt] = assignment-expression

   If TEMPLATE_PARM_P is TRUE, then this parameter-declaration
   declares a template parameter.  (In that case, a non-nested `>'
   token encountered during the parsing of the assignment-expression
   is not interpreted as a greater-than operator.)

   Returns a representation of the parameter, or NULL if an error
   occurs.  If PARENTHESIZED_P is non-NULL, *PARENTHESIZED_P is set to
   true iff the declarator is of the form "(p)".  */
static cp_parameter_declarator *
cp_parser_parameter_declaration (cp_parser *parser,
bool template_parm_p,
bool *parenthesized_p)
{
int declares_class_or_enum;
bool greater_than_is_operator_p;
cp_decl_specifier_seq decl_specifiers;
cp_declarator *declarator;
tree default_argument;
cp_token *token;
const char *saved_message;
/* In a template parameter, `>' is not an operator.

   [temp.param]

   When parsing a default template-argument for a non-type
   template-parameter, the first non-nested `>' is taken as the end
   of the template parameter-list rather than a greater-than
   operator.  */
greater_than_is_operator_p = !template_parm_p;
/* Type definitions may not appear in parameter types; save the old
   message so it can be restored on every exit path below.  */
saved_message = parser->type_definition_forbidden_message;
parser->type_definition_forbidden_message
= "types may not be defined in parameter types";
/* Parse the declaration-specifiers.  */
cp_parser_decl_specifier_seq (parser,
CP_PARSER_FLAGS_NONE,
&decl_specifiers,
&declares_class_or_enum);
/* If an error occurred, there's no reason to attempt to parse the
   rest of the declaration.  */
if (cp_parser_error_occurred (parser))
{
parser->type_definition_forbidden_message = saved_message;
return NULL;
}
/* Peek at the next token.  */
token = cp_lexer_peek_token (parser->lexer);
/* If the next token is a `)', `,', `=', `>', or `...', then there
   is no declarator (this parameter is of abstract-declarator-less
   form).  */
if (token->type == CPP_CLOSE_PAREN
|| token->type == CPP_COMMA
|| token->type == CPP_EQ
|| token->type == CPP_ELLIPSIS
|| token->type == CPP_GREATER)
{
declarator = NULL;
if (parenthesized_p)
*parenthesized_p = false;
}
/* Otherwise, there should be a declarator.  */
else
{
bool saved_default_arg_ok_p = parser->default_arg_ok_p;
parser->default_arg_ok_p = false;
/* After seeing a decl-specifier-seq, if the next token is not a
   "(", there is no possibility that the code is a valid
   expression.  Therefore, if parsing tentatively, we commit at
   this point.  */
if (!parser->in_template_argument_list_p
/* In an expression context, having seen:

     (int((char ...

   we cannot be sure whether we are looking at a
   function-type (taking a "char" as a parameter) or a cast
   of some object of type "char" to "int".  */
&& !parser->in_type_id_in_expr_p
&& cp_parser_uncommitted_to_tentative_parse_p (parser)
&& cp_lexer_next_token_is_not (parser->lexer, CPP_OPEN_PAREN))
cp_parser_commit_to_tentative_parse (parser);
/* Parse the declarator.  */
declarator = cp_parser_declarator (parser,
CP_PARSER_DECLARATOR_EITHER,
/*ctor_dtor_or_conv_p=*/NULL,
parenthesized_p,
/*member_p=*/false);
parser->default_arg_ok_p = saved_default_arg_ok_p;
/* After the declarator, allow more attributes.  */
decl_specifiers.attributes
= chainon (decl_specifiers.attributes,
cp_parser_attributes_opt (parser));
}
/* The restriction on defining new types applies only to the type
   of the parameter, not to the default argument.  */
parser->type_definition_forbidden_message = saved_message;
/* If the next token is `=', then process a default argument.  */
if (cp_lexer_next_token_is (parser->lexer, CPP_EQ))
{
bool saved_greater_than_is_operator_p;
/* Consume the `='.  */
cp_lexer_consume_token (parser->lexer);
/* If we are defining a class, then the tokens that make up the
   default argument must be saved and processed later (late
   parsing), since the default argument may refer to members not
   yet declared.  */
if (!template_parm_p && at_class_scope_p ()
&& TYPE_BEING_DEFINED (current_class_type))
{
unsigned depth = 0;
cp_token *first_token;
cp_token *token;
/* Add tokens until we have processed the entire default
   argument.  We add the range [first_token, token).  DEPTH
   tracks nesting of ()/[]/{} so that a `,' or `)' inside a
   nested construct does not end the default argument.  */
first_token = cp_lexer_peek_token (parser->lexer);
while (true)
{
bool done = false;
/* Peek at the next token.  */
token = cp_lexer_peek_token (parser->lexer);
/* What we do depends on what token we have.  */
switch (token->type)
{
/* In valid code, a default argument must be
   immediately followed by a `,' `)', or `...'.  */
case CPP_COMMA:
case CPP_CLOSE_PAREN:
case CPP_ELLIPSIS:
/* If we run into a non-nested `;', `}', or `]',
   then the code is invalid -- but the default
   argument is certainly over.  */
case CPP_SEMICOLON:
case CPP_CLOSE_BRACE:
case CPP_CLOSE_SQUARE:
if (depth == 0)
done = true;
/* Update DEPTH, if necessary.  */
else if (token->type == CPP_CLOSE_PAREN
|| token->type == CPP_CLOSE_BRACE
|| token->type == CPP_CLOSE_SQUARE)
--depth;
break;
case CPP_OPEN_PAREN:
case CPP_OPEN_SQUARE:
case CPP_OPEN_BRACE:
++depth;
break;
case CPP_GREATER:
/* If we see a non-nested `>', and `>' is not an
   operator, then it marks the end of the default
   argument.  */
if (!depth && !greater_than_is_operator_p)
done = true;
break;
/* If we run out of tokens, issue an error message.  */
case CPP_EOF:
case CPP_PRAGMA_EOL:
error ("file ends in default argument");
done = true;
break;
case CPP_NAME:
case CPP_SCOPE:
/* In these cases, we should look for template-ids.
   For example, if the default argument is
   `X<int, double>()', we need to do name lookup to
   figure out whether or not `X' is a template; if
   so, the `,' does not end the default argument.

   That is not yet done.  */
break;
default:
break;
}
/* If we've reached the end, stop.  */
if (done)
break;
/* Add the token to the token block.  */
token = cp_lexer_consume_token (parser->lexer);
}
/* Create a DEFAULT_ARG to represent the unparsed default
   argument.  */
default_argument = make_node (DEFAULT_ARG);
DEFARG_TOKENS (default_argument)
= cp_token_cache_new (first_token, token);
DEFARG_INSTANTIATIONS (default_argument) = NULL;
}
/* Outside of a class definition, we can just parse the
   assignment-expression.  */
else
{
bool saved_local_variables_forbidden_p;
/* Make sure that PARSER->GREATER_THAN_IS_OPERATOR_P is
   set correctly.  */
saved_greater_than_is_operator_p
= parser->greater_than_is_operator_p;
parser->greater_than_is_operator_p = greater_than_is_operator_p;
/* Local variable names (and the `this' keyword) may not
   appear in a default argument.  */
saved_local_variables_forbidden_p
= parser->local_variables_forbidden_p;
parser->local_variables_forbidden_p = true;
/* The default argument expression may cause implicitly
   defined member functions to be synthesized, which will
   result in garbage collection.  We must treat this
   situation as if we were within the body of function so as
   to avoid collecting live data on the stack.  */
++function_depth;
/* Parse the assignment-expression.  */
if (template_parm_p)
push_deferring_access_checks (dk_no_deferred);
default_argument
= cp_parser_assignment_expression (parser, /*cast_p=*/false);
if (template_parm_p)
pop_deferring_access_checks ();
/* Restore saved state.  */
--function_depth;
parser->greater_than_is_operator_p
= saved_greater_than_is_operator_p;
parser->local_variables_forbidden_p
= saved_local_variables_forbidden_p;
}
/* A default argument in a context where they are not allowed is a
   pedwarn-or-error, depending on -pedantic-errors.  */
if (!parser->default_arg_ok_p)
{
if (!flag_pedantic_errors)
warning (0, "deprecated use of default argument for parameter of non-function");
else
{
error ("default arguments are only permitted for function parameters");
default_argument = NULL_TREE;
}
}
}
else
default_argument = NULL_TREE;
/* Package the pieces up into a cp_parameter_declarator.  */
return make_parameter_declarator (&decl_specifiers,
declarator,
default_argument);
}
/* Parse a function-body.

   function-body:
     compound_statement

   All surrounding bookkeeping (begin_function_body/finish_function_body)
   is the caller's responsibility; this merely parses the braces and
   their contents.  */
static void
cp_parser_function_body (cp_parser *parser)
{
/* NOTE(review): the NULL/false arguments presumably select "not a
   statement-expression" and "not a function-try-block" -- confirm
   against cp_parser_compound_statement's declaration.  */
cp_parser_compound_statement (parser, NULL, false);
}
/* Parse an optional ctor-initializer followed by a function-body.
   Returns true iff a ctor-initializer was actually present.  */
static bool
cp_parser_ctor_initializer_opt_and_function_body (cp_parser *parser)
{
  tree function_body;
  bool saw_ctor_initializer;

  /* Open the function body.  */
  function_body = begin_function_body ();
  /* A mem-initializer-list may precede the compound-statement; note
     whether one was seen.  */
  saw_ctor_initializer = cp_parser_ctor_initializer_opt (parser);
  /* Parse the compound-statement that forms the body proper.  */
  cp_parser_function_body (parser);
  /* Close the function body again.  */
  finish_function_body (function_body);

  return saw_ctor_initializer;
}
/* Parse an initializer.

   initializer:
     = initializer-clause
     ( expression-list )

   Returns an expression representing the initializer.  If no
   initializer is present, NULL_TREE is returned.

   *IS_PARENTHESIZED_INIT is set to TRUE if the `( expression-list )'
   production is used, and zero otherwise.  *IS_PARENTHESIZED_INIT is
   set to FALSE if there is no initializer present.  If there is an
   initializer, and it is not a constant-expression, *NON_CONSTANT_P
   is set to true; otherwise it is set to false.  */
static tree
cp_parser_initializer (cp_parser* parser, bool* is_parenthesized_init,
bool* non_constant_p)
{
  cp_token *next_token;
  tree result;

  /* Look at the upcoming token to decide which production applies.  */
  next_token = cp_lexer_peek_token (parser->lexer);
  /* Tell the caller whether this initializer was parenthesized, and
     start out assuming it is a constant expression.  */
  *is_parenthesized_init = (next_token->type == CPP_OPEN_PAREN);
  *non_constant_p = false;

  switch (next_token->type)
    {
    case CPP_EQ:
      /* `= initializer-clause': eat the `=' and parse the clause.  */
      cp_lexer_consume_token (parser->lexer);
      result = cp_parser_initializer_clause (parser, non_constant_p);
      break;

    case CPP_OPEN_PAREN:
      /* `( expression-list )'.  */
      result = cp_parser_parenthesized_expression_list (parser, false,
							/*cast_p=*/false,
							non_constant_p);
      break;

    default:
      /* Anything else is an error.  */
      cp_parser_error (parser, "expected initializer");
      result = error_mark_node;
      break;
    }

  return result;
}
/* Parse an initializer-clause.

   initializer-clause:
     assignment-expression
     { initializer-list , [opt] }
     { }

   Returns an expression representing the initializer.

   If the `assignment-expression' production is used the value
   returned is simply a representation for the expression.

   Otherwise, a CONSTRUCTOR is returned.  The CONSTRUCTOR_ELTS will be
   the elements of the initializer-list (or NULL, if the last
   production is used).  The TREE_TYPE for the CONSTRUCTOR will be
   NULL_TREE.  There is no way to detect whether or not the optional
   trailing `,' was provided.  NON_CONSTANT_P is as for
   cp_parser_initializer.  */
static tree
cp_parser_initializer_clause (cp_parser* parser, bool* non_constant_p)
{
  tree init;

  /* Start out assuming the expression is constant.  */
  *non_constant_p = false;

  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
    {
      /* A braced-initializer.  Eat the `{'.  */
      cp_lexer_consume_token (parser->lexer);
      /* Represent the braced-initializer with a CONSTRUCTOR node.  */
      init = make_node (CONSTRUCTOR);
      /* Unless the brace is immediately closed, there is a
	 non-trivial initializer-list to parse.  */
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_CLOSE_BRACE))
	{
	  CONSTRUCTOR_ELTS (init)
	    = cp_parser_initializer_list (parser, non_constant_p);
	  /* A trailing `,' token is allowed.  */
	  if (cp_lexer_next_token_is (parser->lexer, CPP_COMMA))
	    cp_lexer_consume_token (parser->lexer);
	}
      /* Now, there should be a trailing `}'.  */
      cp_parser_require (parser, CPP_CLOSE_BRACE, "`}'");
    }
  else
    {
      /* No `{', so we are looking at an assignment-expression,
	 parsed as a constant-expression that may be non-constant.  */
      init = cp_parser_constant_expression (parser,
					    /*allow_non_constant_p=*/true,
					    non_constant_p);
      if (!*non_constant_p)
	init = fold_non_dependent_expr (init);
    }

  return init;
}
/* Parse an initializer-list.

   initializer-list:
     initializer-clause
     initializer-list , initializer-clause

   GNU Extension:

   initializer-list:
     identifier : initializer-clause
     initializer-list, identifier : initializer-clause

   Returns a VEC of constructor_elt.  The VALUE of each elt is an expression
   for the initializer.  If the INDEX of the elt is non-NULL, it is the
   IDENTIFIER_NODE naming the field to initialize.  NON_CONSTANT_P is
   as for cp_parser_initializer.  */
static VEC(constructor_elt,gc) *
cp_parser_initializer_list (cp_parser* parser, bool* non_constant_p)
{
  VEC(constructor_elt,gc) *elts = NULL;

  /* Assume all of the expressions are constant.  */
  *non_constant_p = false;

  /* Accumulate clauses until the list ends.  */
  while (true)
    {
      tree designator = NULL_TREE;
      tree init;
      bool clause_non_constant_p;
      cp_token *after_comma;

      /* An identifier followed by a colon is the GNU
	 designated-initializer extension.  */
      if (cp_parser_allow_gnu_extensions_p (parser)
	  && cp_lexer_next_token_is (parser->lexer, CPP_NAME)
	  && cp_lexer_peek_nth_token (parser->lexer, 2)->type == CPP_COLON)
	{
	  /* Warn the user that they are using an extension.  */
	  if (pedantic)
	    pedwarn ("ISO C++ does not allow designated initializers");
	  /* Consume the identifier.  */
	  designator = cp_lexer_consume_token (parser->lexer)->u.value;
	  /* Consume the `:'.  */
	  cp_lexer_consume_token (parser->lexer);
	}

      /* Parse the initializer-clause itself.  */
      init = cp_parser_initializer_clause (parser, &clause_non_constant_p);
      /* If any clause is non-constant, so is the entire initializer.  */
      if (clause_non_constant_p)
	*non_constant_p = true;

      /* Append this element to the vector.  */
      CONSTRUCTOR_APPEND_ELT (elts, designator, init);

      /* Without a following comma, the list is over.  */
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA))
	break;

      /* Likewise if the `,' is merely the optional trailing comma
	 before the closing `}'.  */
      after_comma = cp_lexer_peek_nth_token (parser->lexer, 2);
      if (after_comma->type == CPP_CLOSE_BRACE)
	break;

      /* Consume the `,' token and go around again.  */
      cp_lexer_consume_token (parser->lexer);
    }

  return elts;
}
/* Classes [gram.class] */
/* Parse a class-name.

   class-name:
     identifier
     template-id

   TYPENAME_KEYWORD_P is true iff the `typename' keyword has been used
   to indicate that names looked up in dependent types should be
   assumed to be types.  TEMPLATE_KEYWORD_P is true iff the `template'
   keyword has been used to indicate that the name that appears next
   is a template.  TAG_TYPE indicates the explicit tag given before
   the type name, if any.  If CHECK_DEPENDENCY_P is FALSE, names are
   looked up in dependent scopes.  If CLASS_HEAD_P is TRUE, this class
   is the class being defined in a class-head.

   Returns the TYPE_DECL representing the class, or error_mark_node on
   failure (after issuing or simulating a diagnostic).  */
static tree
cp_parser_class_name (cp_parser *parser,
bool typename_keyword_p,
bool template_keyword_p,
enum tag_types tag_type,
bool check_dependency_p,
bool class_head_p,
bool is_declaration)
{
tree decl;
tree scope;
bool typename_p;
cp_token *token;
/* All class-names start with an identifier.  */
token = cp_lexer_peek_token (parser->lexer);
if (token->type != CPP_NAME && token->type != CPP_TEMPLATE_ID)
{
cp_parser_error (parser, "expected class-name");
return error_mark_node;
}
/* PARSER->SCOPE can be cleared when parsing the template-arguments
   to a template-id, so we save it here.  */
scope = parser->scope;
if (scope == error_mark_node)
return error_mark_node;
/* Any name names a type if we're following the `typename' keyword
   in a qualified name where the enclosing scope is type-dependent.  */
typename_p = (typename_keyword_p && scope && TYPE_P (scope)
&& dependent_type_p (scope));
/* Handle the common case (an identifier, but not a template-id)
   efficiently, without going through cp_parser_template_id.  */
if (token->type == CPP_NAME
&& !cp_parser_nth_token_starts_template_argument_list_p (parser, 2))
{
cp_token *identifier_token;
tree identifier;
bool ambiguous_p;
/* Look for the identifier.  */
identifier_token = cp_lexer_peek_token (parser->lexer);
ambiguous_p = identifier_token->ambiguous_p;
identifier = cp_parser_identifier (parser);
/* If the next token isn't an identifier, we are certainly not
   looking at a class-name.  */
if (identifier == error_mark_node)
decl = error_mark_node;
/* If we know this is a type-name, there's no need to look it
   up.  */
else if (typename_p)
decl = identifier;
else
{
tree ambiguous_decls;
/* If we already know that this lookup is ambiguous, then
   we've already issued an error message; there's no reason
   to check again.  */
if (ambiguous_p)
{
cp_parser_simulate_error (parser);
return error_mark_node;
}
/* If the next token is a `::', then the name must be a type
   name.

   [basic.lookup.qual]

   During the lookup for a name preceding the :: scope
   resolution operator, object, function, and enumerator
   names are ignored.  */
if (cp_lexer_next_token_is (parser->lexer, CPP_SCOPE))
tag_type = typename_type;
/* Look up the name.  */
decl = cp_parser_lookup_name (parser, identifier,
tag_type,
/*is_template=*/false,
/*is_namespace=*/false,
check_dependency_p,
&ambiguous_decls);
if (ambiguous_decls)
{
error ("reference to %qD is ambiguous", identifier);
print_candidates (ambiguous_decls);
if (cp_parser_parsing_tentatively (parser))
{
/* Remember the ambiguity so a later re-parse of the
   same token does not repeat the diagnostic.  */
identifier_token->ambiguous_p = true;
cp_parser_simulate_error (parser);
}
return error_mark_node;
}
}
}
else
{
/* Try a template-id.  */
decl = cp_parser_template_id (parser, template_keyword_p,
check_dependency_p,
is_declaration);
if (decl == error_mark_node)
return error_mark_node;
}
decl = cp_parser_maybe_treat_template_as_class (decl, class_head_p);
/* If this is a typename, create a TYPENAME_TYPE.  */
if (typename_p && decl != error_mark_node)
{
decl = make_typename_type (scope, decl, typename_type,
/*complain=*/tf_error);
if (decl != error_mark_node)
decl = TYPE_NAME (decl);
}
/* Check to see that it is really the name of a class.  */
if (TREE_CODE (decl) == TEMPLATE_ID_EXPR
&& TREE_CODE (TREE_OPERAND (decl, 0)) == IDENTIFIER_NODE
&& cp_lexer_next_token_is (parser->lexer, CPP_SCOPE))
/* Situations like this:

     template <typename T> struct A {
       typename T::template X<int>::I i;
     };

   are problematic.  Is `T::template X<int>' a class-name?  The
   standard does not seem to be definitive, but there is no other
   valid interpretation of the following `::'.  Therefore, those
   names are considered class-names.  */
{
decl = make_typename_type (scope, decl, tag_type, tf_error);
if (decl != error_mark_node)
decl = TYPE_NAME (decl);
}
else if (TREE_CODE (decl) != TYPE_DECL
|| TREE_TYPE (decl) == error_mark_node
|| !IS_AGGR_TYPE (TREE_TYPE (decl)))
decl = error_mark_node;
if (decl == error_mark_node)
cp_parser_error (parser, "expected class-name");
return decl;
}
/* Parse a class-specifier.

   class-specifier:
     class-head { member-specification [opt] }

   Returns the TREE_TYPE representing the class, or error_mark_node if
   the class-head or the base classes were invalid.  */
static tree
cp_parser_class_specifier (cp_parser* parser)
{
cp_token *token;
tree type;
tree attributes = NULL_TREE;
int has_trailing_semicolon;
bool nested_name_specifier_p;
unsigned saved_num_template_parameter_lists;
bool saved_in_function_body;
tree old_scope = NULL_TREE;
tree scope = NULL_TREE;
tree bases = NULL_TREE;
/* Defer access checks until the class-head has been processed.  */
push_deferring_access_checks (dk_no_deferred);
/* Parse the class-head.  */
type = cp_parser_class_head (parser,
&nested_name_specifier_p,
&attributes,
&bases);
/* If the class-head was a semantic disaster, skip the entire body
   of the class.  */
if (!type)
{
cp_parser_skip_to_end_of_block_or_statement (parser);
pop_deferring_access_checks ();
return error_mark_node;
}
/* Look for the `{'.  */
if (!cp_parser_require (parser, CPP_OPEN_BRACE, "`{'"))
{
pop_deferring_access_checks ();
return error_mark_node;
}
/* Process the base classes.  If they're invalid, skip the
   entire class body.  */
if (!xref_basetypes (type, bases))
{
cp_parser_skip_to_closing_brace (parser);
/* Consuming the closing brace yields better error messages
   later on.  */
cp_lexer_consume_token (parser->lexer);
pop_deferring_access_checks ();
return error_mark_node;
}
/* Issue an error message if type-definitions are forbidden here.  */
cp_parser_check_type_definition (parser);
/* Remember that we are defining one more class.  */
++parser->num_classes_being_defined;
/* Inside the class, surrounding template-parameter-lists do not
   apply.  */
saved_num_template_parameter_lists
= parser->num_template_parameter_lists;
parser->num_template_parameter_lists = 0;
/* We are not in a function body.  */
saved_in_function_body = parser->in_function_body;
parser->in_function_body = false;
/* Start the class.  */
if (nested_name_specifier_p)
{
scope = CP_DECL_CONTEXT (TYPE_MAIN_DECL (type));
old_scope = push_inner_scope (scope);
}
type = begin_class_definition (type, attributes);
if (type == error_mark_node)
/* If the type is erroneous, skip the entire body of the class.  */
cp_parser_skip_to_closing_brace (parser);
else
/* Parse the member-specification.  */
cp_parser_member_specification_opt (parser);
/* Look for the trailing `}'.  */
cp_parser_require (parser, CPP_CLOSE_BRACE, "`}'");
/* We get better error messages by noticing a common problem: a
   missing trailing `;'.  */
token = cp_lexer_peek_token (parser->lexer);
has_trailing_semicolon = (token->type == CPP_SEMICOLON);
/* Look for trailing attributes to apply to this class.  */
if (cp_parser_allow_gnu_extensions_p (parser))
attributes = cp_parser_attributes_opt (parser);
if (type != error_mark_node)
type = finish_struct (type, attributes);
if (nested_name_specifier_p)
pop_inner_scope (old_scope, scope);
/* If this class is not itself within the scope of another class,
   then we need to parse the bodies of all of the queued function
   definitions.  Note that the queued functions defined in a class
   are not always processed immediately following the
   class-specifier for that class.  Consider:

     struct A {
       struct B { void f() { sizeof (A); } };
     };

   If `f' were processed before the processing of `A' were
   completed, there would be no way to compute the size of `A'.
   Note that the nesting we are interested in here is lexical --
   not the semantic nesting given by TYPE_CONTEXT.  In particular,
   for:

     struct A { struct B; };
     struct A::B { void f() { } };

   there is no need to delay the parsing of `A::B::f'.  */
if (--parser->num_classes_being_defined == 0)
{
tree queue_entry;
tree fn;
tree class_type = NULL_TREE;
tree pushed_scope = NULL_TREE;
/* In a first pass, parse default arguments to the functions.
   Then, in a second pass, parse the bodies of the functions.
   This two-phased approach handles cases like:

     struct S {
       void f() { g(); }
       void g(int i = 3);
     };

   */
/* The queue is a TREE_LIST; each pass consumes its own chain
   (TREE_PURPOSE for default arguments, TREE_VALUE for bodies),
   reversed first so entries run in declaration order.  */
for (TREE_PURPOSE (parser->unparsed_functions_queues)
= nreverse (TREE_PURPOSE (parser->unparsed_functions_queues));
(queue_entry = TREE_PURPOSE (parser->unparsed_functions_queues));
TREE_PURPOSE (parser->unparsed_functions_queues)
= TREE_CHAIN (TREE_PURPOSE (parser->unparsed_functions_queues)))
{
fn = TREE_VALUE (queue_entry);
/* If there are default arguments that have not yet been processed,
   take care of them now.  Entries for the same class are adjacent,
   so the enclosing scope is pushed only when the class changes.  */
if (class_type != TREE_PURPOSE (queue_entry))
{
if (pushed_scope)
pop_scope (pushed_scope);
class_type = TREE_PURPOSE (queue_entry);
pushed_scope = push_scope (class_type);
}
/* Make sure that any template parameters are in scope.  */
maybe_begin_member_template_processing (fn);
/* Parse the default argument expressions.  */
cp_parser_late_parsing_default_args (parser, fn);
/* Remove any template parameters from the symbol table.  */
maybe_end_member_template_processing ();
}
if (pushed_scope)
pop_scope (pushed_scope);
/* Now parse the body of the functions.  */
for (TREE_VALUE (parser->unparsed_functions_queues)
= nreverse (TREE_VALUE (parser->unparsed_functions_queues));
(queue_entry = TREE_VALUE (parser->unparsed_functions_queues));
TREE_VALUE (parser->unparsed_functions_queues)
= TREE_CHAIN (TREE_VALUE (parser->unparsed_functions_queues)))
{
/* Figure out which function we need to process.  */
fn = TREE_VALUE (queue_entry);
/* Parse the function.  */
cp_parser_late_parsing_for_member (parser, fn);
}
}
/* Put back any saved access checks.  */
pop_deferring_access_checks ();
/* Restore saved state.  */
parser->in_function_body = saved_in_function_body;
parser->num_template_parameter_lists
= saved_num_template_parameter_lists;
return type;
}
/* Parse a class-head.
class-head:
class-key identifier [opt] base-clause [opt]
class-key nested-name-specifier identifier base-clause [opt]
class-key nested-name-specifier [opt] template-id
base-clause [opt]
GNU Extensions:
class-key attributes identifier [opt] base-clause [opt]
class-key attributes nested-name-specifier identifier base-clause [opt]
class-key attributes nested-name-specifier [opt] template-id
base-clause [opt]
Returns the TYPE of the indicated class. Sets
*NESTED_NAME_SPECIFIER_P to TRUE iff one of the productions
involving a nested-name-specifier was used, and FALSE otherwise.
Returns error_mark_node if this is not a class-head.
Returns NULL_TREE if the class-head is syntactically valid, but
semantically invalid in a way that means we should skip the entire
body of the class. */
static tree
cp_parser_class_head (cp_parser* parser,
bool* nested_name_specifier_p,
tree *attributes_p,
tree *bases)
{
tree nested_name_specifier;
enum tag_types class_key;
tree id = NULL_TREE;
tree type = NULL_TREE;
tree attributes;
bool template_id_p = false;
bool qualified_p = false;
bool invalid_nested_name_p = false;
bool invalid_explicit_specialization_p = false;
tree pushed_scope = NULL_TREE;
unsigned num_templates;
/* Assume no nested-name-specifier will be present. */
*nested_name_specifier_p = false;
/* Assume no template parameter lists will be used in defining the
type. */
num_templates = 0;
/* Look for the class-key. */
class_key = cp_parser_class_key (parser);
if (class_key == none_type)
return error_mark_node;
/* Parse the attributes. */
attributes = cp_parser_attributes_opt (parser);
/* If the next token is `::', that is invalid -- but sometimes
people do try to write:
struct ::S {};
Handle this gracefully by accepting the extra qualifier, and then
issuing an error about it later if this really is a
class-head. If it turns out just to be an elaborated type
specifier, remain silent. */
if (cp_parser_global_scope_opt (parser, /*current_scope_valid_p=*/false))
qualified_p = true;
push_deferring_access_checks (dk_no_check);
/* Determine the name of the class. Begin by looking for an
optional nested-name-specifier. */
nested_name_specifier
= cp_parser_nested_name_specifier_opt (parser,
/*typename_keyword_p=*/false,
/*check_dependency_p=*/false,
/*type_p=*/false,
/*is_declaration=*/false);
/* If there was a nested-name-specifier, then there *must* be an
identifier. */
if (nested_name_specifier)
{
/* Although the grammar says `identifier', it really means
`class-name' or `template-name'. You are only allowed to
define a class that has already been declared with this
syntax.
The proposed resolution for Core Issue 180 says that wherever
you see `class T::X' you should treat `X' as a type-name.
It is OK to define an inaccessible class; for example:
class A { class B; };
class A::B {};
We do not know if we will see a class-name, or a
template-name. We look for a class-name first, in case the
class-name is a template-id; if we looked for the
template-name first we would stop after the template-name. */
cp_parser_parse_tentatively (parser);
type = cp_parser_class_name (parser,
/*typename_keyword_p=*/false,
/*template_keyword_p=*/false,
class_type,
/*check_dependency_p=*/false,
/*class_head_p=*/true,
/*is_declaration=*/false);
/* If that didn't work, ignore the nested-name-specifier. */
if (!cp_parser_parse_definitely (parser))
{
invalid_nested_name_p = true;
id = cp_parser_identifier (parser);
if (id == error_mark_node)
id = NULL_TREE;
}
/* If we could not find a corresponding TYPE, treat this
declaration like an unqualified declaration. */
if (type == error_mark_node)
nested_name_specifier = NULL_TREE;
/* Otherwise, count the number of templates used in TYPE and its
containing scopes. */
else
{
tree scope;
for (scope = TREE_TYPE (type);
scope && TREE_CODE (scope) != NAMESPACE_DECL;
scope = (TYPE_P (scope)
? TYPE_CONTEXT (scope)
: DECL_CONTEXT (scope)))
if (TYPE_P (scope)
&& CLASS_TYPE_P (scope)
&& CLASSTYPE_TEMPLATE_INFO (scope)
&& PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (scope))
&& !CLASSTYPE_TEMPLATE_SPECIALIZATION (scope))
++num_templates;
}
}
/* Otherwise, the identifier is optional. */
else
{
/* We don't know whether what comes next is a template-id,
an identifier, or nothing at all. */
cp_parser_parse_tentatively (parser);
/* Check for a template-id. */
id = cp_parser_template_id (parser,
/*template_keyword_p=*/false,
/*check_dependency_p=*/true,
/*is_declaration=*/true);
/* If that didn't work, it could still be an identifier. */
if (!cp_parser_parse_definitely (parser))
{
if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
id = cp_parser_identifier (parser);
else
id = NULL_TREE;
}
else
{
template_id_p = true;
++num_templates;
}
}
pop_deferring_access_checks ();
if (id)
cp_parser_check_for_invalid_template_id (parser, id);
/* If it's not a `:' or a `{' then we can't really be looking at a
class-head, since a class-head only appears as part of a
class-specifier. We have to detect this situation before calling
xref_tag, since that has irreversible side-effects. */
if (!cp_parser_next_token_starts_class_definition_p (parser))
{
cp_parser_error (parser, "expected %<{%> or %<:%>");
return error_mark_node;
}
/* At this point, we're going ahead with the class-specifier, even
if some other problem occurs. */
cp_parser_commit_to_tentative_parse (parser);
/* Issue the error about the overly-qualified name now. */
if (qualified_p)
cp_parser_error (parser,
"global qualification of class name is invalid");
else if (invalid_nested_name_p)
cp_parser_error (parser,
"qualified name does not name a class");
else if (nested_name_specifier)
{
tree scope;
/* Reject typedef-names in class heads. */
if (!DECL_IMPLICIT_TYPEDEF_P (type))
{
error ("invalid class name in declaration of %qD", type);
type = NULL_TREE;
goto done;
}
/* Figure out in what scope the declaration is being placed. */
scope = current_scope ();
/* If that scope does not contain the scope in which the
class was originally declared, the program is invalid. */
if (scope && !is_ancestor (scope, nested_name_specifier))
{
error ("declaration of %qD in %qD which does not enclose %qD",
type, scope, nested_name_specifier);
type = NULL_TREE;
goto done;
}
/* [dcl.meaning]
A declarator-id shall not be qualified exception of the
definition of a ... nested class outside of its class
... [or] a the definition or explicit instantiation of a
class member of a namespace outside of its namespace. */
if (scope == nested_name_specifier)
{
pedwarn ("extra qualification ignored");
nested_name_specifier = NULL_TREE;
num_templates = 0;
}
}
/* An explicit-specialization must be preceded by "template <>". If
it is not, try to recover gracefully. */
if (at_namespace_scope_p ()
&& parser->num_template_parameter_lists == 0
&& template_id_p)
{
error ("an explicit specialization must be preceded by %<template <>%>");
invalid_explicit_specialization_p = true;
/* Take the same action that would have been taken by
cp_parser_explicit_specialization. */
++parser->num_template_parameter_lists;
begin_specialization ();
}
/* There must be no "return" statements between this point and the
end of this function; set "type "to the correct return value and
use "goto done;" to return. */
/* Make sure that the right number of template parameters were
present. */
if (!cp_parser_check_template_parameters (parser, num_templates))
{
/* If something went wrong, there is no point in even trying to
process the class-definition. */
type = NULL_TREE;
goto done;
}
/* Look up the type. */
if (template_id_p)
{
type = TREE_TYPE (id);
type = maybe_process_partial_specialization (type);
if (nested_name_specifier)
pushed_scope = push_scope (nested_name_specifier);
}
else if (nested_name_specifier)
{
tree class_type;
/* Given:
template <typename T> struct S { struct T };
template <typename T> struct S<T>::T { };
we will get a TYPENAME_TYPE when processing the definition of
`S::T'. We need to resolve it to the actual type before we
try to define it. */
if (TREE_CODE (TREE_TYPE (type)) == TYPENAME_TYPE)
{
class_type = resolve_typename_type (TREE_TYPE (type),
/*only_current_p=*/false);
if (class_type != error_mark_node)
type = TYPE_NAME (class_type);
else
{
cp_parser_error (parser, "could not resolve typename type");
type = error_mark_node;
}
}
maybe_process_partial_specialization (TREE_TYPE (type));
class_type = current_class_type;
/* Enter the scope indicated by the nested-name-specifier. */
pushed_scope = push_scope (nested_name_specifier);
/* Get the canonical version of this type. */
type = TYPE_MAIN_DECL (TREE_TYPE (type));
if (PROCESSING_REAL_TEMPLATE_DECL_P ()
&& !CLASSTYPE_TEMPLATE_SPECIALIZATION (TREE_TYPE (type)))
{
type = push_template_decl (type);
if (type == error_mark_node)
{
type = NULL_TREE;
goto done;
}
}
type = TREE_TYPE (type);
*nested_name_specifier_p = true;
}
else /* The name is not a nested name. */
{
/* If the class was unnamed, create a dummy name. */
if (!id)
id = make_anon_name ();
type = xref_tag (class_key, id, /*tag_scope=*/ts_current,
parser->num_template_parameter_lists);
}
/* Indicate whether this class was declared as a `class' or as a
`struct'. */
if (TREE_CODE (type) == RECORD_TYPE)
CLASSTYPE_DECLARED_CLASS (type) = (class_key == class_type);
cp_parser_check_class_key (class_key, type);
/* If this type was already complete, and we see another definition,
that's an error. */
if (type != error_mark_node && COMPLETE_TYPE_P (type))
{
error ("redefinition of %q#T", type);
error ("previous definition of %q+#T", type);
type = NULL_TREE;
goto done;
}
else if (type == error_mark_node)
type = NULL_TREE;
/* We will have entered the scope containing the class; the names of
base classes should be looked up in that context. For example:
struct A { struct B {}; struct C; };
struct A::C : B {};
is valid. */
*bases = NULL_TREE;
/* Get the list of base-classes, if there is one. */
if (cp_lexer_next_token_is (parser->lexer, CPP_COLON))
*bases = cp_parser_base_clause (parser);
done:
/* Leave the scope given by the nested-name-specifier. We will
enter the class scope itself while processing the members. */
if (pushed_scope)
pop_scope (pushed_scope);
if (invalid_explicit_specialization_p)
{
end_specialization ();
--parser->num_template_parameter_lists;
}
*attributes_p = attributes;
return type;
}
/* Parse a class-key.
class-key:
class
struct
union
Returns the kind of class-key specified, or none_type to indicate
error. */
static enum tag_types
cp_parser_class_key (cp_parser* parser)
{
  /* The class-key must be the very next token.  */
  cp_token *token = cp_parser_require (parser, CPP_KEYWORD, "class-key");
  if (!token)
    return none_type;
  /* Map the keyword onto a tag type; diagnose anything that is not a
     valid class-key.  */
  enum tag_types tag_type = cp_parser_token_is_class_key (token);
  if (!tag_type)
    cp_parser_error (parser, "expected class-key");
  return tag_type;
}
/* Parse an (optional) member-specification.
member-specification:
member-declaration member-specification [opt]
access-specifier : member-specification [opt] */
static void
cp_parser_member_specification_opt (cp_parser* parser)
{
  /* Loop over access-specifiers and member-declarations until the end
     of the class body is reached.  */
  for (;;)
    {
      cp_token *token = cp_lexer_peek_token (parser->lexer);
      /* A `}', EOF, or end-of-pragma terminates the
	 member-specification.  */
      if (token->type == CPP_CLOSE_BRACE
	  || token->type == CPP_EOF
	  || token->type == CPP_PRAGMA_EOL)
	return;
      switch (token->keyword)
	{
	case RID_PUBLIC:
	case RID_PROTECTED:
	case RID_PRIVATE:
	  /* An access-specifier: consume it, make it the specifier in
	     effect for subsequent members, and require the `:'.  */
	  cp_lexer_consume_token (parser->lexer);
	  current_access_specifier = token->u.value;
	  cp_parser_require (parser, CPP_COLON, "`:'");
	  break;
	default:
	  /* A #pragma is permitted at class scope.  */
	  if (token->type == CPP_PRAGMA)
	    {
	      cp_parser_pragma (parser, pragma_external);
	      break;
	    }
	  /* Anything else must be a member-declaration.  */
	  cp_parser_member_declaration (parser);
	}
    }
}
/* Parse a member-declaration.
member-declaration:
decl-specifier-seq [opt] member-declarator-list [opt] ;
function-definition ; [opt]
:: [opt] nested-name-specifier template [opt] unqualified-id ;
using-declaration
template-declaration
member-declarator-list:
member-declarator
member-declarator-list , member-declarator
member-declarator:
declarator pure-specifier [opt]
declarator constant-initializer [opt]
identifier [opt] : constant-expression
GNU Extensions:
member-declaration:
__extension__ member-declaration
member-declarator:
declarator attributes [opt] pure-specifier [opt]
declarator attributes [opt] constant-initializer [opt]
identifier [opt] attributes [opt] : constant-expression */
static void
cp_parser_member_declaration (cp_parser* parser)
{
  /* The parsed decl-specifier-seq for this member-declaration.  */
  cp_decl_specifier_seq decl_specifiers;
  /* Attributes detached from the decl-specifier-seq; they are
     re-attached to each declarator parsed below.  */
  tree prefix_attributes;
  tree decl;
  int declares_class_or_enum;
  bool friend_p;
  cp_token *token;
  int saved_pedantic;
  /* Check for the `__extension__' keyword.  */
  if (cp_parser_extension_opt (parser, &saved_pedantic))
    {
      /* Recurse.  */
      cp_parser_member_declaration (parser);
      /* Restore the old value of the PEDANTIC flag.  */
      pedantic = saved_pedantic;
      return;
    }
  /* Check for a template-declaration.  */
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_TEMPLATE))
    {
      /* An explicit specialization here is an error condition, and we
	 expect the specialization handler to detect and report this.  */
      if (cp_lexer_peek_nth_token (parser->lexer, 2)->type == CPP_LESS
	  && cp_lexer_peek_nth_token (parser->lexer, 3)->type == CPP_GREATER)
	cp_parser_explicit_specialization (parser);
      else
	cp_parser_template_declaration (parser, /*member_p=*/true);
      return;
    }
  /* Check for a using-declaration.  */
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_USING))
    {
      /* Parse the using-declaration.  */
      cp_parser_using_declaration (parser,
				   /*access_declaration_p=*/false);
      return;
    }
  /* Check for @defs.  */
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_AT_DEFS))
    {
      tree ivar, member;
      tree ivar_chains = cp_parser_objc_defs_expression (parser);
      ivar = ivar_chains;
      /* Detach each ivar from the chain and register it as a member.  */
      while (ivar)
	{
	  member = ivar;
	  ivar = TREE_CHAIN (member);
	  TREE_CHAIN (member) = NULL_TREE;
	  finish_member_declaration (member);
	}
      return;
    }
  /* Check for an access-declaration, e.g. `A::f;'.  */
  if (cp_parser_using_declaration (parser, /*access_declaration=*/true))
    return;
  /* Parse the decl-specifier-seq.  */
  cp_parser_decl_specifier_seq (parser,
				CP_PARSER_FLAGS_OPTIONAL,
				&decl_specifiers,
				&declares_class_or_enum);
  prefix_attributes = decl_specifiers.attributes;
  decl_specifiers.attributes = NULL_TREE;
  /* Check for an invalid type-name.  */
  if (!decl_specifiers.type
      && cp_parser_parse_and_diagnose_invalid_type_name (parser))
    return;
  /* If there is no declarator, then the decl-specifier-seq should
     specify a type.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
    {
      /* If there was no decl-specifier-seq, and the next token is a
	 `;', then we have something like:
	   struct S { ; };
	 [class.mem]
	 Each member-declaration shall declare at least one member
	 name of the class.  */
      if (!decl_specifiers.any_specifiers_p)
	{
	  cp_token *token = cp_lexer_peek_token (parser->lexer);
	  if (pedantic && !token->in_system_header)
	    pedwarn ("%Hextra %<;%>", &token->location);
	}
      else
	{
	  tree type;
	  /* See if this declaration is a friend.  */
	  friend_p = cp_parser_friend_p (&decl_specifiers);
	  /* If there were decl-specifiers, check to see if there was
	     a class-declaration.  */
	  type = check_tag_decl (&decl_specifiers);
	  /* Nested classes have already been added to the class, but
	     a `friend' needs to be explicitly registered.  */
	  if (friend_p)
	    {
	      /* If the `friend' keyword was present, the friend must
		 be introduced with a class-key.  */
	      if (!declares_class_or_enum)
		error ("a class-key must be used when declaring a friend");
	      /* In this case:
		   template <typename T> struct A {
		     friend struct A<T>::B;
		   };
		 A<T>::B will be represented by a TYPENAME_TYPE, and
		 therefore not recognized by check_tag_decl.  */
	      if (!type
		  && decl_specifiers.type
		  && TYPE_P (decl_specifiers.type))
		type = decl_specifiers.type;
	      if (!type || !TYPE_P (type))
		error ("friend declaration does not name a class or "
		       "function");
	      else
		make_friend_class (current_class_type, type,
				   /*complain=*/true);
	    }
	  /* If there is no TYPE, an error message will already have
	     been issued.  */
	  else if (!type || type == error_mark_node)
	    ;
	  /* An anonymous aggregate has to be handled specially; such
	     a declaration really declares a data member (with a
	     particular type), as opposed to a nested class.  */
	  else if (ANON_AGGR_TYPE_P (type))
	    {
	      /* Remove constructors and such from TYPE, now that we
		 know it is an anonymous aggregate.  */
	      fixup_anonymous_aggr (type);
	      /* And make the corresponding data member.  */
	      decl = build_decl (FIELD_DECL, NULL_TREE, type);
	      /* Add it to the class.  */
	      finish_member_declaration (decl);
	    }
	  else
	    cp_parser_check_access_in_redeclaration (TYPE_NAME (type));
	}
    }
  else
    {
      /* See if these declarations will be friends.  */
      friend_p = cp_parser_friend_p (&decl_specifiers);
      /* Keep going until we hit the `;' at the end of the
	 declaration.  */
      while (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
	{
	  tree attributes = NULL_TREE;
	  tree first_attribute;
	  /* Peek at the next token.  */
	  token = cp_lexer_peek_token (parser->lexer);
	  /* Check for a bitfield declaration.  */
	  if (token->type == CPP_COLON
	      || (token->type == CPP_NAME
		  && cp_lexer_peek_nth_token (parser->lexer, 2)->type
		     == CPP_COLON))
	    {
	      tree identifier;
	      tree width;
	      /* Get the name of the bitfield.  Note that we cannot just
		 check TOKEN here because it may have been invalidated by
		 the call to cp_lexer_peek_nth_token above.  */
	      if (cp_lexer_peek_token (parser->lexer)->type != CPP_COLON)
		identifier = cp_parser_identifier (parser);
	      else
		identifier = NULL_TREE;
	      /* Consume the `:' token.  */
	      cp_lexer_consume_token (parser->lexer);
	      /* Get the width of the bitfield.  */
	      width
		= cp_parser_constant_expression (parser,
						 /*allow_non_constant=*/false,
						 NULL);
	      /* Look for attributes that apply to the bitfield.  */
	      attributes = cp_parser_attributes_opt (parser);
	      /* Remember which attributes are prefix attributes and
		 which are not.  */
	      first_attribute = attributes;
	      /* Combine the attributes.  */
	      attributes = chainon (prefix_attributes, attributes);
	      /* Create the bitfield declaration.  */
	      decl = grokbitfield (identifier
				   ? make_id_declarator (NULL_TREE,
							 identifier,
							 sfk_none)
				   : NULL,
				   &decl_specifiers,
				   width);
	      /* Apply the attributes.  */
	      cplus_decl_attributes (&decl, attributes, /*flags=*/0);
	    }
	  else
	    {
	      cp_declarator *declarator;
	      tree initializer;
	      tree asm_specification;
	      int ctor_dtor_or_conv_p;
	      /* Parse the declarator.  */
	      declarator
		= cp_parser_declarator (parser, CP_PARSER_DECLARATOR_NAMED,
					&ctor_dtor_or_conv_p,
					/*parenthesized_p=*/NULL,
					/*member_p=*/true);
	      /* If something went wrong parsing the declarator, make sure
		 that we at least consume some tokens.  */
	      if (declarator == cp_error_declarator)
		{
		  /* Skip to the end of the statement.  */
		  cp_parser_skip_to_end_of_statement (parser);
		  /* If the next token is not a semicolon, that is
		     probably because we just skipped over the body of
		     a function.  So, we consume a semicolon if
		     present, but do not issue an error message if it
		     is not present.  */
		  if (cp_lexer_next_token_is (parser->lexer,
					      CPP_SEMICOLON))
		    cp_lexer_consume_token (parser->lexer);
		  return;
		}
	      if (declares_class_or_enum & 2)
		cp_parser_check_for_definition_in_return_type
		  (declarator, decl_specifiers.type);
	      /* Look for an asm-specification.  */
	      asm_specification = cp_parser_asm_specification_opt (parser);
	      /* Look for attributes that apply to the declaration.  */
	      attributes = cp_parser_attributes_opt (parser);
	      /* Remember which attributes are prefix attributes and
		 which are not.  */
	      first_attribute = attributes;
	      /* Combine the attributes.  */
	      attributes = chainon (prefix_attributes, attributes);
	      /* If it's an `=', then we have a constant-initializer or a
		 pure-specifier.  It is not correct to parse the
		 initializer before registering the member declaration
		 since the member declaration should be in scope while
		 its initializer is processed.  However, the rest of the
		 front end does not yet provide an interface that allows
		 us to handle this correctly.  */
	      if (cp_lexer_next_token_is (parser->lexer, CPP_EQ))
		{
		  /* In [class.mem]:
		     A pure-specifier shall be used only in the declaration of
		     a virtual function.
		     A member-declarator can contain a constant-initializer
		     only if it declares a static member of integral or
		     enumeration type.
		     Therefore, if the DECLARATOR is for a function, we look
		     for a pure-specifier; otherwise, we look for a
		     constant-initializer.  When we call `grokfield', it will
		     perform more stringent semantics checks.  */
		  if (function_declarator_p (declarator))
		    initializer = cp_parser_pure_specifier (parser);
		  else
		    /* Parse the initializer.  */
		    initializer = cp_parser_constant_initializer (parser);
		}
	      /* Otherwise, there is no initializer.  */
	      else
		initializer = NULL_TREE;
	      /* See if we are probably looking at a function
		 definition.  We are certainly not looking at a
		 member-declarator.  Calling `grokfield' has
		 side-effects, so we must not do it unless we are sure
		 that we are looking at a member-declarator.  */
	      if (cp_parser_token_starts_function_definition_p
		  (cp_lexer_peek_token (parser->lexer)))
		{
		  /* The grammar does not allow a pure-specifier to be
		     used when a member function is defined.  (It is
		     possible that this fact is an oversight in the
		     standard, since a pure function may be defined
		     outside of the class-specifier.)  */
		  if (initializer)
		    error ("pure-specifier on function-definition");
		  decl = cp_parser_save_member_function_body (parser,
							      &decl_specifiers,
							      declarator,
							      attributes);
		  /* If the member was not a friend, declare it here.  */
		  if (!friend_p)
		    finish_member_declaration (decl);
		  /* Peek at the next token.  */
		  token = cp_lexer_peek_token (parser->lexer);
		  /* If the next token is a semicolon, consume it.  */
		  if (token->type == CPP_SEMICOLON)
		    cp_lexer_consume_token (parser->lexer);
		  return;
		}
	      else
		/* Create the declaration.  */
		decl = grokfield (declarator, &decl_specifiers,
				  initializer, /*init_const_expr_p=*/true,
				  asm_specification,
				  attributes);
	    }
	  /* Reset PREFIX_ATTRIBUTES.  */
	  while (attributes && TREE_CHAIN (attributes) != first_attribute)
	    attributes = TREE_CHAIN (attributes);
	  if (attributes)
	    TREE_CHAIN (attributes) = NULL_TREE;
	  /* If there is any qualification still in effect, clear it
	     now; we will be starting fresh with the next declarator.  */
	  parser->scope = NULL_TREE;
	  parser->qualifying_scope = NULL_TREE;
	  parser->object_scope = NULL_TREE;
	  /* If it's a `,', then there are more declarators.  */
	  if (cp_lexer_next_token_is (parser->lexer, CPP_COMMA))
	    cp_lexer_consume_token (parser->lexer);
	  /* If the next token isn't a `;', then we have a parse error.  */
	  else if (cp_lexer_next_token_is_not (parser->lexer,
					       CPP_SEMICOLON))
	    {
	      cp_parser_error (parser, "expected %<;%>");
	      /* Skip tokens until we find a `;'.  */
	      cp_parser_skip_to_end_of_statement (parser);
	      break;
	    }
	  if (decl)
	    {
	      /* Add DECL to the list of members.  */
	      if (!friend_p)
		finish_member_declaration (decl);
	      if (TREE_CODE (decl) == FUNCTION_DECL)
		cp_parser_save_default_args (parser, decl);
	    }
	}
    }
  cp_parser_require (parser, CPP_SEMICOLON, "`;'");
}
/* Parse a pure-specifier.
pure-specifier:
= 0
Returns INTEGER_ZERO_NODE if a pure specifier is found.
Otherwise, ERROR_MARK_NODE is returned. */
static tree
cp_parser_pure_specifier (cp_parser* parser)
{
  cp_token *token;
  /* A pure-specifier begins with `='.  */
  if (!cp_parser_require (parser, CPP_EQ, "`='"))
    return error_mark_node;
  /* The next token must be the literal `0'; c_lex_with_flags marks a
     single digit '0' with PURE_ZERO.  */
  token = cp_lexer_consume_token (parser->lexer);
  if (token->type != CPP_NUMBER || !(token->flags & PURE_ZERO))
    {
      cp_parser_error (parser,
		       "invalid pure specifier (only `= 0' is allowed)");
      cp_parser_skip_to_end_of_statement (parser);
      return error_mark_node;
    }
  /* A member function template may not be declared pure virtual.  */
  if (PROCESSING_REAL_TEMPLATE_DECL_P ())
    {
      error ("templates may not be %<virtual%>");
      return error_mark_node;
    }
  return integer_zero_node;
}
/* Parse a constant-initializer.
constant-initializer:
= constant-expression
Returns a representation of the constant-expression. */
static tree
cp_parser_constant_initializer (cp_parser* parser)
{
  /* The initializer begins with `='.  */
  if (!cp_parser_require (parser, CPP_EQ, "`='"))
    return error_mark_node;
  /* A braced initializer is not permitted here; it is invalid to write:
       struct S { static const int i = { 7 }; };
     Diagnose it and skip past the whole brace-enclosed group.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
    {
      cp_parser_error (parser,
		       "a brace-enclosed initializer is not allowed here");
      /* Consume the `{', skip to the matching brace, and require the
	 trailing `}'.  */
      cp_lexer_consume_token (parser->lexer);
      cp_parser_skip_to_closing_brace (parser);
      cp_parser_require (parser, CPP_CLOSE_BRACE, "`}'");
      return error_mark_node;
    }
  /* Otherwise, parse the constant-expression itself.  */
  return cp_parser_constant_expression (parser,
					/*allow_non_constant=*/false,
					NULL);
}
/* Derived classes [gram.class.derived] */
/* Parse a base-clause.
base-clause:
: base-specifier-list
base-specifier-list:
base-specifier
base-specifier-list , base-specifier
Returns a TREE_LIST representing the base-classes, in the order in
which they were declared. The representation of each node is as
described by cp_parser_base_specifier.
In the case that no bases are specified, this function will return
NULL_TREE, not ERROR_MARK_NODE. */
static tree
cp_parser_base_clause (cp_parser* parser)
{
  tree bases = NULL_TREE;
  /* The base-clause begins with `:'.  */
  cp_parser_require (parser, CPP_COLON, "`:'");
  /* Collect the base-specifiers, chaining them in reverse order.  */
  for (;;)
    {
      tree base = cp_parser_base_specifier (parser);
      /* Keep only well-formed base-specifiers.  */
      if (base != error_mark_node)
	{
	  TREE_CHAIN (base) = bases;
	  bases = base;
	}
      /* Anything other than `,' ends the list.  */
      if (cp_lexer_peek_token (parser->lexer)->type != CPP_COMMA)
	break;
      /* Consume the `,' and continue with the next base-specifier.  */
      cp_lexer_consume_token (parser->lexer);
    }
  /* PARSER->SCOPE may still be non-NULL at this point, if the last
     base class had a qualified name.  However, the next name that
     appears is certainly not qualified.  */
  parser->scope = NULL_TREE;
  parser->qualifying_scope = NULL_TREE;
  parser->object_scope = NULL_TREE;
  /* Restore declaration order before returning.  */
  return nreverse (bases);
}
/* Parse a base-specifier.
base-specifier:
:: [opt] nested-name-specifier [opt] class-name
virtual access-specifier [opt] :: [opt] nested-name-specifier
[opt] class-name
access-specifier virtual [opt] :: [opt] nested-name-specifier
[opt] class-name
Returns a TREE_LIST. The TREE_PURPOSE will be one of
ACCESS_{DEFAULT,PUBLIC,PROTECTED,PRIVATE}_[VIRTUAL]_NODE to
indicate the specifiers provided. The TREE_VALUE will be a TYPE
(or the ERROR_MARK_NODE) indicating the type that was specified. */
static tree
cp_parser_base_specifier (cp_parser* parser)
{
  cp_token *token;
  bool done = false;
  bool virtual_p = false;
  bool duplicate_virtual_error_issued_p = false;
  bool duplicate_access_error_issued_p = false;
  bool class_scope_p, template_p;
  tree access = access_default_node;
  tree type;
  /* Process the optional `virtual' and `access-specifier'.  */
  while (!done)
    {
      /* Peek at the next token.  */
      token = cp_lexer_peek_token (parser->lexer);
      /* Process `virtual'.  */
      switch (token->keyword)
	{
	case RID_VIRTUAL:
	  /* If `virtual' appears more than once, issue an error.  */
	  if (virtual_p && !duplicate_virtual_error_issued_p)
	    {
	      /* Fixed diagnostic text: the construct is a
		 "base-specifier", not "base-specified".  */
	      cp_parser_error (parser,
			       "%<virtual%> specified more than once in base-specifier");
	      duplicate_virtual_error_issued_p = true;
	    }
	  virtual_p = true;
	  /* Consume the `virtual' token.  */
	  cp_lexer_consume_token (parser->lexer);
	  break;
	case RID_PUBLIC:
	case RID_PROTECTED:
	case RID_PRIVATE:
	  /* If more than one access specifier appears, issue an
	     error.  */
	  if (access != access_default_node
	      && !duplicate_access_error_issued_p)
	    {
	      /* Fixed diagnostic text: "base-specifier", not
		 "base-specified".  */
	      cp_parser_error (parser,
			       "more than one access specifier in base-specifier");
	      duplicate_access_error_issued_p = true;
	    }
	  access = ridpointers[(int) token->keyword];
	  /* Consume the access-specifier.  */
	  cp_lexer_consume_token (parser->lexer);
	  break;
	default:
	  done = true;
	  break;
	}
    }
  /* It is not uncommon to see programs mechanically, erroneously, use
     the 'typename' keyword to denote (dependent) qualified types
     as base classes.  */
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_TYPENAME))
    {
      if (!processing_template_decl)
	error ("keyword %<typename%> not allowed outside of templates");
      else
	error ("keyword %<typename%> not allowed in this context "
	       "(the base class is implicitly a type)");
      cp_lexer_consume_token (parser->lexer);
    }
  /* Look for the optional `::' operator.  */
  cp_parser_global_scope_opt (parser, /*current_scope_valid_p=*/false);
  /* Look for the nested-name-specifier.  The simplest way to
     implement:
       [temp.res]
       The keyword `typename' is not permitted in a base-specifier or
       mem-initializer; in these contexts a qualified name that
       depends on a template-parameter is implicitly assumed to be a
       type name.
     is to pretend that we have seen the `typename' keyword at this
     point.  */
  cp_parser_nested_name_specifier_opt (parser,
				       /*typename_keyword_p=*/true,
				       /*check_dependency_p=*/true,
				       typename_type,
				       /*is_declaration=*/true);
  /* If the base class is given by a qualified name, assume that names
     we see are type names or templates, as appropriate.  */
  class_scope_p = (parser->scope && TYPE_P (parser->scope));
  template_p = class_scope_p && cp_parser_optional_template_keyword (parser);
  /* Finally, look for the class-name.  */
  type = cp_parser_class_name (parser,
			       class_scope_p,
			       template_p,
			       typename_type,
			       /*check_dependency_p=*/true,
			       /*class_head_p=*/false,
			       /*is_declaration=*/true);
  if (type == error_mark_node)
    return error_mark_node;
  return finish_base_specifier (TREE_TYPE (type), access, virtual_p);
}
/* Exception handling [gram.exception] */
/* Parse an (optional) exception-specification.
exception-specification:
throw ( type-id-list [opt] )
Returns a TREE_LIST representing the exception-specification. The
TREE_VALUE of each node is a type. */
static tree
cp_parser_exception_specification_opt (cp_parser* parser)
{
  tree type_id_list;
  /* Without a leading `throw' there is no exception-specification.  */
  if (!cp_parser_is_keyword (cp_lexer_peek_token (parser->lexer), RID_THROW))
    return NULL_TREE;
  /* Consume the `throw' and require the opening `('.  */
  cp_lexer_consume_token (parser->lexer);
  cp_parser_require (parser, CPP_OPEN_PAREN, "`('");
  /* An immediate `)' means the specification is `throw ()'.  */
  if (cp_lexer_peek_token (parser->lexer)->type == CPP_CLOSE_PAREN)
    type_id_list = empty_except_spec;
  else
    {
      const char *saved_message;
      /* Types may not be defined in an exception-specification.  */
      saved_message = parser->type_definition_forbidden_message;
      parser->type_definition_forbidden_message
	= "types may not be defined in an exception-specification";
      /* Parse the type-id-list and restore the saved message.  */
      type_id_list = cp_parser_type_id_list (parser);
      parser->type_definition_forbidden_message = saved_message;
    }
  /* Require the closing `)'.  */
  cp_parser_require (parser, CPP_CLOSE_PAREN, "`)'");
  return type_id_list;
}
/* Parse an (optional) type-id-list.
type-id-list:
type-id
type-id-list , type-id
Returns a TREE_LIST. The TREE_VALUE of each node is a TYPE,
in the order that the types were presented. */
static tree
cp_parser_type_id_list (cp_parser* parser)
{
  tree types = NULL_TREE;
  bool more_types = true;
  /* Collect comma-separated type-ids.  */
  while (more_types)
    {
      /* Parse the next type-id and add it to the list.  */
      tree type = cp_parser_type_id (parser);
      types = add_exception_specifier (types, type, /*complain=*/1);
      /* Another type-id follows only after a `,'.  */
      if (cp_lexer_peek_token (parser->lexer)->type == CPP_COMMA)
	cp_lexer_consume_token (parser->lexer);
      else
	more_types = false;
    }
  /* The list was built in reverse; restore source order.  */
  return nreverse (types);
}
/* Parse a try-block.
try-block:
try compound-statement handler-seq */
static tree
cp_parser_try_block (cp_parser* parser)
{
  tree block;
  /* The `try' keyword introduces the block.  */
  cp_parser_require_keyword (parser, RID_TRY, "`try'");
  block = begin_try_block ();
  /* Parse the guarded compound-statement.  */
  cp_parser_compound_statement (parser, NULL, true);
  finish_try_block (block);
  /* Parse the sequence of handlers that follows.  */
  cp_parser_handler_seq (parser);
  finish_handler_sequence (block);
  return block;
}
/* Parse a function-try-block.
function-try-block:
try ctor-initializer [opt] function-body handler-seq */
static bool
cp_parser_function_try_block (cp_parser* parser)
{
  tree compound_stmt;
  tree block;
  bool ctor_initializer_p;
  /* Without the `try' keyword there is nothing to parse.  */
  if (!cp_parser_require_keyword (parser, RID_TRY, "`try'"))
    return false;
  /* Let the rest of the front-end know where we are.  */
  block = begin_function_try_block (&compound_stmt);
  /* Parse the function-body (with any ctor-initializer).  */
  ctor_initializer_p
    = cp_parser_ctor_initializer_opt_and_function_body (parser);
  /* Close the `try' part, then parse the handlers.  */
  finish_function_try_block (block);
  cp_parser_handler_seq (parser);
  /* All handlers processed; finish the whole construct.  */
  finish_function_handler_sequence (block, compound_stmt);
  return ctor_initializer_p;
}
/* Parse a handler-seq.
handler-seq:
handler handler-seq [opt] */
static void
cp_parser_handler_seq (cp_parser* parser)
{
  /* Parse at least one handler; keep going for as long as the next
     token is `catch'.  */
  do
    cp_parser_handler (parser);
  while (cp_parser_is_keyword (cp_lexer_peek_token (parser->lexer),
			       RID_CATCH));
}
/* Parse a handler.
handler:
catch ( exception-declaration ) compound-statement */
static void
cp_parser_handler (cp_parser* parser)
{
  tree handler;
  tree decl;
  /* A handler starts with the `catch' keyword.  */
  cp_parser_require_keyword (parser, RID_CATCH, "`catch'");
  handler = begin_handler ();
  /* Parse the parenthesized exception-declaration.  */
  cp_parser_require (parser, CPP_OPEN_PAREN, "`('");
  decl = cp_parser_exception_declaration (parser);
  finish_handler_parms (decl, handler);
  cp_parser_require (parser, CPP_CLOSE_PAREN, "`)'");
  /* Parse the handler's compound-statement body.  */
  cp_parser_compound_statement (parser, NULL, false);
  finish_handler (handler);
}
/* Parse an exception-declaration.
exception-declaration:
type-specifier-seq declarator
type-specifier-seq abstract-declarator
type-specifier-seq
...
Returns a VAR_DECL for the declaration, or NULL_TREE if the
ellipsis variant is used. */
static tree
cp_parser_exception_declaration (cp_parser* parser)
{
  cp_decl_specifier_seq type_specifiers;
  cp_declarator *declarator;
  const char *saved_message;
  /* `...' is the catch-all form; there is nothing to declare.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS))
    {
      cp_lexer_consume_token (parser->lexer);
      return NULL_TREE;
    }
  /* Types may not be defined in exception-declarations.  */
  saved_message = parser->type_definition_forbidden_message;
  parser->type_definition_forbidden_message
    = "types may not be defined in exception-declarations";
  /* Parse the type-specifier-seq.  */
  cp_parser_type_specifier_seq (parser, /*is_condition=*/false,
				&type_specifiers);
  /* A `)' right away means there is no declarator at all.  */
  declarator = NULL;
  if (cp_lexer_next_token_is_not (parser->lexer, CPP_CLOSE_PAREN))
    declarator = cp_parser_declarator (parser, CP_PARSER_DECLARATOR_EITHER,
				       /*ctor_dtor_or_conv_p=*/NULL,
				       /*parenthesized_p=*/NULL,
				       /*member_p=*/false);
  /* Restore the saved message.  */
  parser->type_definition_forbidden_message = saved_message;
  /* Without any type-specifiers the declaration is ill-formed.  */
  if (!type_specifiers.any_specifiers_p)
    return error_mark_node;
  return grokdeclarator (declarator, &type_specifiers, CATCHPARM, 1, NULL);
}
/* Parse a throw-expression.
throw-expression:
throw assignment-expression [opt]
Returns a THROW_EXPR representing the throw-expression. */
static tree
cp_parser_throw_expression (cp_parser* parser)
{
  tree expr;
  cp_token *next;
  cp_parser_require_keyword (parser, RID_THROW, "`throw'");
  next = cp_lexer_peek_token (parser->lexer);
  /* If the token after `throw' is one that can legitimately follow a
     complete expression, the assignment-expression operand was
     omitted.  */
  switch (next->type)
    {
    case CPP_COMMA:
    case CPP_SEMICOLON:
    case CPP_CLOSE_PAREN:
    case CPP_CLOSE_SQUARE:
    case CPP_CLOSE_BRACE:
    case CPP_COLON:
      expr = NULL_TREE;
      break;
    default:
      expr = cp_parser_assignment_expression (parser,
					      /*cast_p=*/false);
      break;
    }
  return build_throw (expr);
}
/* GNU Extensions */
/* Parse an (optional) asm-specification.
asm-specification:
asm ( string-literal )
If the asm-specification is present, returns a STRING_CST
corresponding to the string-literal. Otherwise, returns
NULL_TREE. */
static tree
cp_parser_asm_specification_opt (cp_parser* parser)
{
  cp_token *token;
  tree asm_specification;
  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);
  /* If the next token isn't the `asm' keyword, then there's no
     asm-specification.  */
  if (!cp_parser_is_keyword (token, RID_ASM))
    return NULL_TREE;
  /* Consume the `asm' token.  */
  cp_lexer_consume_token (parser->lexer);
  /* Look for the `('.  */
  cp_parser_require (parser, CPP_OPEN_PAREN, "`('");
  /* Look for the string-literal.  */
  asm_specification = cp_parser_string_literal (parser, false, false);
  /* Look for the `)'.  The required-token message previously said
     "`('", which produced a wrong "expected `('" diagnostic here.  */
  cp_parser_require (parser, CPP_CLOSE_PAREN, "`)'");
  return asm_specification;
}
/* Parse an asm-operand-list.
asm-operand-list:
asm-operand
asm-operand-list , asm-operand
asm-operand:
string-literal ( expression )
[ string-literal ] string-literal ( expression )
Returns a TREE_LIST representing the operands. The TREE_VALUE of
each node is the expression. The TREE_PURPOSE is itself a
TREE_LIST whose TREE_PURPOSE is a STRING_CST for the bracketed
string-literal (or NULL_TREE if not present) and whose TREE_VALUE
is a STRING_CST for the string literal before the parenthesis. */
static tree
cp_parser_asm_operand_list (cp_parser* parser)
{
  tree operands = NULL_TREE;
  bool more = true;
  while (more)
    {
      tree name = NULL_TREE;
      tree constraint;
      tree expr;
      /* An operand may begin with a bracketed symbolic name.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_SQUARE))
	{
	  /* Consume the `[' token.  */
	  cp_lexer_consume_token (parser->lexer);
	  /* Read the operand name and turn it into a STRING_CST.  */
	  name = cp_parser_identifier (parser);
	  if (name != error_mark_node)
	    name = build_string (IDENTIFIER_LENGTH (name),
				 IDENTIFIER_POINTER (name));
	  /* Look for the closing `]'.  */
	  cp_parser_require (parser, CPP_CLOSE_SQUARE, "`]'");
	}
      /* The constraint string-literal comes next.  */
      constraint = cp_parser_string_literal (parser, false, false);
      /* Then the parenthesized expression.  */
      cp_parser_require (parser, CPP_OPEN_PAREN, "`('");
      expr = cp_parser_expression (parser, /*cast_p=*/false);
      cp_parser_require (parser, CPP_CLOSE_PAREN, "`)'");
      /* Chain the operand onto the (reversed) list.  */
      operands = tree_cons (build_tree_list (name, constraint),
			    expr,
			    operands);
      /* A `,' means another operand follows.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_COMMA))
	cp_lexer_consume_token (parser->lexer);
      else
	more = false;
    }
  /* Restore source order before returning.  */
  return nreverse (operands);
}
/* Parse an asm-clobber-list.

   asm-clobber-list:
     string-literal
     asm-clobber-list , string-literal

   Returns a TREE_LIST, indicating the clobbers in the order that they
   appeared.  The TREE_VALUE of each node is a STRING_CST.  */

static tree
cp_parser_asm_clobber_list (cp_parser* parser)
{
  tree clobber_chain = NULL_TREE;
  bool more = true;

  while (more)
    {
      /* Each clobber is just a string-literal.  */
      tree clobber = cp_parser_string_literal (parser, false, false);

      /* Chain it onto the front of the list.  */
      clobber_chain = tree_cons (NULL_TREE, clobber, clobber_chain);

      /* A `,' introduces another clobber; otherwise we are done.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_COMMA))
	cp_lexer_consume_token (parser->lexer);
      else
	more = false;
    }

  return clobber_chain;
}
/* Parse an (optional) series of attributes.

   attributes:
     attributes attribute

   attribute:
     __attribute__ (( attribute-list [opt] ))

   The return value is as for cp_parser_attribute_list.  */

static tree
cp_parser_attributes_opt (cp_parser* parser)
{
  tree all_attributes = NULL_TREE;

  for (;;)
    {
      tree current_list = NULL_TREE;
      cp_token *tok = cp_lexer_peek_token (parser->lexer);

      /* Stop as soon as the next token is not `__attribute__'.  */
      if (tok->keyword != RID_ATTRIBUTE)
	break;

      /* Eat the keyword and the two opening parentheses.  */
      cp_lexer_consume_token (parser->lexer);
      cp_parser_require (parser, CPP_OPEN_PAREN, "`('");
      cp_parser_require (parser, CPP_OPEN_PAREN, "`('");

      /* An empty `__attribute__ (())' carries no attribute-list.  */
      if (cp_lexer_peek_token (parser->lexer)->type != CPP_CLOSE_PAREN)
	current_list = cp_parser_attribute_list (parser);

      /* Eat the two closing parentheses.  */
      cp_parser_require (parser, CPP_CLOSE_PAREN, "`)'");
      cp_parser_require (parser, CPP_CLOSE_PAREN, "`)'");

      /* Append the attributes just parsed to the accumulated list.  */
      all_attributes = chainon (all_attributes, current_list);
    }

  return all_attributes;
}
/* Parse an attribute-list.

   attribute-list:
     attribute
     attribute-list , attribute

   attribute:
     identifier
     identifier ( identifier )
     identifier ( identifier , expression-list )
     identifier ( expression-list )

   Returns a TREE_LIST, or NULL_TREE on error.  Each node corresponds
   to an attribute.  The TREE_PURPOSE of each node is the identifier
   indicating which attribute is in use.  The TREE_VALUE represents
   the arguments, if any.  */

static tree
cp_parser_attribute_list (cp_parser* parser)
{
  tree attribute_list = NULL_TREE;
  bool save_translate_strings_p = parser->translate_strings_p;

  /* Suppress string translation while parsing the list; it is
     restored before returning.  */
  parser->translate_strings_p = false;
  while (true)
    {
      cp_token *token;
      tree identifier;
      tree attribute;

      /* Look for the identifier.  We also allow keywords here; for
	 example `__attribute__ ((const))' is legal.  */
      token = cp_lexer_peek_token (parser->lexer);
      if (token->type == CPP_NAME
	  || token->type == CPP_KEYWORD)
	{
	  tree arguments = NULL_TREE;

	  /* Consume the token.  */
	  token = cp_lexer_consume_token (parser->lexer);
	  /* Save away the identifier that indicates which attribute
	     this is.  */
	  identifier = token->u.value;
	  attribute = build_tree_list (identifier, NULL_TREE);
	  /* Peek at the next token.  */
	  token = cp_lexer_peek_token (parser->lexer);
	  /* If it's an `(', then parse the attribute arguments.  */
	  if (token->type == CPP_OPEN_PAREN)
	    {
	      arguments = cp_parser_parenthesized_expression_list
			  (parser, true, /*cast_p=*/false,
			   /*non_constant_p=*/NULL);
	      /* Save the arguments away.  */
	      TREE_VALUE (attribute) = arguments;
	    }
	  /* On an argument error the attribute is silently dropped
	     from the list.  */
	  if (arguments != error_mark_node)
	    {
	      /* Add this attribute to the list; it is built in
		 reverse and un-reversed below.  */
	      TREE_CHAIN (attribute) = attribute_list;
	      attribute_list = attribute;
	    }
	  /* Re-peek so that the comma test below examines the token
	     following the attribute just parsed.  If the branch above
	     was not taken, TOKEN still holds the original peek.  */
	  token = cp_lexer_peek_token (parser->lexer);
	}
      /* Now, look for more attributes.  If the next token isn't a
	 `,', we're done.  */
      if (token->type != CPP_COMMA)
	break;

      /* Consume the comma and keep going.  */
      cp_lexer_consume_token (parser->lexer);
    }
  /* Restore the caller's string-translation setting.  */
  parser->translate_strings_p = save_translate_strings_p;

  /* We built up the list in reverse order.  */
  return nreverse (attribute_list);
}
/* Parse an optional `__extension__' keyword.  Returns TRUE if it is
   present, and FALSE otherwise.  *SAVED_PEDANTIC is set to the
   current value of the PEDANTIC flag, regardless of whether or not
   the `__extension__' keyword is present.  The caller is responsible
   for restoring the value of the PEDANTIC flag.  */

static bool
cp_parser_extension_opt (cp_parser* parser, int* saved_pedantic)
{
  /* Record the current PEDANTIC setting unconditionally; the caller
     restores it later.  */
  *saved_pedantic = pedantic;

  if (!cp_lexer_next_token_is_keyword (parser->lexer, RID_EXTENSION))
    return false;

  /* Eat the `__extension__' token; pedantic diagnostics are disabled
     while it is in effect.  */
  cp_lexer_consume_token (parser->lexer);
  pedantic = 0;
  return true;
}
/* Parse a label declaration.

   label-declaration:
     __label__ label-declarator-seq ;

   label-declarator-seq:
     identifier , label-declarator-seq
     identifier  */

static void
cp_parser_label_declaration (cp_parser* parser)
{
  /* The declaration begins with `__label__'.  */
  cp_parser_require_keyword (parser, RID_LABEL, "`__label__'");

  for (;;)
    {
      /* Each declarator is a plain identifier.  */
      tree label_name = cp_parser_identifier (parser);

      /* Bail out on a malformed identifier.  */
      if (label_name == error_mark_node)
	break;

      /* Record the label in the current scope.  */
      finish_label_decl (label_name);

      /* A `;' ends the declarator-seq.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
	break;

      /* Otherwise a `,' must separate the declarators.  */
      cp_parser_require (parser, CPP_COMMA, "`,'");
    }

  /* Eat the terminating `;'.  */
  cp_parser_require (parser, CPP_SEMICOLON, "`;'");
}
/* Support Functions */

/* Looks up NAME in the current scope, as given by PARSER->SCOPE.
   NAME should have one of the representations used for an
   id-expression.  If NAME is the ERROR_MARK_NODE, the ERROR_MARK_NODE
   is returned.  If PARSER->SCOPE is a dependent type, then a
   SCOPE_REF is returned.

   If NAME is a TEMPLATE_ID_EXPR, then it will be immediately
   returned; the name was already resolved when the TEMPLATE_ID_EXPR
   was formed.  Abstractly, such entities should not be passed to this
   function, because they do not need to be looked up, but it is
   simpler to check for this special case here, rather than at the
   call-sites.

   In cases not explicitly covered above, this function returns a
   DECL, OVERLOAD, or baselink representing the result of the lookup.
   If there was no entity with the indicated NAME, the ERROR_MARK_NODE
   is returned.

   If TAG_TYPE is not NONE_TYPE, it indicates an explicit type keyword
   (e.g., "struct") that was used.  In that case bindings that do not
   refer to types are ignored.

   If IS_TEMPLATE is TRUE, bindings that do not refer to templates are
   ignored.

   If IS_NAMESPACE is TRUE, bindings that do not refer to namespaces
   are ignored.

   If CHECK_DEPENDENCY is TRUE, names are not looked up in dependent
   types.

   If AMBIGUOUS_DECLS is non-NULL, *AMBIGUOUS_DECLS is set to a
   TREE_LIST of candidates if name-lookup results in an ambiguity, and
   NULL_TREE otherwise.  */

static tree
cp_parser_lookup_name (cp_parser *parser, tree name,
		       enum tag_types tag_type,
		       bool is_template,
		       bool is_namespace,
		       bool check_dependency,
		       tree *ambiguous_decls)
{
  int flags = 0;
  tree decl;
  tree object_type = parser->context->object_type;

  /* Once the parse is committed, complain about lookup failures
     instead of failing silently.  */
  if (!cp_parser_uncommitted_to_tentative_parse_p (parser))
    flags |= LOOKUP_COMPLAIN;

  /* Assume that the lookup will be unambiguous.  */
  if (ambiguous_decls)
    *ambiguous_decls = NULL_TREE;

  /* Now that we have looked up the name, the OBJECT_TYPE (if any) is
     no longer valid.  Note that if we are parsing tentatively, and
     the parse fails, OBJECT_TYPE will be automatically restored.  */
  parser->context->object_type = NULL_TREE;

  if (name == error_mark_node)
    return error_mark_node;

  /* A template-id has already been resolved; there is no lookup to
     do.  */
  if (TREE_CODE (name) == TEMPLATE_ID_EXPR)
    return name;
  if (BASELINK_P (name))
    {
      gcc_assert (TREE_CODE (BASELINK_FUNCTIONS (name))
		  == TEMPLATE_ID_EXPR);
      return name;
    }

  /* A BIT_NOT_EXPR is used to represent a destructor.  By this point,
     it should already have been checked to make sure that the name
     used matches the type being destroyed.  */
  if (TREE_CODE (name) == BIT_NOT_EXPR)
    {
      tree type;

      /* Figure out to which type this destructor applies: an explicit
	 qualifying scope wins, then the object expression's type,
	 then the current class.  */
      if (parser->scope)
	type = parser->scope;
      else if (object_type)
	type = object_type;
      else
	type = current_class_type;
      /* If that's not a class type, there is no destructor.  */
      if (!type || !CLASS_TYPE_P (type))
	return error_mark_node;
      /* The implicit destructor may not have been declared yet.  */
      if (CLASSTYPE_LAZY_DESTRUCTOR (type))
	lazily_declare_fn (sfk_destructor, type);
      if (!CLASSTYPE_DESTRUCTORS (type))
	  return error_mark_node;
      /* If it was a class type, return the destructor.  */
      return CLASSTYPE_DESTRUCTORS (type);
    }

  /* By this point, the NAME should be an ordinary identifier.  If
     the id-expression was a qualified name, the qualifying scope is
     stored in PARSER->SCOPE at this point.  */
  gcc_assert (TREE_CODE (name) == IDENTIFIER_NODE);

  /* Perform the lookup.  */
  if (parser->scope)
    {
      bool dependent_p;

      if (parser->scope == error_mark_node)
	return error_mark_node;

      /* If the SCOPE is dependent, the lookup must be deferred until
	 the template is instantiated -- unless we are explicitly
	 looking up names in uninstantiated templates.  Even then, we
	 cannot look up the name if the scope is not a class type; it
	 might, for example, be a template type parameter.  */
      dependent_p = (TYPE_P (parser->scope)
		     && !(parser->in_declarator_p
			  && currently_open_class (parser->scope))
		     && dependent_type_p (parser->scope));
      if ((check_dependency || !CLASS_TYPE_P (parser->scope))
	   && dependent_p)
	{
	  if (tag_type)
	    {
	      tree type;

	      /* The resolution to Core Issue 180 says that `struct
		 A::B' should be considered a type-name, even if `A'
		 is dependent.  */
	      type = make_typename_type (parser->scope, name, tag_type,
					 /*complain=*/tf_error);
	      decl = TYPE_NAME (type);
	    }
	  else if (is_template
		   && (cp_parser_next_token_ends_template_argument_p (parser)
		       || cp_lexer_next_token_is (parser->lexer,
						  CPP_CLOSE_PAREN)))
	    decl = make_unbound_class_template (parser->scope,
						name, NULL_TREE,
						/*complain=*/tf_error);
	  else
	    /* Defer the lookup: represent the qualified name
	       symbolically until instantiation time.  */
	    decl = build_qualified_name (/*type=*/NULL_TREE,
					 parser->scope, name,
					 is_template);
	}
      else
	{
	  tree pushed_scope = NULL_TREE;

	  /* If PARSER->SCOPE is a dependent type, then it must be a
	     class type, and we must not be checking dependencies;
	     otherwise, we would have processed this lookup above.  So
	     that PARSER->SCOPE is not considered a dependent base by
	     lookup_member, we must enter the scope here.  */
	  if (dependent_p)
	    pushed_scope = push_scope (parser->scope);
	  /* If the PARSER->SCOPE is a template specialization, it
	     may be instantiated during name lookup.  In that case,
	     errors may be issued.  Even if we rollback the current
	     tentative parse, those errors are valid.  */
	  decl = lookup_qualified_name (parser->scope, name,
					tag_type != none_type,
					/*complain=*/true);
	  if (pushed_scope)
	    pop_scope (pushed_scope);
	}
      parser->qualifying_scope = parser->scope;
      parser->object_scope = NULL_TREE;
    }
  else if (object_type)
    {
      tree object_decl = NULL_TREE;

      /* Look up the name in the scope of the OBJECT_TYPE, unless the
	 OBJECT_TYPE is not a class.  */
      if (CLASS_TYPE_P (object_type))
	/* If the OBJECT_TYPE is a template specialization, it may
	   be instantiated during name lookup.  In that case, errors
	   may be issued.  Even if we rollback the current tentative
	   parse, those errors are valid.  */
	object_decl = lookup_member (object_type,
				     name,
				     /*protect=*/0,
				     tag_type != none_type);
      /* Look it up in the enclosing context, too.  */
      decl = lookup_name_real (name, tag_type != none_type,
			       /*nonclass=*/0,
			       /*block_p=*/true, is_namespace, flags);
      parser->object_scope = object_type;
      parser->qualifying_scope = NULL_TREE;
      /* The member lookup, when it succeeded, takes precedence over
	 the enclosing-context lookup.  */
      if (object_decl)
	decl = object_decl;
    }
  else
    {
      /* Unqualified lookup in the current context.  */
      decl = lookup_name_real (name, tag_type != none_type,
			       /*nonclass=*/0,
			       /*block_p=*/true, is_namespace, flags);
      parser->qualifying_scope = NULL_TREE;
      parser->object_scope = NULL_TREE;
    }

  /* If the lookup failed, let our caller know.  */
  if (!decl || decl == error_mark_node)
    return error_mark_node;

  /* If it's a TREE_LIST, the result of the lookup was ambiguous.  */
  if (TREE_CODE (decl) == TREE_LIST)
    {
      if (ambiguous_decls)
	*ambiguous_decls = decl;
      /* The error message we have to print is too complicated for
	 cp_parser_error, so we incorporate its actions directly.  */
      if (!cp_parser_simulate_error (parser))
	{
	  error ("reference to %qD is ambiguous", name);
	  print_candidates (decl);
	}
      return error_mark_node;
    }

  gcc_assert (DECL_P (decl)
	      || TREE_CODE (decl) == OVERLOAD
	      || TREE_CODE (decl) == SCOPE_REF
	      || TREE_CODE (decl) == UNBOUND_CLASS_TEMPLATE
	      || BASELINK_P (decl));

  /* If we have resolved the name of a member declaration, check to
     see if the declaration is accessible.  When the name resolves to
     set of overloaded functions, accessibility is checked when
     overload resolution is done.

     During an explicit instantiation, access is not checked at all,
     as per [temp.explicit].  */
  if (DECL_P (decl))
    check_accessibility_of_qualified_id (decl, object_type, parser->scope);

  return decl;
}
/* Like cp_parser_lookup_name, but for use in the typical case where
   CHECK_ACCESS is TRUE, IS_TYPE is FALSE, IS_TEMPLATE is FALSE,
   IS_NAMESPACE is FALSE, and CHECK_DEPENDENCY is TRUE.  */

static tree
cp_parser_lookup_name_simple (cp_parser* parser, tree name)
{
  /* Delegate to the general lookup routine with the default
     settings.  */
  return cp_parser_lookup_name (parser, name, none_type,
				/*is_template=*/false,
				/*is_namespace=*/false,
				/*check_dependency=*/true,
				/*ambiguous_decls=*/NULL);
}
/* If DECL is a TEMPLATE_DECL that can be treated like a TYPE_DECL in
   the current context, return the TYPE_DECL.  If TAG_NAME_P is
   true, the DECL indicates the class being defined in a class-head,
   or declared in an elaborated-type-specifier.

   Otherwise, return DECL.  */

static tree
cp_parser_maybe_treat_template_as_class (tree decl, bool tag_name_p)
{
  /* When the TEMPLATE_DECL appears in a class-head, the translation
     from TEMPLATE_DECL to TYPE_DECL occurs:

       struct A {
	 template <typename T> struct B;
       };

       template <typename T> struct A::B {};

     Similarly, in an elaborated-type-specifier:

       namespace N { struct X{}; }

       struct A {
	 template <typename T> friend struct N::X;
       };

     However, if the DECL refers to a class type, and we are in
     the scope of the class, then the name lookup automatically
     finds the TYPE_DECL created by build_self_reference rather
     than a TEMPLATE_DECL.  For example, in:

       template <class T> struct S {
	 S s;
       };

     there is no need to handle such case.  */

  if (tag_name_p && DECL_CLASS_TEMPLATE_P (decl))
    return DECL_TEMPLATE_RESULT (decl);

  return decl;
}
/* If too many, or too few, template-parameter lists apply to the
   declarator, issue an error message.  Returns TRUE if all went well,
   and FALSE otherwise.  */

static bool
cp_parser_check_declarator_template_parameters (cp_parser* parser,
						cp_declarator *declarator)
{
  unsigned num_templates;

  /* We haven't seen any classes that involve template parameters yet.  */
  num_templates = 0;

  switch (declarator->kind)
    {
    case cdk_id:
      if (declarator->u.id.qualifying_scope)
	{
	  tree scope;

	  scope = declarator->u.id.qualifying_scope;

	  /* Walk outward through the enclosing class scopes, counting
	     each one that is a primary class template.  */
	  while (scope && CLASS_TYPE_P (scope))
	    {
	      /* You're supposed to have one `template <...>'
		 for every template class, but you don't need one
		 for a full specialization.  For example:

		 template <class T> struct S{};
		 template <> struct S<int> { void f(); };
		 void S<int>::f () {}

		 is correct; there shouldn't be a `template <>' for
		 the definition of `S<int>::f'.  */
	      if (!CLASSTYPE_TEMPLATE_INFO (scope))
		/* If SCOPE does not have template information of any
		   kind, then it is not a template, nor is it nested
		   within a template.  */
		break;
	      if (explicit_class_specialization_p (scope))
		break;
	      if (PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (scope)))
		++num_templates;

	      scope = TYPE_CONTEXT (scope);
	    }
	}
      else if (TREE_CODE (declarator->u.id.unqualified_name)
	       == TEMPLATE_ID_EXPR)
	/* If the DECLARATOR has the form `X<y>' then it uses one
	   additional level of template parameters.  */
	++num_templates;

      return cp_parser_check_template_parameters (parser,
						  num_templates);

    case cdk_function:
    case cdk_array:
    case cdk_pointer:
    case cdk_reference:
    case cdk_ptrmem:
      /* For a non-id declarator, the check applies to the underlying
	 declarator.  */
      return (cp_parser_check_declarator_template_parameters
	      (parser, declarator->declarator));

    case cdk_error:
      /* An erroneous declarator has already been diagnosed; do not
	 pile on.  */
      return true;

    default:
      gcc_unreachable ();
    }
  return false;
}
/* NUM_TEMPLATES were used in the current declaration.  If that is
   invalid, return FALSE and issue an error messages.  Otherwise,
   return TRUE.  */

static bool
cp_parser_check_template_parameters (cp_parser* parser,
				     unsigned num_templates)
{
  unsigned num_lists = parser->num_template_parameter_lists;

  /* Too few parameter lists; e.g.

       template <class T> void S<T>::R<T>::f ();  */
  if (num_lists < num_templates)
    {
      error ("too few template-parameter-lists");
      return false;
    }

  /* An exact match is OK, and so is exactly one extra list, which
     declares a member template.  */
  if (num_lists == num_templates || num_lists == num_templates + 1)
    return true;

  /* Anything beyond that is too many; e.g.

       template <class T> template <class U> void S::f();  */
  error ("too many template-parameter-lists");
  return false;
}
/* Parse an optional `::' token indicating that the following name is
   from the global namespace.  If so, PARSER->SCOPE is set to the
   GLOBAL_NAMESPACE.  Otherwise, PARSER->SCOPE is set to NULL_TREE,
   unless CURRENT_SCOPE_VALID_P is TRUE, in which case it is left alone.

   Returns the new value of PARSER->SCOPE, if the `::' token is
   present, and NULL_TREE otherwise.  */

static tree
cp_parser_global_scope_opt (cp_parser* parser, bool current_scope_valid_p)
{
  cp_token *first = cp_lexer_peek_token (parser->lexer);

  if (first->type == CPP_SCOPE)
    {
      /* Eat the `::' token.  */
      cp_lexer_consume_token (parser->lexer);
      /* Subsequent lookups start from the global namespace.  */
      parser->scope = global_namespace;
      parser->qualifying_scope = global_namespace;
      parser->object_scope = NULL_TREE;
      return parser->scope;
    }

  /* No `::'; clear any existing qualification unless the caller
     wants the current scope kept.  */
  if (!current_scope_valid_p)
    {
      parser->scope = NULL_TREE;
      parser->qualifying_scope = NULL_TREE;
      parser->object_scope = NULL_TREE;
    }

  return NULL_TREE;
}
/* Returns TRUE if the upcoming token sequence is the start of a
   constructor declarator.  If FRIEND_P is true, the declarator is
   preceded by the `friend' specifier.  The tokens examined are not
   consumed: everything is parsed tentatively and then rolled back.  */

static bool
cp_parser_constructor_declarator_p (cp_parser *parser, bool friend_p)
{
  bool constructor_p;
  tree type_decl = NULL_TREE;
  bool nested_name_p;
  cp_token *next_token;

  /* The common case is that this is not a constructor declarator, so
     try to avoid doing lots of work if at all possible.  It's not
     valid declare a constructor at function scope.  */
  if (parser->in_function_body)
    return false;
  /* And only certain tokens can begin a constructor declarator.  */
  next_token = cp_lexer_peek_token (parser->lexer);
  if (next_token->type != CPP_NAME
      && next_token->type != CPP_SCOPE
      && next_token->type != CPP_NESTED_NAME_SPECIFIER
      && next_token->type != CPP_TEMPLATE_ID)
    return false;

  /* Parse tentatively; we are going to roll back all of the tokens
     consumed here.  */
  cp_parser_parse_tentatively (parser);
  /* Assume that we are looking at a constructor declarator.  */
  constructor_p = true;

  /* Look for the optional `::' operator.  */
  cp_parser_global_scope_opt (parser,
			      /*current_scope_valid_p=*/false);
  /* Look for the nested-name-specifier.  */
  nested_name_p
    = (cp_parser_nested_name_specifier_opt (parser,
					    /*typename_keyword_p=*/false,
					    /*check_dependency_p=*/false,
					    /*type_p=*/false,
					    /*is_declaration=*/false)
       != NULL_TREE);
  /* Outside of a class-specifier, there must be a
     nested-name-specifier.  */
  if (!nested_name_p &&
      (!at_class_scope_p () || !TYPE_BEING_DEFINED (current_class_type)
       || friend_p))
    constructor_p = false;
  /* If we still think that this might be a constructor-declarator,
     look for a class-name.  */
  if (constructor_p)
    {
      /* If we have:

	 template <typename T> struct S { S(); };
	 template <typename T> S<T>::S ();

	 we must recognize that the nested `S' names a class.
	 Similarly, for:

	 template <typename T> S<T>::S<T> ();

	 we must recognize that the nested `S' names a template.  */
      type_decl = cp_parser_class_name (parser,
					/*typename_keyword_p=*/false,
					/*template_keyword_p=*/false,
					none_type,
					/*check_dependency_p=*/false,
					/*class_head_p=*/false,
					/*is_declaration=*/false);
      /* If there was no class-name, then this is not a constructor.  */
      constructor_p = !cp_parser_error_occurred (parser);
    }

  /* If we're still considering a constructor, we have to see a `(',
     to begin the parameter-declaration-clause, followed by either a
     `)', an `...', or a decl-specifier.  We need to check for a
     type-specifier to avoid being fooled into thinking that:

       S::S (f) (int);

     is a constructor.  (It is actually a function named `f' that
     takes one parameter (of type `int') and returns a value of type
     `S::S'.  */
  if (constructor_p
      && cp_parser_require (parser, CPP_OPEN_PAREN, "`('"))
    {
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_CLOSE_PAREN)
	  && cp_lexer_next_token_is_not (parser->lexer, CPP_ELLIPSIS)
	  /* A parameter declaration begins with a decl-specifier,
	     which is either the "attribute" keyword, a storage class
	     specifier, or (usually) a type-specifier.  */
	  && !cp_lexer_next_token_is_decl_specifier_keyword (parser->lexer))
	{
	  tree type;
	  tree pushed_scope = NULL_TREE;
	  unsigned saved_num_template_parameter_lists;

	  /* Names appearing in the type-specifier should be looked up
	     in the scope of the class.  */
	  if (current_class_type)
	    type = NULL_TREE;
	  else
	    {
	      type = TREE_TYPE (type_decl);
	      if (TREE_CODE (type) == TYPENAME_TYPE)
		{
		  /* Resolve the TYPENAME_TYPE so we can enter the
		     class scope; give up on failure, being careful to
		     unwind the tentative parse first.  */
		  type = resolve_typename_type (type,
						/*only_current_p=*/false);
		  if (type == error_mark_node)
		    {
		      cp_parser_abort_tentative_parse (parser);
		      return false;
		    }
		}
	      pushed_scope = push_scope (type);
	    }

	  /* Inside the constructor parameter list, surrounding
	     template-parameter-lists do not apply.  */
	  saved_num_template_parameter_lists
	    = parser->num_template_parameter_lists;
	  parser->num_template_parameter_lists = 0;

	  /* Look for the type-specifier.  */
	  cp_parser_type_specifier (parser,
				    CP_PARSER_FLAGS_NONE,
				    /*decl_specs=*/NULL,
				    /*is_declarator=*/true,
				    /*declares_class_or_enum=*/NULL,
				    /*is_cv_qualifier=*/NULL);

	  parser->num_template_parameter_lists
	    = saved_num_template_parameter_lists;

	  /* Leave the scope of the class.  */
	  if (pushed_scope)
	    pop_scope (pushed_scope);

	  constructor_p = !cp_parser_error_occurred (parser);
	}
    }
  else
    constructor_p = false;
  /* We did not really want to consume any tokens.  */
  cp_parser_abort_tentative_parse (parser);

  return constructor_p;
}
/* Parse the definition of the function given by the DECL_SPECIFIERS,
   ATTRIBUTES, and DECLARATOR.  The access checks have been deferred;
   they must be performed once we are in the scope of the function.

   Returns the function defined.  */

static tree
cp_parser_function_definition_from_specifiers_and_declarator
  (cp_parser* parser,
   cp_decl_specifier_seq *decl_specifiers,
   tree attributes,
   const cp_declarator *declarator)
{
  tree fn;
  bool started_ok;

  /* Open the function definition.  */
  started_ok = start_function (decl_specifiers, declarator, attributes);

  /* Template headers seen so far do not directly qualify what
     follows.  */
  reset_specialization ();

  /* Any names looked up in the decl-specifier-seq whose access was
     not yet checked must be checked now.  The checks had to wait
     until we are inside the function's scope, since the function
     might be a friend.  */
  perform_deferred_access_checks ();

  if (started_ok)
    fn = cp_parser_function_definition_after_declarator (parser,
							 /*inline_p=*/false);
  else
    {
      /* start_function failed; discard the whole definition.  */
      cp_parser_skip_to_end_of_block_or_statement (parser);
      fn = error_mark_node;
    }

  return fn;
}
/* Parse the part of a function-definition that follows the
   declarator.  INLINE_P is TRUE iff this function is an inline
   function defined with a class-specifier.

   Returns the function defined.  */

static tree
cp_parser_function_definition_after_declarator (cp_parser* parser,
						bool inline_p)
{
  tree fn;
  bool ctor_initializer_p = false;
  bool saved_in_unbraced_linkage_specification_p;
  bool saved_in_function_body;
  unsigned saved_num_template_parameter_lists;

  /* Record that we are now inside a function body; restored before
     returning.  */
  saved_in_function_body = parser->in_function_body;
  parser->in_function_body = true;
  /* If the next token is `return', then the code may be trying to
     make use of the "named return value" extension that G++ used to
     support.  */
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_RETURN))
    {
      /* Consume the `return' keyword.  */
      cp_lexer_consume_token (parser->lexer);
      /* Look for the identifier that indicates what value is to be
	 returned.  */
      cp_parser_identifier (parser);
      /* Issue an error message.  */
      error ("named return values are no longer supported");
      /* Skip tokens until we reach the start of the function body.  */
      while (true)
	{
	  cp_token *token = cp_lexer_peek_token (parser->lexer);
	  /* Stop on `{' (the body), or on end-of-input so we do not
	     loop forever on malformed input.  */
	  if (token->type == CPP_OPEN_BRACE
	      || token->type == CPP_EOF
	      || token->type == CPP_PRAGMA_EOL)
	    break;
	  cp_lexer_consume_token (parser->lexer);
	}
    }
  /* The `extern' in `extern "C" void f () { ... }' does not apply to
     anything declared inside `f'.  */
  saved_in_unbraced_linkage_specification_p
    = parser->in_unbraced_linkage_specification_p;
  parser->in_unbraced_linkage_specification_p = false;
  /* Inside the function, surrounding template-parameter-lists do not
     apply.  */
  saved_num_template_parameter_lists
    = parser->num_template_parameter_lists;
  parser->num_template_parameter_lists = 0;
  /* If the next token is `try', then we are looking at a
     function-try-block.  */
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_TRY))
    ctor_initializer_p = cp_parser_function_try_block (parser);
  /* A function-try-block includes the function-body, so we only do
     this next part if we're not processing a function-try-block.  */
  else
    ctor_initializer_p
      = cp_parser_ctor_initializer_opt_and_function_body (parser);

  /* Finish the function.  The flags word encodes whether a
     ctor-initializer was seen (bit 0) and whether the function was
     inline in a class-specifier (bit 1).  */
  fn = finish_function ((ctor_initializer_p ? 1 : 0) |
			(inline_p ? 2 : 0));
  /* Generate code for it, if necessary.  */
  expand_or_defer_fn (fn);
  /* Restore the saved values.  */
  parser->in_unbraced_linkage_specification_p
    = saved_in_unbraced_linkage_specification_p;
  parser->num_template_parameter_lists
    = saved_num_template_parameter_lists;
  parser->in_function_body = saved_in_function_body;

  return fn;
}
/* Parse a template-declaration, assuming that the `export' (and
   `extern') keywords, if present, has already been scanned.  MEMBER_P
   is as for cp_parser_template_declaration.  Recurses for each
   additional `template <...>' header, so nested headers each get
   their own parameter list accounting.  */

static void
cp_parser_template_declaration_after_export (cp_parser* parser, bool member_p)
{
  tree decl = NULL_TREE;
  VEC (deferred_access_check,gc) *checks;
  tree parameter_list;
  bool friend_p = false;
  bool need_lang_pop;

  /* Look for the `template' keyword.  */
  if (!cp_parser_require_keyword (parser, RID_TEMPLATE, "`template'"))
    return;

  /* And the `<'.  */
  if (!cp_parser_require (parser, CPP_LESS, "`<'"))
    return;
  if (at_class_scope_p () && current_function_decl)
    {
      /* 14.5.2.2 [temp.mem]

	 A local class shall not have member templates.  */
      error ("invalid declaration of member template in local class");
      cp_parser_skip_to_end_of_block_or_statement (parser);
      return;
    }
  /* [temp]

     A template ... shall not have C linkage.  */
  if (current_lang_name == lang_name_c)
    {
      error ("template with C linkage");
      /* Give it C++ linkage to avoid confusing other parts of the
	 front end.  */
      push_lang_context (lang_name_cplusplus);
      need_lang_pop = true;
    }
  else
    need_lang_pop = false;

  /* We cannot perform access checks on the template parameter
     declarations until we know what is being declared, just as we
     cannot check the decl-specifier list.  */
  push_deferring_access_checks (dk_deferred);

  /* If the next token is `>', then we have an invalid
     specialization.  Rather than complain about an invalid template
     parameter, issue an error message here.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_GREATER))
    {
      cp_parser_error (parser, "invalid explicit specialization");
      begin_specialization ();
      parameter_list = NULL_TREE;
    }
  else
    /* Parse the template parameters.  */
    parameter_list = cp_parser_template_parameter_list (parser);

  /* Get the deferred access checks from the parameter list.  These
     will be checked once we know what is being declared, as for a
     member template the checks must be performed in the scope of the
     class containing the member.  */
  checks = get_deferred_access_checks ();

  /* Look for the `>'.  */
  cp_parser_skip_to_end_of_template_parameter_list (parser);
  /* We just processed one more parameter list.  */
  ++parser->num_template_parameter_lists;
  /* If the next token is `template', there are more template
     parameters.  Handle `template <...> template <...>' by
     recursing.  */
  if (cp_lexer_next_token_is_keyword (parser->lexer,
				      RID_TEMPLATE))
    cp_parser_template_declaration_after_export (parser, member_p);
  else
    {
      /* There are no access checks when parsing a template, as we do not
	 know if a specialization will be a friend.  */
      push_deferring_access_checks (dk_no_check);

      decl = cp_parser_single_declaration (parser,
					   checks,
					   member_p,
					   &friend_p);
      pop_deferring_access_checks ();

      /* If this is a member template declaration, let the front
	 end know.  */
      if (member_p && !friend_p && decl)
	{
	  if (TREE_CODE (decl) == TYPE_DECL)
	    cp_parser_check_access_in_redeclaration (decl);

	  decl = finish_member_template_decl (decl);
	}
      else if (friend_p && decl && TREE_CODE (decl) == TYPE_DECL)
	make_friend_class (current_class_type, TREE_TYPE (decl),
			   /*complain=*/true);
    }
  /* We are done with the current parameter list.  */
  --parser->num_template_parameter_lists;

  pop_deferring_access_checks ();

  /* Finish up.  */
  finish_template_decl (parameter_list);

  /* Register member declarations.  */
  if (member_p && !friend_p && decl && !DECL_CLASS_TEMPLATE_P (decl))
    finish_member_declaration (decl);
  /* For the erroneous case of a template with C linkage, we pushed an
     implicit C++ linkage scope; exit that scope now.  */
  if (need_lang_pop)
    pop_lang_context ();
  /* If DECL is a function template, we must return to parse it later.
     (Even though there is no definition, there might be default
     arguments that need handling.)  */
  if (member_p && decl
      && (TREE_CODE (decl) == FUNCTION_DECL
	  || DECL_FUNCTION_TEMPLATE_P (decl)))
    TREE_VALUE (parser->unparsed_functions_queues)
      = tree_cons (NULL_TREE, decl,
		   TREE_VALUE (parser->unparsed_functions_queues));
}
/* Perform the deferred access checks from a template-parameter-list.
   CHECKS is a TREE_LIST of access checks, as returned by
   get_deferred_access_checks.  */

static void
cp_parser_perform_template_parameter_access_checks (VEC (deferred_access_check,gc)* checks)
{
  /* Temporarily mark that we are in a template parameter list so the
     checks run in the proper context, then restore the counter.  */
  ++processing_template_parmlist;
  perform_access_checks (checks);
  --processing_template_parmlist;
}
/* Parse a `decl-specifier-seq [opt] init-declarator [opt] ;' or
   `function-definition' sequence.  If MEMBER_P is true, this
   declaration appears in a class scope.

   Returns the DECL for the declared entity.  If FRIEND_P is non-NULL,
   *FRIEND_P is set to TRUE iff the declaration is a friend.  */

static tree
cp_parser_single_declaration (cp_parser* parser,
			      VEC (deferred_access_check,gc)* checks,
			      bool member_p,
			      bool* friend_p)
{
  int declares_class_or_enum;
  tree decl = NULL_TREE;
  cp_decl_specifier_seq decl_specifiers;
  bool function_definition_p = false;

  /* This function is only used when processing a template
     declaration.  */
  gcc_assert (innermost_scope_kind () == sk_template_parms
	      || innermost_scope_kind () == sk_template_spec);

  /* Defer access checks until we know what is being declared.  */
  push_deferring_access_checks (dk_deferred);

  /* Try the `decl-specifier-seq [opt] init-declarator [opt]'
     alternative.  */
  cp_parser_decl_specifier_seq (parser,
				CP_PARSER_FLAGS_OPTIONAL,
				&decl_specifiers,
				&declares_class_or_enum);
  if (friend_p)
    *friend_p = cp_parser_friend_p (&decl_specifiers);

  /* There are no template typedefs.  */
  if (decl_specifiers.specs[(int) ds_typedef])
    {
      error ("template declaration of %qs", "typedef");
      decl = error_mark_node;
    }

  /* Gather up the access checks that occurred the
     decl-specifier-seq.  */
  stop_deferring_access_checks ();

  /* Check for the declaration of a template class.  */
  if (declares_class_or_enum)
    {
      if (cp_parser_declares_only_class_p (parser))
	{
	  decl = shadow_tag (&decl_specifiers);

	  /* In this case:

	       struct C {
		 friend template <typename T> struct A<T>::B;
	       };

	     A<T>::B will be represented by a TYPENAME_TYPE, and
	     therefore not recognized by shadow_tag.  */
	  if (friend_p && *friend_p
	      && !decl
	      && decl_specifiers.type
	      && TYPE_P (decl_specifiers.type))
	    decl = decl_specifiers.type;

	  if (decl && decl != error_mark_node)
	    decl = TYPE_NAME (decl);
	  else
	    decl = error_mark_node;

	  /* Perform access checks for template parameters.  */
	  cp_parser_perform_template_parameter_access_checks (checks);
	}
    }
  /* If it's not a template class, try for a template function.  If
     the next token is a `;', then this declaration does not declare
     anything.  But, if there were errors in the decl-specifiers, then
     the error might well have come from an attempted class-specifier.
     In that case, there's no need to warn about a missing declarator.  */
  if (!decl
      && (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON)
	  || decl_specifiers.type != error_mark_node))
    decl = cp_parser_init_declarator (parser,
				      &decl_specifiers,
				      checks,
				      /*function_definition_allowed_p=*/true,
				      member_p,
				      declares_class_or_enum,
				      &function_definition_p);

  pop_deferring_access_checks ();

  /* Clear any current qualification; whatever comes next is the start
     of something new.  */
  parser->scope = NULL_TREE;
  parser->qualifying_scope = NULL_TREE;
  parser->object_scope = NULL_TREE;
  /* Look for a trailing `;' after the declaration.  A function
     definition needs no `;', and on an error we skip to a recovery
     point instead.  */
  if (!function_definition_p
      && (decl == error_mark_node
	  || !cp_parser_require (parser, CPP_SEMICOLON, "`;'")))
    cp_parser_skip_to_end_of_block_or_statement (parser);

  return decl;
}
/* Parse a cast-expression that is not the operand of a unary "&". */
static tree
cp_parser_simple_cast_expression (cp_parser *parser)
{
  tree expr;

  /* Neither the address-of context nor the cast context applies for
     this production.  */
  expr = cp_parser_cast_expression (parser, /*address_p=*/false,
				    /*cast_p=*/false);
  return expr;
}
/* Parse a functional cast to TYPE. Returns an expression
representing the cast. */
static tree
cp_parser_functional_cast (cp_parser* parser, tree type)
{
  tree expression_list;
  tree cast;

  /* Parse the parenthesized argument list; it may be empty, as in
     `T()'.  */
  expression_list
    = cp_parser_parenthesized_expression_list (parser, false,
					       /*cast_p=*/true,
					       /*non_constant_p=*/NULL);

  cast = build_functional_cast (type, expression_list);
  /* [expr.const]/1: In an integral constant expression "only type
     conversions to integral or enumeration type can be used".  */
  if (TREE_CODE (type) == TYPE_DECL)
    type = TREE_TYPE (type);
  /* If the cast is not permitted in a constant expression and we are
     in one, this registers the violation and yields an error.  */
  if (cast != error_mark_node
      && !cast_valid_in_integral_constant_expression_p (type)
      && (cp_parser_non_integral_constant_expression
	  (parser, "a call to a constructor")))
    return error_mark_node;
  return cast;
}
/* Save the tokens that make up the body of a member function defined
in a class-specifier. The DECL_SPECIFIERS and DECLARATOR have
already been parsed. The ATTRIBUTES are any GNU "__attribute__"
specifiers applied to the declaration. Returns the FUNCTION_DECL
for the member function. */
static tree
cp_parser_save_member_function_body (cp_parser* parser,
				     cp_decl_specifier_seq *decl_specifiers,
				     cp_declarator *declarator,
				     tree attributes)
{
  cp_token *first;
  cp_token *last;
  tree fn;

  /* Create the function-declaration.  */
  fn = start_method (decl_specifiers, declarator, attributes);
  /* If something went badly wrong, bail out now.  */
  if (fn == error_mark_node)
    {
      /* If there's a function-body, skip it.  */
      if (cp_parser_token_starts_function_definition_p
	  (cp_lexer_peek_token (parser->lexer)))
	cp_parser_skip_to_end_of_block_or_statement (parser);
      return error_mark_node;
    }

  /* Remember it, if there are default args to post process.  */
  cp_parser_save_default_args (parser, fn);

  /* Save away the tokens that make up the body of the
     function.  */
  first = parser->lexer->next_token;
  cp_parser_cache_group (parser, CPP_CLOSE_BRACE, /*depth=*/0);
  /* Handle function try blocks: each `catch' handler after the body
     is cached as another brace-enclosed group.  */
  while (cp_lexer_next_token_is_keyword (parser->lexer, RID_CATCH))
    cp_parser_cache_group (parser, CPP_CLOSE_BRACE, /*depth=*/0);
  last = parser->lexer->next_token;

  /* Save away the inline definition; we will process it when the
     class is complete.  */
  DECL_PENDING_INLINE_INFO (fn) = cp_token_cache_new (first, last);
  DECL_PENDING_INLINE_P (fn) = 1;

  /* We need to know that this was defined in the class, so that
     friend templates are handled correctly.  */
  DECL_INITIALIZED_IN_CLASS_P (fn) = 1;

  /* We're done with the inline definition.  */
  finish_method (fn);

  /* Add FN to the queue of functions to be parsed later.  */
  TREE_VALUE (parser->unparsed_functions_queues)
    = tree_cons (NULL_TREE, fn,
		 TREE_VALUE (parser->unparsed_functions_queues));

  return fn;
}
/* Parse a template-argument-list, as well as the trailing ">" (but
   not the opening "<").  See cp_parser_template_argument_list for the
   return value.  */
static tree
cp_parser_enclosed_template_argument_list (cp_parser* parser)
{
  tree arguments;
  tree saved_scope;
  tree saved_qualifying_scope;
  tree saved_object_scope;
  bool saved_greater_than_is_operator_p;
  bool saved_skip_evaluation;

  /* [temp.names]

     When parsing a template-id, the first non-nested `>' is taken as
     the end of the template-argument-list rather than a greater-than
     operator.  */
  saved_greater_than_is_operator_p
    = parser->greater_than_is_operator_p;
  parser->greater_than_is_operator_p = false;
  /* Parsing the argument list may modify SCOPE, so we save it
     here.  */
  saved_scope = parser->scope;
  saved_qualifying_scope = parser->qualifying_scope;
  saved_object_scope = parser->object_scope;
  /* We need to evaluate the template arguments, even though this
     template-id may be nested within a "sizeof".  */
  saved_skip_evaluation = skip_evaluation;
  skip_evaluation = false;
  /* Parse the template-argument-list itself.  An immediate `>' means
     the list is empty.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_GREATER))
    arguments = NULL_TREE;
  else
    arguments = cp_parser_template_argument_list (parser);
  /* Look for the `>' that ends the template-argument-list.  If we find
     a '>>' instead, it's probably just a typo.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_RSHIFT))
    {
      if (!saved_greater_than_is_operator_p)
	{
	  /* If we're in a nested template argument list, the '>>' has
	     to be a typo for '> >'.  We emit the error message, but we
	     continue parsing and we push a '>' as next token, so that
	     the argument list will be parsed correctly.  Note that the
	     global source location is still on the token before the
	     '>>', so we need to say explicitly where we want it.  */
	  cp_token *token = cp_lexer_peek_token (parser->lexer);
	  error ("%H%<>>%> should be %<> >%> "
		 "within a nested template argument list",
		 &token->location);
	  /* ??? Proper recovery should terminate two levels of
	     template argument list here.  */
	  token->type = CPP_GREATER;
	}
      else
	{
	  /* If this is not a nested template argument list, the '>>'
	     is a typo for '>'.  Emit an error message and continue.
	     Same deal about the token location, but here we can get it
	     right by consuming the '>>' before issuing the diagnostic.  */
	  cp_lexer_consume_token (parser->lexer);
	  error ("spurious %<>>%>, use %<>%> to terminate "
		 "a template argument list");
	}
    }
  else
    cp_parser_skip_to_end_of_template_parameter_list (parser);
  /* The `>' token might be a greater-than operator again now.  */
  parser->greater_than_is_operator_p
    = saved_greater_than_is_operator_p;
  /* Restore the SAVED_SCOPE.  */
  parser->scope = saved_scope;
  parser->qualifying_scope = saved_qualifying_scope;
  parser->object_scope = saved_object_scope;
  /* Evaluation resumes as it was outside the argument list.  */
  skip_evaluation = saved_skip_evaluation;

  return arguments;
}
/* MEMBER_FUNCTION is a member function, or a friend. If default
arguments, or the body of the function have not yet been parsed,
parse them now. */
static void
cp_parser_late_parsing_for_member (cp_parser* parser, tree member_function)
{
  /* If this member is a template, get the underlying
     FUNCTION_DECL.  */
  if (DECL_FUNCTION_TEMPLATE_P (member_function))
    member_function = DECL_TEMPLATE_RESULT (member_function);

  /* There should not be any class definitions in progress at this
     point; the bodies of members are only parsed outside of all class
     definitions.  */
  gcc_assert (parser->num_classes_being_defined == 0);
  /* While we're parsing the member functions we might encounter more
     classes.  We want to handle them right away, but we don't want
     them getting mixed up with functions that are currently in the
     queue.  */
  parser->unparsed_functions_queues
    = tree_cons (NULL_TREE, NULL_TREE, parser->unparsed_functions_queues);

  /* Make sure that any template parameters are in scope.  */
  maybe_begin_member_template_processing (member_function);

  /* If the body of the function has not yet been parsed, parse it
     now.  */
  if (DECL_PENDING_INLINE_P (member_function))
    {
      tree function_scope;
      cp_token_cache *tokens;

      /* The function is no longer pending; we are processing it.  */
      tokens = DECL_PENDING_INLINE_INFO (member_function);
      DECL_PENDING_INLINE_INFO (member_function) = NULL;
      DECL_PENDING_INLINE_P (member_function) = 0;

      /* If this is a local class, enter the scope of the containing
	 function.  */
      function_scope = current_function_decl;
      if (function_scope)
	push_function_context_to (function_scope);

      /* Push the body of the function onto the lexer stack.  */
      cp_parser_push_lexer_for_tokens (parser, tokens);

      /* Let the front end know that we are going to be defining this
	 function.  */
      start_preparsed_function (member_function, NULL_TREE,
				SF_PRE_PARSED | SF_INCLASS_INLINE);

      /* Don't do access checking if it is a templated function.  */
      if (processing_template_decl)
	push_deferring_access_checks (dk_no_check);

      /* Now, parse the body of the function.  */
      cp_parser_function_definition_after_declarator (parser,
						      /*inline_p=*/true);

      if (processing_template_decl)
	pop_deferring_access_checks ();

      /* Leave the scope of the containing function.  */
      if (function_scope)
	pop_function_context_from (function_scope);
      cp_parser_pop_lexer (parser);
    }

  /* Remove any template parameters from the symbol table.  */
  maybe_end_member_template_processing ();

  /* Restore the queue.  */
  parser->unparsed_functions_queues
    = TREE_CHAIN (parser->unparsed_functions_queues);
}
/* If DECL contains any default args, remember it on the unparsed
functions queue. */
static void
cp_parser_save_default_args (cp_parser* parser, tree decl)
{
  tree arg_type = TYPE_ARG_TYPES (TREE_TYPE (decl));

  /* Walk the parameter-type-list; finding one parameter that carries
     a default argument is enough to know DECL must be queued.  */
  while (arg_type)
    {
      if (TREE_PURPOSE (arg_type))
	{
	  /* Queue DECL, paired with the class being defined, on the
	     TREE_PURPOSE side of the unparsed-functions queue.  */
	  TREE_PURPOSE (parser->unparsed_functions_queues)
	    = tree_cons (current_class_type, decl,
			 TREE_PURPOSE (parser->unparsed_functions_queues));
	  return;
	}
      arg_type = TREE_CHAIN (arg_type);
    }
}
/* FN is a FUNCTION_DECL which may contain a parameter with an
   unparsed DEFAULT_ARG.  Parse the default args now.  This function
   assumes that the current scope is the scope in which the default
   argument should be processed.  */
static void
cp_parser_late_parsing_default_args (cp_parser *parser, tree fn)
{
  bool saved_local_variables_forbidden_p;
  tree parm;

  /* While we're parsing the default args, we might (due to the
     statement expression extension) encounter more classes.  We want
     to handle them right away, but we don't want them getting mixed
     up with default args that are currently in the queue.  */
  parser->unparsed_functions_queues
    = tree_cons (NULL_TREE, NULL_TREE, parser->unparsed_functions_queues);

  /* Local variable names (and the `this' keyword) may not appear
     in a default argument.  */
  saved_local_variables_forbidden_p = parser->local_variables_forbidden_p;
  parser->local_variables_forbidden_p = true;

  for (parm = TYPE_ARG_TYPES (TREE_TYPE (fn));
       parm;
       parm = TREE_CHAIN (parm))
    {
      cp_token_cache *tokens;
      tree default_arg = TREE_PURPOSE (parm);
      tree parsed_arg;
      VEC(tree,gc) *insts;
      tree copy;
      unsigned ix;

      /* Parameters without a default argument need no work.  */
      if (!default_arg)
	continue;

      if (TREE_CODE (default_arg) != DEFAULT_ARG)
	/* This can happen for a friend declaration for a function
	   already declared with default arguments.  */
	continue;

      /* Push the saved tokens for the default argument onto the parser's
	 lexer stack.  */
      tokens = DEFARG_TOKENS (default_arg);
      cp_parser_push_lexer_for_tokens (parser, tokens);

      /* Parse the assignment-expression.  */
      parsed_arg = cp_parser_assignment_expression (parser, /*cast_p=*/false);

      if (!processing_template_decl)
	parsed_arg = check_default_argument (TREE_VALUE (parm), parsed_arg);

      /* Replace the unparsed DEFAULT_ARG with the parsed form.  */
      TREE_PURPOSE (parm) = parsed_arg;

      /* Update any instantiations we've already created.  */
      for (insts = DEFARG_INSTANTIATIONS (default_arg), ix = 0;
	   VEC_iterate (tree, insts, ix, copy); ix++)
	TREE_PURPOSE (copy) = parsed_arg;

      /* If the token stream has not been completely used up, then
	 there was extra junk after the end of the default
	 argument.  */
      if (!cp_lexer_next_token_is (parser->lexer, CPP_EOF))
	cp_parser_error (parser, "expected %<,%>");

      /* Revert to the main lexer.  */
      cp_parser_pop_lexer (parser);
    }

  /* Make sure no default arg is missing.  */
  check_default_args (fn);

  /* Restore the state of local_variables_forbidden_p.  */
  parser->local_variables_forbidden_p = saved_local_variables_forbidden_p;

  /* Restore the queue.  */
  parser->unparsed_functions_queues
    = TREE_CHAIN (parser->unparsed_functions_queues);
}
/* Parse the operand of `sizeof' (or a similar operator). Returns
either a TYPE or an expression, depending on the form of the
input. The KEYWORD indicates which kind of expression we have
encountered. */
static tree
cp_parser_sizeof_operand (cp_parser* parser, enum rid keyword)
{
  static const char *format;
  tree expr = NULL_TREE;
  const char *saved_message;
  bool saved_integral_constant_expression_p;
  bool saved_non_integral_constant_expression_p;

  /* Initialize FORMAT the first time we get here.  */
  if (!format)
    format = "types may not be defined in '%s' expressions";

  /* Types cannot be defined in a `sizeof' expression.  Save away the
     old message.  */
  saved_message = parser->type_definition_forbidden_message;
  /* And create the new one, substituting the keyword's spelling for
     the "%s" in FORMAT.  The buffer is slightly over-allocated (the
     "%s" itself is counted) -- harmless.  */
  parser->type_definition_forbidden_message
    = XNEWVEC (const char, strlen (format)
	       + strlen (IDENTIFIER_POINTER (ridpointers[keyword]))
	       + 1 /* `\0' */);
  sprintf ((char *) parser->type_definition_forbidden_message,
	   format, IDENTIFIER_POINTER (ridpointers[keyword]));

  /* The restrictions on constant-expressions do not apply inside
     sizeof expressions.  */
  saved_integral_constant_expression_p
    = parser->integral_constant_expression_p;
  saved_non_integral_constant_expression_p
    = parser->non_integral_constant_expression_p;
  parser->integral_constant_expression_p = false;

  /* Do not actually evaluate the expression.  */
  ++skip_evaluation;
  /* If it's a `(', then we might be looking at the type-id
     construction.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN))
    {
      tree type;
      bool saved_in_type_id_in_expr_p;

      /* We can't be sure yet whether we're looking at a type-id or an
	 expression.  */
      cp_parser_parse_tentatively (parser);
      /* Consume the `('.  */
      cp_lexer_consume_token (parser->lexer);
      /* Parse the type-id.  */
      saved_in_type_id_in_expr_p = parser->in_type_id_in_expr_p;
      parser->in_type_id_in_expr_p = true;
      type = cp_parser_type_id (parser);
      parser->in_type_id_in_expr_p = saved_in_type_id_in_expr_p;
      /* Now, look for the trailing `)'.  */
      cp_parser_require (parser, CPP_CLOSE_PAREN, "%<)%>");
      /* If all went well, then we're done.  */
      if (cp_parser_parse_definitely (parser))
	{
	  cp_decl_specifier_seq decl_specs;

	  /* Build a trivial decl-specifier-seq.  */
	  clear_decl_specs (&decl_specs);
	  decl_specs.type = type;

	  /* Call grokdeclarator to figure out what type this is.  */
	  expr = grokdeclarator (NULL,
				 &decl_specs,
				 TYPENAME,
				 /*initialized=*/0,
				 /*attrlist=*/NULL);
	}
    }

  /* If the type-id production did not work out, then we must be
     looking at the unary-expression production.  */
  if (!expr)
    expr = cp_parser_unary_expression (parser, /*address_p=*/false,
				       /*cast_p=*/false);

  /* Go back to evaluating expressions.  */
  --skip_evaluation;

  /* Free the message we created.  */
  free ((char *) parser->type_definition_forbidden_message);
  /* And restore the old one.  */
  parser->type_definition_forbidden_message = saved_message;
  parser->integral_constant_expression_p
    = saved_integral_constant_expression_p;
  parser->non_integral_constant_expression_p
    = saved_non_integral_constant_expression_p;

  return expr;
}
/* If the current declaration has no declarator, return true. */
static bool
cp_parser_declares_only_class_p (cp_parser *parser)
{
  /* A declaration with no declarator is followed immediately by
     either a `;' (plain declaration) or a `,' (declarator list).  */
  cp_lexer *lexer = parser->lexer;

  if (cp_lexer_next_token_is (lexer, CPP_SEMICOLON))
    return true;
  return cp_lexer_next_token_is (lexer, CPP_COMMA);
}
/* Update the DECL_SPECS to reflect the storage class indicated by
KEYWORD. */
static void
cp_parser_set_storage_class (cp_parser *parser,
			     cp_decl_specifier_seq *decl_specs,
			     enum rid keyword)
{
  cp_storage_class storage_class;

  /* Storage-class-specifiers are not permitted inside an unbraced
     linkage-specification like `extern "C" int i;'.  */
  if (parser->in_unbraced_linkage_specification_p)
    {
      error ("invalid use of %qD in linkage specification",
	     ridpointers[keyword]);
      return;
    }
  else if (decl_specs->storage_class != sc_none)
    {
      /* A storage class was already recorded; flag the conflict and
	 let the error be diagnosed later.  */
      decl_specs->conflicting_specifiers_p = true;
      return;
    }

  /* `__thread' must follow, not precede, `extern' or `static'.  */
  if ((keyword == RID_EXTERN || keyword == RID_STATIC)
      && decl_specs->specs[(int) ds_thread])
    {
      error ("%<__thread%> before %qD", ridpointers[keyword]);
      decl_specs->specs[(int) ds_thread] = 0;
    }

  /* Map the keyword onto the corresponding storage class.  */
  switch (keyword)
    {
    case RID_AUTO:
      storage_class = sc_auto;
      break;
    case RID_REGISTER:
      storage_class = sc_register;
      break;
    case RID_STATIC:
      storage_class = sc_static;
      break;
    case RID_EXTERN:
      storage_class = sc_extern;
      break;
    case RID_MUTABLE:
      storage_class = sc_mutable;
      break;
    default:
      gcc_unreachable ();
    }
  decl_specs->storage_class = storage_class;

  /* A storage class specifier cannot be applied alongside a typedef
     specifier.  If there is a typedef specifier present then set
     conflicting_specifiers_p which will trigger an error later
     on in grokdeclarator.  */
  if (decl_specs->specs[(int)ds_typedef])
    decl_specs->conflicting_specifiers_p = true;
}
/* Update the DECL_SPECS to reflect the TYPE_SPEC. If USER_DEFINED_P
is true, the type is a user-defined type; otherwise it is a
built-in type specified by a keyword. */
static void
cp_parser_set_decl_spec_type (cp_decl_specifier_seq *decl_specs,
			      tree type_spec,
			      bool user_defined_p)
{
  decl_specs->any_specifiers_p = true;

  /* If the user tries to redeclare bool or wchar_t (with, for
     example, in "typedef int wchar_t;") we remember that this is what
     happened.  In system headers, we ignore these declarations so
     that G++ can work with system headers that are not C++-safe.  */
  if (decl_specs->specs[(int) ds_typedef]
      && !user_defined_p
      && (type_spec == boolean_type_node
	  || type_spec == wchar_type_node)
      && (decl_specs->type
	  || decl_specs->specs[(int) ds_long]
	  || decl_specs->specs[(int) ds_short]
	  || decl_specs->specs[(int) ds_unsigned]
	  || decl_specs->specs[(int) ds_signed]))
    {
      decl_specs->redefined_builtin_type = type_spec;
      if (!decl_specs->type)
	{
	  decl_specs->type = type_spec;
	  decl_specs->user_defined_type_p = false;
	}
    }
  else if (decl_specs->type)
    /* A type was already recorded; flag the sequence as containing
       multiple types, to be diagnosed later.  */
    decl_specs->multiple_types_p = true;
  else
    {
      /* This is the first (and so far only) type in the sequence;
	 record it.  */
      decl_specs->type = type_spec;
      decl_specs->user_defined_type_p = user_defined_p;
      decl_specs->redefined_builtin_type = NULL_TREE;
    }
}
/* DECL_SPECIFIERS is the representation of a decl-specifier-seq.
Returns TRUE iff `friend' appears among the DECL_SPECIFIERS. */
static bool
cp_parser_friend_p (const cp_decl_specifier_seq *decl_specifiers)
{
return decl_specifiers->specs[(int) ds_friend] != 0;
}
/* If the next token is of the indicated TYPE, consume it. Otherwise,
issue an error message indicating that TOKEN_DESC was expected.
Returns the token consumed, if the token had the appropriate type.
Otherwise, returns NULL. */
static cp_token *
cp_parser_require (cp_parser* parser,
		   enum cpp_ttype type,
		   const char* token_desc)
{
  /* A token of the wrong type means failure.  */
  if (!cp_lexer_next_token_is (parser->lexer, type))
    {
      /* Diagnose the mismatch -- unless we are parsing tentatively,
	 in which case cp_parser_simulate_error records the failure
	 silently.  */
      if (!cp_parser_simulate_error (parser))
	{
	  char *msg = concat ("expected ", token_desc, NULL);
	  cp_parser_error (parser, msg);
	  free (msg);
	}
      return NULL;
    }

  /* The expected token: consume it and hand it back.  */
  return cp_lexer_consume_token (parser->lexer);
}
/* An error message is produced if the next token is not '>'.
All further tokens are skipped until the desired token is
found or '{', '}', ';' or an unbalanced ')' or ']'. */
static void
cp_parser_skip_to_end_of_template_parameter_list (cp_parser* parser)
{
  /* Current level of '< ... >'.  */
  unsigned level = 0;
  /* Ignore '<' and '>' nested inside '( ... )' or '[ ... ]'.  */
  unsigned nesting_depth = 0;

  /* If the next token is already the '>' we want, consume it and we
     are done.  Otherwise cp_parser_require issues the error message
     and we skip ahead looking for the '>'.  */
  if (cp_parser_require (parser, CPP_GREATER, "%<>%>"))
    return;

  /* Skip tokens until the desired token is found.  */
  while (true)
    {
      /* Peek at the next token.  */
      switch (cp_lexer_peek_token (parser->lexer)->type)
	{
	case CPP_LESS:
	  if (!nesting_depth)
	    ++level;
	  break;

	case CPP_GREATER:
	  if (!nesting_depth && level-- == 0)
	    {
	      /* We've reached the token we want, consume it and stop.  */
	      cp_lexer_consume_token (parser->lexer);
	      return;
	    }
	  break;

	case CPP_OPEN_PAREN:
	case CPP_OPEN_SQUARE:
	  ++nesting_depth;
	  break;

	case CPP_CLOSE_PAREN:
	case CPP_CLOSE_SQUARE:
	  /* An unbalanced closer means the '>' will not be found;
	     give up without consuming the closer.  */
	  if (nesting_depth-- == 0)
	    return;
	  break;

	case CPP_EOF:
	case CPP_PRAGMA_EOL:
	case CPP_SEMICOLON:
	case CPP_OPEN_BRACE:
	case CPP_CLOSE_BRACE:
	  /* The '>' was probably forgotten, don't look further.  */
	  return;

	default:
	  break;
	}

      /* Consume this token.  */
      cp_lexer_consume_token (parser->lexer);
    }
}
/* If the next token is the indicated keyword, consume it. Otherwise,
issue an error message indicating that TOKEN_DESC was expected.
Returns the token consumed, if the token had the appropriate type.
Otherwise, returns NULL. */
static cp_token *
cp_parser_require_keyword (cp_parser* parser,
			   enum rid keyword,
			   const char* token_desc)
{
  cp_token *token = cp_parser_require (parser, CPP_KEYWORD, token_desc);

  /* NOTE(review): if the next token was a keyword other than KEYWORD,
     cp_parser_require has already consumed it before we diagnose the
     mismatch here -- confirm this is the intended recovery.  */
  if (token && token->keyword != keyword)
    {
      dyn_string_t error_msg;

      /* Format the error message.  */
      error_msg = dyn_string_new (0);
      dyn_string_append_cstr (error_msg, "expected ");
      dyn_string_append_cstr (error_msg, token_desc);
      cp_parser_error (parser, error_msg->s);
      dyn_string_delete (error_msg);
      return NULL;
    }

  return token;
}
/* Returns TRUE iff TOKEN is a token that can begin the body of a
function-definition. */
static bool
cp_parser_token_starts_function_definition_p (cp_token* token)
{
  /* `{' opens an ordinary function-body; `:' begins a
     ctor-initializer.  */
  if (token->type == CPP_OPEN_BRACE || token->type == CPP_COLON)
    return true;
  /* `try' opens a function-try-block; `return' begins the GNU named
     return value extension.  */
  return token->keyword == RID_TRY || token->keyword == RID_RETURN;
}
/* Returns TRUE iff the next token is the ":" or "{" beginning a class
definition. */
static bool
cp_parser_next_token_starts_class_definition_p (cp_parser *parser)
{
  cp_token *next = cp_lexer_peek_token (parser->lexer);

  /* A class definition opens with `{', or with the `:' that
     introduces a base-clause.  */
  switch (next->type)
    {
    case CPP_OPEN_BRACE:
    case CPP_COLON:
      return true;
    default:
      return false;
    }
}
/* Returns TRUE iff the next token is the "," or ">" ending a
template-argument. */
static bool
cp_parser_next_token_ends_template_argument_p (cp_parser *parser)
{
  cp_token *next = cp_lexer_peek_token (parser->lexer);

  /* Within a template-argument-list, `,' separates arguments and `>'
     closes the list.  */
  return next->type == CPP_COMMA || next->type == CPP_GREATER;
}
/* Returns TRUE iff the n-th token is a "<", or the n-th is a "[" and the
(n+1)-th is a ":" (which is a possible digraph typo for "< ::"). */
static bool
cp_parser_nth_token_starts_template_argument_list_p (cp_parser * parser,
						     size_t n)
{
  cp_token *token = cp_lexer_peek_nth_token (parser->lexer, n);

  /* An ordinary `<' certainly starts an argument list.  */
  if (token->type == CPP_LESS)
    return true;

  /* `<::' in the original source is lexed as the digraph `[' (with no
     whitespace before the following `:') -- a likely typo for
     `< ::'.  */
  if (token->type == CPP_OPEN_SQUARE && (token->flags & DIGRAPH))
    {
      cp_token *colon = cp_lexer_peek_nth_token (parser->lexer, n + 1);
      if (colon->type == CPP_COLON && !(colon->flags & PREV_WHITE))
	return true;
    }

  return false;
}
/* Returns the kind of tag indicated by TOKEN, if it is a class-key,
or none_type otherwise. */
static enum tag_types
cp_parser_token_is_class_key (cp_token* token)
{
  /* Map each class-key keyword onto its tag kind; any other token is
     not a class-key at all.  */
  if (token->keyword == RID_CLASS)
    return class_type;
  if (token->keyword == RID_STRUCT)
    return record_type;
  if (token->keyword == RID_UNION)
    return union_type;
  return none_type;
}
/* Issue an error message if the CLASS_KEY does not match the TYPE. */
static void
cp_parser_check_class_key (enum tag_types class_key, tree type)
{
  bool type_is_union = (TREE_CODE (type) == UNION_TYPE);
  bool key_is_union = (class_key == union_type);
  const char *key_name;

  /* Complain only when exactly one of the two is a union: `union'
     used for a non-union, or vice versa.  */
  if (type_is_union == key_is_union)
    return;

  if (key_is_union)
    key_name = "union";
  else if (class_key == record_type)
    key_name = "struct";
  else
    key_name = "class";
  pedwarn ("%qs tag used in naming %q#T", key_name, type);
}
/* Issue an error message if DECL is redeclared with different
access than its original declaration [class.access.spec/3].
This applies to nested classes and nested class templates.
[class.mem/1]. */
static void
cp_parser_check_access_in_redeclaration (tree decl)
{
  bool wants_private;
  bool wants_protected;

  /* Only class (or class template) members carry access that can be
     contradicted here.  */
  if (!CLASS_TYPE_P (TREE_TYPE (decl)))
    return;

  /* Compare the access recorded on DECL with the access region the
     redeclaration appears in.  */
  wants_private = (current_access_specifier == access_private_node);
  wants_protected = (current_access_specifier == access_protected_node);
  if (TREE_PRIVATE (decl) != wants_private
      || TREE_PROTECTED (decl) != wants_protected)
    error ("%qD redeclared with different access", decl);
}
/* Look for the `template' keyword, as a syntactic disambiguator.
Return TRUE iff it is present, in which case it will be
consumed. */
static bool
cp_parser_optional_template_keyword (cp_parser *parser)
{
  /* Nothing to do unless `template' is actually next.  */
  if (!cp_lexer_next_token_is_keyword (parser->lexer, RID_TEMPLATE))
    return false;

  /* Within a template, `template' may be needed as a disambiguator;
     elsewhere the parser can always figure out what is a template
     and what is not, so the keyword is an error there.  */
  if (processing_template_decl)
    {
      /* Consume the `template' keyword.  */
      cp_lexer_consume_token (parser->lexer);
      return true;
    }

  error ("%<template%> (as a disambiguator) is only allowed "
	 "within templates");
  /* If this part of the token stream is rescanned, the same error
     message would be generated, so purge the token from the
     stream.  */
  cp_lexer_purge_token (parser->lexer);
  return false;
}
/* The next token is a CPP_NESTED_NAME_SPECIFIER. Consume the token,
set PARSER->SCOPE, and perform other related actions. */
static void
cp_parser_pre_parsed_nested_name_specifier (cp_parser *parser)
{
  int i;
  struct tree_check *check_value;
  deferred_access_check *chk;
  VEC (deferred_access_check,gc) *checks;

  /* Get the stored value.  The nested-name-specifier was parsed
     earlier and cached on the token together with its deferred
     access checks.  */
  check_value = cp_lexer_consume_token (parser->lexer)->u.tree_check_value;
  /* Perform any access checks that were deferred.  */
  checks = check_value->checks;
  if (checks)
    {
      for (i = 0 ;
	   VEC_iterate (deferred_access_check, checks, i, chk) ;
	   ++i)
	{
	  perform_or_defer_access_check (chk->binfo,
					 chk->decl,
					 chk->diag_decl);
	}
    }
  /* Set the scope from the stored value.  */
  parser->scope = check_value->value;
  parser->qualifying_scope = check_value->qualifying_scope;
  /* Clear the object scope; the cached value carries none.  */
  parser->object_scope = NULL_TREE;
}
/* Consume tokens up through a non-nested END token. */
static void
cp_parser_cache_group (cp_parser *parser,
		       enum cpp_ttype end,
		       unsigned depth)
{
  while (true)
    {
      cp_token *token;

      /* Abort a parenthesized expression (or any group at the
	 outermost level) if we encounter a semicolon; it cannot
	 belong to the group being cached, and it is not consumed.  */
      if ((end == CPP_CLOSE_PAREN || depth == 0)
	  && cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
	return;
      /* If we've reached the end of the file, stop.  A pragma's
	 end-of-line marker likewise ends any group other than the
	 pragma itself.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_EOF)
	  || (end != CPP_PRAGMA_EOL
	      && cp_lexer_next_token_is (parser->lexer, CPP_PRAGMA_EOL)))
	return;
      /* Consume the next token.  */
      token = cp_lexer_consume_token (parser->lexer);
      /* See if it starts a new group.  */
      if (token->type == CPP_OPEN_BRACE)
	{
	  cp_parser_cache_group (parser, CPP_CLOSE_BRACE, depth + 1);
	  /* At the outermost level the brace-enclosed group IS the
	     group being cached, so we are done once it ends.  */
	  if (depth == 0)
	    return;
	}
      else if (token->type == CPP_OPEN_PAREN)
	cp_parser_cache_group (parser, CPP_CLOSE_PAREN, depth + 1);
      else if (token->type == CPP_PRAGMA)
	cp_parser_cache_group (parser, CPP_PRAGMA_EOL, depth + 1);
      else if (token->type == end)
	return;
    }
}
/* Begin parsing tentatively. We always save tokens while parsing
tentatively so that if the tentative parsing fails we can restore the
tokens. */
static void
cp_parser_parse_tentatively (cp_parser* parser)
{
  /* Enter a new parsing context.  */
  parser->context = cp_parser_context_new (parser->context);
  /* Begin saving tokens so they can be rolled back if the tentative
     parse fails.  */
  cp_lexer_save_tokens (parser->lexer);
  /* In order to avoid repetitive access control error messages,
     access checks are queued up until we are no longer parsing
     tentatively.  */
  push_deferring_access_checks (dk_deferred);
}
/* Commit to the currently active tentative parse. */
static void
cp_parser_commit_to_tentative_parse (cp_parser* parser)
{
  cp_parser_context *context;
  cp_lexer *lexer;

  /* Mark all of the levels as committed.  The walk stops at the
     first level that is already committed.  */
  lexer = parser->lexer;
  for (context = parser->context; context->next; context = context->next)
    {
      if (context->status == CP_PARSER_STATUS_KIND_COMMITTED)
	break;
      context->status = CP_PARSER_STATUS_KIND_COMMITTED;
      /* Find the lexer in the chain that is currently saving tokens
	 and commit its saved tokens.  */
      while (!cp_lexer_saving_tokens (lexer))
	lexer = lexer->next;
      cp_lexer_commit_tokens (lexer);
    }
}
/* Abort the currently active tentative parse. All consumed tokens
will be rolled back, and no diagnostics will be issued. */
static void
cp_parser_abort_tentative_parse (cp_parser* parser)
{
  /* Record a (simulated) failure so that cp_parser_parse_definitely
     rolls back the consumed tokens.  */
  cp_parser_simulate_error (parser);
  /* Now, pretend that we want to see if the construct was
     successfully parsed.  */
  cp_parser_parse_definitely (parser);
}
/* Stop parsing tentatively. If a parse error has occurred, restore the
token stream. Otherwise, commit to the tokens we have consumed.
Returns true if no error occurred; false otherwise. */
static bool
cp_parser_parse_definitely (cp_parser* parser)
{
  bool error_occurred;
  cp_parser_context *context;

  /* Remember whether or not an error occurred, since we are about to
     destroy that information.  */
  error_occurred = cp_parser_error_occurred (parser);
  /* Remove the topmost context from the stack.  */
  context = parser->context;
  parser->context = context->next;
  /* If no parse errors occurred, commit to the tentative parse.  */
  if (!error_occurred)
    {
      /* Commit to the tokens read tentatively, unless that was
	 already done.  */
      if (context->status != CP_PARSER_STATUS_KIND_COMMITTED)
	cp_lexer_commit_tokens (parser->lexer);

      pop_to_parent_deferring_access_checks ();
    }
  /* Otherwise, if errors occurred, roll back our state so that things
     are just as they were before we began the tentative parse.  */
  else
    {
      cp_lexer_rollback_tokens (parser->lexer);
      pop_deferring_access_checks ();
    }
  /* Add the context to the front of the free list, to be reused by
     cp_parser_context_new.  */
  context->next = cp_parser_context_free_list;
  cp_parser_context_free_list = context;

  return !error_occurred;
}
/* Returns true if we are parsing tentatively and are not committed to
this tentative parse. */
static bool
cp_parser_uncommitted_to_tentative_parse_p (cp_parser* parser)
{
  /* We must both be inside a tentative parse and not yet have
     committed to it.  */
  if (!cp_parser_parsing_tentatively (parser))
    return false;
  return parser->context->status != CP_PARSER_STATUS_KIND_COMMITTED;
}
/* Returns nonzero iff an error has occurred during the most recent
tentative parse. */
static bool
cp_parser_error_occurred (cp_parser* parser)
{
  /* Errors are only tracked while parsing tentatively.  */
  if (!cp_parser_parsing_tentatively (parser))
    return false;
  return parser->context->status == CP_PARSER_STATUS_KIND_ERROR;
}
/* Returns nonzero if GNU extensions are allowed. */
static bool
cp_parser_allow_gnu_extensions_p (cp_parser* parser)
{
  /* The flag is recorded on the parser when it is created.  */
  return parser->allow_gnu_extensions_p != 0;
}
/* Objective-C++ Productions */
/* Parse an Objective-C expression, which feeds into a primary-expression
above.
objc-expression:
objc-message-expression
objc-string-literal
objc-encode-expression
objc-protocol-expression
objc-selector-expression
Returns a tree representation of the expression. */
static tree
cp_parser_objc_expression (cp_parser* parser)
{
  /* Try to figure out what kind of declaration is present.  */
  cp_token *kwd = cp_lexer_peek_token (parser->lexer);

  switch (kwd->type)
    {
    case CPP_OPEN_SQUARE:
      /* `[' opens a message expression.  */
      return cp_parser_objc_message_expression (parser);

    case CPP_OBJC_STRING:
      /* An `@"..."' string literal: consume it and build the string
	 object.  */
      kwd = cp_lexer_consume_token (parser->lexer);
      return objc_build_string_object (kwd->u.value);

    case CPP_KEYWORD:
      switch (kwd->keyword)
	{
	case RID_AT_ENCODE:
	  return cp_parser_objc_encode_expression (parser);

	case RID_AT_PROTOCOL:
	  return cp_parser_objc_protocol_expression (parser);

	case RID_AT_SELECTOR:
	  return cp_parser_objc_selector_expression (parser);

	default:
	  break;
	}
      /* An unrecognized `@' keyword falls through to the error
	 handling below.  */
    default:
      error ("misplaced %<@%D%> Objective-C++ construct", kwd->u.value);
      cp_parser_skip_to_end_of_block_or_statement (parser);
    }

  return error_mark_node;
}
/* Parse an Objective-C message expression.

   objc-message-expression:
     [ objc-message-receiver objc-message-args ]

   Returns a representation of an Objective-C message.  */

static tree
cp_parser_objc_message_expression (cp_parser* parser)
{
  tree rcv, args, msg;

  /* Consume the opening '['.  */
  cp_lexer_consume_token (parser->lexer);
  rcv = cp_parser_objc_message_receiver (parser);
  args = cp_parser_objc_message_args (parser);
  cp_parser_require (parser, CPP_CLOSE_SQUARE, "`]'");

  msg = build_tree_list (rcv, args);
  return objc_build_message_expr (msg);
}
/* Parse an objc-message-receiver.

   objc-message-receiver:
     expression
     simple-type-specifier

   Returns a representation of the type or expression.  */

static tree
cp_parser_objc_message_receiver (cp_parser* parser)
{
  tree receiver;

  /* An Objective-C message receiver may be either (1) a type
     or (2) an expression.  Try the expression reading tentatively
     first; if that fails, fall back to a simple type specifier.  */
  cp_parser_parse_tentatively (parser);
  receiver = cp_parser_expression (parser, false);
  if (!cp_parser_parse_definitely (parser))
    {
      receiver = cp_parser_simple_type_specifier (parser,
                                                  /*decl_specs=*/NULL,
                                                  CP_PARSER_FLAGS_NONE);
      receiver = objc_get_class_reference (receiver);
    }
  return receiver;
}
/* Parse the arguments and selectors comprising an Objective-C message.

   objc-message-args:
     objc-selector
     objc-selector-args
     objc-selector-args , objc-comma-args

   objc-selector-args:
     objc-selector [opt] : assignment-expression
     objc-selector-args objc-selector [opt] : assignment-expression

   objc-comma-args:
     assignment-expression
     objc-comma-args , assignment-expression

   Returns a TREE_LIST, with TREE_PURPOSE containing a list of
   selector arguments and TREE_VALUE containing a list of comma
   arguments.  */

static tree
cp_parser_objc_message_args (cp_parser* parser)
{
  tree sel_args = NULL_TREE, addl_args = NULL_TREE;
  bool maybe_unary_selector_p = true;
  cp_token *token = cp_lexer_peek_token (parser->lexer);

  while (cp_parser_objc_selector_p (token->type) || token->type == CPP_COLON)
    {
      tree selector = NULL_TREE, arg;

      /* A bare ':' is a selector component without a name.  */
      if (token->type != CPP_COLON)
        selector = cp_parser_objc_selector (parser);

      /* Detect if we have a unary selector: a first selector name not
         followed by ':' means there are no arguments at all, and the
         whole message is just that selector.  */
      if (maybe_unary_selector_p
          && cp_lexer_next_token_is_not (parser->lexer, CPP_COLON))
        return build_tree_list (selector, NULL_TREE);

      maybe_unary_selector_p = false;
      cp_parser_require (parser, CPP_COLON, "`:'");
      arg = cp_parser_assignment_expression (parser, false);

      /* Append this (selector . argument) pair to the selector args.  */
      sel_args
        = chainon (sel_args,
                   build_tree_list (selector, arg));

      token = cp_lexer_peek_token (parser->lexer);
    }

  /* Handle non-selector arguments, if any. */
  while (token->type == CPP_COMMA)
    {
      tree arg;

      cp_lexer_consume_token (parser->lexer);
      arg = cp_parser_assignment_expression (parser, false);

      addl_args
        = chainon (addl_args,
                   build_tree_list (NULL_TREE, arg));

      token = cp_lexer_peek_token (parser->lexer);
    }

  return build_tree_list (sel_args, addl_args);
}
/* Parse an Objective-C encode expression.

   objc-encode-expression:
     @encode objc-typename

   Returns an encoded representation of the type argument.  */

static tree
cp_parser_objc_encode_expression (cp_parser* parser)
{
  tree encoded_type;

  /* Eat '@encode' and the mandatory parenthesized type-id.  */
  cp_lexer_consume_token (parser->lexer);
  cp_parser_require (parser, CPP_OPEN_PAREN, "`('");
  encoded_type = complete_type (cp_parser_type_id (parser));
  cp_parser_require (parser, CPP_CLOSE_PAREN, "`)'");

  if (!encoded_type)
    {
      error ("%<@encode%> must specify a type as an argument");
      return error_mark_node;
    }

  return objc_build_encode_expr (encoded_type);
}
/* Parse an Objective-C @defs expression.  */

static tree
cp_parser_objc_defs_expression (cp_parser *parser)
{
  tree class_name;

  /* Eat '@defs', then the parenthesized class name.  */
  cp_lexer_consume_token (parser->lexer);
  cp_parser_require (parser, CPP_OPEN_PAREN, "`('");
  class_name = cp_parser_identifier (parser);
  cp_parser_require (parser, CPP_CLOSE_PAREN, "`)'");

  return objc_get_class_ivars (class_name);
}
/* Parse an Objective-C protocol expression.

   objc-protocol-expression:
     @protocol ( identifier )

   Returns a representation of the protocol expression.  */

static tree
cp_parser_objc_protocol_expression (cp_parser* parser)
{
  tree proto_name;

  /* Eat '@protocol', then the parenthesized protocol name.  */
  cp_lexer_consume_token (parser->lexer);
  cp_parser_require (parser, CPP_OPEN_PAREN, "`('");
  proto_name = cp_parser_identifier (parser);
  cp_parser_require (parser, CPP_CLOSE_PAREN, "`)'");

  return objc_build_protocol_expr (proto_name);
}
/* Parse an Objective-C selector expression.

   objc-selector-expression:
     @selector ( objc-method-signature )

   objc-method-signature:
     objc-selector
     objc-selector-seq

   objc-selector-seq:
     objc-selector :
     objc-selector-seq objc-selector :

   Returns a representation of the method selector.  */

static tree
cp_parser_objc_selector_expression (cp_parser* parser)
{
  tree sel_seq = NULL_TREE;
  bool maybe_unary_selector_p = true;
  cp_token *token;

  cp_lexer_consume_token (parser->lexer);  /* Eat '@selector'.  */
  cp_parser_require (parser, CPP_OPEN_PAREN, "`('");
  token = cp_lexer_peek_token (parser->lexer);

  /* '::' (CPP_SCOPE) stands for two consecutive unnamed ':'
     components, which is why it is accepted alongside selectors
     and single colons.  */
  while (cp_parser_objc_selector_p (token->type) || token->type == CPP_COLON
         || token->type == CPP_SCOPE)
    {
      tree selector = NULL_TREE;

      /* NOTE(review): the "|| token->type == CPP_SCOPE" clause below is
         redundant — a CPP_SCOPE token already satisfies "!= CPP_COLON" —
         but it is harmless; kept as-is.  */
      if (token->type != CPP_COLON
          || token->type == CPP_SCOPE)
        selector = cp_parser_objc_selector (parser);

      if (cp_lexer_next_token_is_not (parser->lexer, CPP_COLON)
          && cp_lexer_next_token_is_not (parser->lexer, CPP_SCOPE))
        {
          /* Detect if we have a unary selector.  */
          if (maybe_unary_selector_p)
            {
              sel_seq = selector;
              goto finish_selector;
            }
          else
            {
              cp_parser_error (parser, "expected %<:%>");
            }
        }
      maybe_unary_selector_p = false;
      token = cp_lexer_consume_token (parser->lexer);

      if (token->type == CPP_SCOPE)
        {
          /* '::' contributes two unnamed components: the current
             selector (if any) plus one extra empty slot.  */
          sel_seq
            = chainon (sel_seq,
                       build_tree_list (selector, NULL_TREE));
          sel_seq
            = chainon (sel_seq,
                       build_tree_list (NULL_TREE, NULL_TREE));
        }
      else
        sel_seq
          = chainon (sel_seq,
                     build_tree_list (selector, NULL_TREE));

      token = cp_lexer_peek_token (parser->lexer);
    }

 finish_selector:
  cp_parser_require (parser, CPP_CLOSE_PAREN, "`)'");

  return objc_build_selector_expr (sel_seq);
}
/* Parse a list of identifiers.

   objc-identifier-list:
     identifier
     objc-identifier-list , identifier

   Returns a TREE_LIST of identifier nodes.  */

static tree
cp_parser_objc_identifier_list (cp_parser* parser)
{
  tree list;

  /* The first identifier is mandatory.  */
  list = build_tree_list (NULL_TREE, cp_parser_identifier (parser));

  /* Collect ", identifier" pairs for as long as commas follow.  */
  while (cp_lexer_peek_token (parser->lexer)->type == CPP_COMMA)
    {
      cp_lexer_consume_token (parser->lexer);  /* Eat ','.  */
      list = chainon (list,
                      build_tree_list (NULL_TREE,
                                       cp_parser_identifier (parser)));
    }

  return list;
}
/* Parse an Objective-C alias declaration.

   objc-alias-declaration:
     @compatibility_alias identifier identifier ;

   This function registers the alias mapping with the Objective-C
   front-end.  It returns nothing.  */

static void
cp_parser_objc_alias_declaration (cp_parser* parser)
{
  tree new_name, old_name;

  /* Eat '@compatibility_alias'; the two identifiers follow.  */
  cp_lexer_consume_token (parser->lexer);
  new_name = cp_parser_identifier (parser);
  old_name = cp_parser_identifier (parser);
  objc_declare_alias (new_name, old_name);
  cp_parser_consume_semicolon_at_end_of_statement (parser);
}
/* Parse an Objective-C class forward-declaration.

   objc-class-declaration:
     @class objc-identifier-list ;

   The function registers the forward declarations with the Objective-C
   front-end.  It returns nothing.  */

static void
cp_parser_objc_class_declaration (cp_parser* parser)
{
  tree class_names;

  cp_lexer_consume_token (parser->lexer);  /* Eat '@class'.  */
  class_names = cp_parser_objc_identifier_list (parser);
  objc_declare_class (class_names);
  cp_parser_consume_semicolon_at_end_of_statement (parser);
}
/* Parse a list of Objective-C protocol references.

   objc-protocol-refs-opt:
     objc-protocol-refs [opt]

   objc-protocol-refs:
     < objc-identifier-list >

   Returns a TREE_LIST of identifiers, if any.  */

static tree
cp_parser_objc_protocol_refs_opt (cp_parser* parser)
{
  tree refs = NULL_TREE;

  /* The whole production is optional; it is present only when the
     next token is '<'.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_LESS))
    {
      cp_lexer_consume_token (parser->lexer);  /* Eat '<'.  */
      refs = cp_parser_objc_identifier_list (parser);
      cp_parser_require (parser, CPP_GREATER, "`>'");
    }

  return refs;
}
/* Parse an Objective-C visibility specification.  If the next token is
   one of '@private'/'@protected'/'@public', record the corresponding
   visibility with the front-end and consume the token; otherwise do
   nothing.  The integer codes passed to objc_set_visibility are, per
   the case labels below: 2 = private, 0 = protected, 1 = public.  */

static void
cp_parser_objc_visibility_spec (cp_parser* parser)
{
  cp_token *vis = cp_lexer_peek_token (parser->lexer);

  switch (vis->keyword)
    {
    case RID_AT_PRIVATE:
      objc_set_visibility (2);
      break;
    case RID_AT_PROTECTED:
      objc_set_visibility (0);
      break;
    case RID_AT_PUBLIC:
      objc_set_visibility (1);
      break;
    default:
      /* Not a visibility keyword; leave the token unconsumed.  */
      return;
    }

  /* Eat '@private'/'@protected'/'@public'.  */
  cp_lexer_consume_token (parser->lexer);
}
/* Parse an Objective-C method type.  A '+' marks a class method,
   anything else (normally '-') an instance method.  */

static void
cp_parser_objc_method_type (cp_parser* parser)
{
  cp_token *token = cp_lexer_consume_token (parser->lexer);

  if (token->type == CPP_PLUS)
    objc_set_method_type (PLUS_EXPR);
  else
    objc_set_method_type (MINUS_EXPR);
}
/* Parse an Objective-C protocol qualifier.  Collects any run of
   'in'/'out'/'inout'/'bycopy'/'byref'/'oneway' identifiers and returns
   them as a TREE_LIST (most recent first).  */

static tree
cp_parser_objc_protocol_qualifiers (cp_parser* parser)
{
  tree quals = NULL_TREE;

  for (;;)
    {
      cp_token *tok = cp_lexer_peek_token (parser->lexer);
      tree id = tok->u.value;
      bool is_qual = (id && TREE_CODE (id) == IDENTIFIER_NODE
                      && (id == ridpointers [(int) RID_IN]
                          || id == ridpointers [(int) RID_OUT]
                          || id == ridpointers [(int) RID_INOUT]
                          || id == ridpointers [(int) RID_BYCOPY]
                          || id == ridpointers [(int) RID_BYREF]
                          || id == ridpointers [(int) RID_ONEWAY]));

      if (!is_qual)
        break;

      quals = tree_cons (NULL_TREE, id, quals);
      cp_lexer_consume_token (parser->lexer);
    }

  return quals;
}
/* Parse an Objective-C typename: an optional parenthesized group of
   protocol qualifiers followed by an optional type-id.  Returns a
   TREE_LIST of (qualifiers . type), or NULL_TREE when no '(' is
   present.  */

static tree
cp_parser_objc_typename (cp_parser* parser)
{
  tree result = NULL_TREE;

  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN))
    {
      tree quals;
      tree type = NULL_TREE;

      cp_lexer_consume_token (parser->lexer);  /* Eat '('.  */
      quals = cp_parser_objc_protocol_qualifiers (parser);

      /* An ObjC type name may consist of just protocol qualifiers, in
         which case the type shall default to 'id'.  */
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_CLOSE_PAREN))
        type = cp_parser_type_id (parser);

      cp_parser_require (parser, CPP_CLOSE_PAREN, "`)'");
      result = build_tree_list (quals, type);
    }

  return result;
}
/* Check to see if TYPE refers to an Objective-C selector name. */
static bool
cp_parser_objc_selector_p (enum cpp_ttype type)
{
return (type == CPP_NAME || type == CPP_KEYWORD
|| type == CPP_AND_AND || type == CPP_AND_EQ || type == CPP_AND
|| type == CPP_OR || type == CPP_COMPL || type == CPP_NOT
|| type == CPP_NOT_EQ || type == CPP_OR_OR || type == CPP_OR_EQ
|| type == CPP_XOR || type == CPP_XOR_EQ);
}
/* Parse an Objective-C selector.  Consumes one token; returns its
   identifier, the spelled-out name for a C++ alternative operator
   token, or error_mark_node after a diagnostic for anything else.  */

static tree
cp_parser_objc_selector (cp_parser* parser)
{
  cp_token *token = cp_lexer_consume_token (parser->lexer);

  if (!cp_parser_objc_selector_p (token->type))
    {
      error ("invalid Objective-C++ selector name");
      return error_mark_node;
    }

  /* C++ operator names are allowed to appear in ObjC selectors.  Map
     each alternative-operator token back to its identifier spelling.  */
  switch (token->type)
    {
    case CPP_AND_AND: return get_identifier ("and");
    case CPP_AND_EQ: return get_identifier ("and_eq");
    case CPP_AND: return get_identifier ("bitand");
    case CPP_OR: return get_identifier ("bitor");
    case CPP_COMPL: return get_identifier ("compl");
    case CPP_NOT: return get_identifier ("not");
    case CPP_NOT_EQ: return get_identifier ("not_eq");
    case CPP_OR_OR: return get_identifier ("or");
    case CPP_OR_EQ: return get_identifier ("or_eq");
    case CPP_XOR: return get_identifier ("xor");
    case CPP_XOR_EQ: return get_identifier ("xor_eq");
    default: return token->u.value;
    }
}
/* Parse an Objective-C params list: the keyword (selector-component)
   parameters of a method declaration.  Returns either a bare selector
   identifier (unary selector, no parameters) or a chain of keyword
   declarations built by objc_build_keyword_decl.  */

static tree
cp_parser_objc_method_keyword_params (cp_parser* parser)
{
  tree params = NULL_TREE;
  bool maybe_unary_selector_p = true;
  cp_token *token = cp_lexer_peek_token (parser->lexer);

  while (cp_parser_objc_selector_p (token->type) || token->type == CPP_COLON)
    {
      tree selector = NULL_TREE, typename, identifier;

      /* A bare ':' is a selector component without a name.  */
      if (token->type != CPP_COLON)
        selector = cp_parser_objc_selector (parser);

      /* Detect if we have a unary selector: a first selector name not
         followed by ':' means the method takes no keyword parameters,
         and the selector itself is returned.  */
      if (maybe_unary_selector_p
          && cp_lexer_next_token_is_not (parser->lexer, CPP_COLON))
        return selector;

      maybe_unary_selector_p = false;
      cp_parser_require (parser, CPP_COLON, "`:'");
      typename = cp_parser_objc_typename (parser);
      identifier = cp_parser_identifier (parser);

      params
        = chainon (params,
                   objc_build_keyword_decl (selector,
                                            typename,
                                            identifier));

      token = cp_lexer_peek_token (parser->lexer);
    }

  return params;
}
/* Parse the non-keyword Objective-C params: the optional ", parm" and
   ", ..." trailers of a method declaration.  *ELLIPSISP is set to true
   iff a trailing '...' was seen.  Returns a TREE_LIST whose head is a
   dummy node; the parsed parameters are chained after it, which is why
   the result of chainon below can safely be discarded.  */

static tree
cp_parser_objc_method_tail_params_opt (cp_parser* parser, bool *ellipsisp)
{
  tree params = make_node (TREE_LIST);
  cp_token *token = cp_lexer_peek_token (parser->lexer);
  *ellipsisp = false; /* Initially, assume no ellipsis.  */

  while (token->type == CPP_COMMA)
    {
      cp_parameter_declarator *parmdecl;
      tree parm;

      cp_lexer_consume_token (parser->lexer);  /* Eat ','.  */
      token = cp_lexer_peek_token (parser->lexer);

      if (token->type == CPP_ELLIPSIS)
        {
          cp_lexer_consume_token (parser->lexer);  /* Eat '...'.  */
          *ellipsisp = true;
          break;
        }

      /* Parse an ordinary C++ parameter declaration.  */
      parmdecl = cp_parser_parameter_declaration (parser, false, NULL);
      parm = grokdeclarator (parmdecl->declarator,
                             &parmdecl->decl_specifiers,
                             PARM, /*initialized=*/0,
                             /*attrlist=*/NULL);

      /* Append to the dummy-headed list (in place).  */
      chainon (params, build_tree_list (NULL_TREE, parm));
      token = cp_lexer_peek_token (parser->lexer);
    }

  return params;
}
/* Parse a linkage specification, a pragma, an extra semicolon or a
   block — the non-ObjC++ code that may appear interspersed inside an
   @interface or @implementation.  */

static void
cp_parser_objc_interstitial_code (cp_parser* parser)
{
  cp_token *token = cp_lexer_peek_token (parser->lexer);

  /* If the next token is `extern' and the following token is a string
     literal, then we have a linkage specification.  */
  if (token->keyword == RID_EXTERN
      && cp_parser_is_string_literal (cp_lexer_peek_nth_token (parser->lexer, 2)))
    {
      cp_parser_linkage_specification (parser);
      return;
    }

  /* Handle #pragma, if any.  */
  if (token->type == CPP_PRAGMA)
    {
      cp_parser_pragma (parser, pragma_external);
      return;
    }

  /* Allow stray semicolons.  */
  if (token->type == CPP_SEMICOLON)
    {
      cp_lexer_consume_token (parser->lexer);
      return;
    }

  /* Finally, try to parse a block-declaration, or a function-definition.  */
  cp_parser_block_declaration (parser, /*statement_p=*/false);
}
/* Parse a method signature: the +/- marker, the optional return type,
   the keyword parameters and the optional tail parameters.  */

static tree
cp_parser_objc_method_signature (cp_parser* parser)
{
  tree ret_type, keyword_parms, tail_parms;
  bool has_ellipsis = false;

  cp_parser_objc_method_type (parser);
  ret_type = cp_parser_objc_typename (parser);
  keyword_parms = cp_parser_objc_method_keyword_params (parser);
  tail_parms = cp_parser_objc_method_tail_params_opt (parser, &has_ellipsis);

  return objc_build_method_signature (ret_type, keyword_parms,
                                      tail_parms, has_ellipsis);
}
/* Parse an Objective-C method prototype list (the body of an
   @interface or @protocol), up to and including the '@end' token,
   then finish the interface.  */

static void
cp_parser_objc_method_prototype_list (cp_parser* parser)
{
  cp_token *token = cp_lexer_peek_token (parser->lexer);

  while (token->keyword != RID_AT_END)
    {
      if (token->type == CPP_PLUS || token->type == CPP_MINUS)
        {
          /* '+' or '-' starts a method declaration.  */
          objc_add_method_declaration
            (cp_parser_objc_method_signature (parser));
          cp_parser_consume_semicolon_at_end_of_statement (parser);
        }
      else
        /* Allow for interspersed non-ObjC++ code.  */
        cp_parser_objc_interstitial_code (parser);

      token = cp_lexer_peek_token (parser->lexer);
    }

  cp_lexer_consume_token (parser->lexer);  /* Eat '@end'.  */
  objc_finish_interface ();
}
/* Parse an Objective-C method definition list (the body of an
   @implementation), up to and including the '@end' token, then
   finish the implementation.  */

static void
cp_parser_objc_method_definition_list (cp_parser* parser)
{
  cp_token *token = cp_lexer_peek_token (parser->lexer);

  while (token->keyword != RID_AT_END)
    {
      tree meth;

      if (token->type == CPP_PLUS || token->type == CPP_MINUS)
        {
          /* Defer access checks while the signature is parsed; they are
             performed once the method declaration is in place.  */
          push_deferring_access_checks (dk_deferred);
          objc_start_method_definition
            (cp_parser_objc_method_signature (parser));

          /* For historical reasons, we accept an optional semicolon.  */
          if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
            cp_lexer_consume_token (parser->lexer);

          perform_deferred_access_checks ();
          stop_deferring_access_checks ();
          meth = cp_parser_function_definition_after_declarator (parser,
                                                                 false);
          pop_deferring_access_checks ();
          objc_finish_method_definition (meth);
        }
      else
        /* Allow for interspersed non-ObjC++ code.  */
        cp_parser_objc_interstitial_code (parser);

      token = cp_lexer_peek_token (parser->lexer);
    }

  cp_lexer_consume_token (parser->lexer);  /* Eat '@end'.  */
  objc_finish_implementation ();
}
/* Parse Objective-C ivars: an optional brace-enclosed list of instance
   variable declarations (with optional visibility markers and
   bitfields), registering each with objc_add_instance_variable.  */

static void
cp_parser_objc_class_ivars (cp_parser* parser)
{
  cp_token *token = cp_lexer_peek_token (parser->lexer);

  if (token->type != CPP_OPEN_BRACE)
    return; /* No ivars specified.  */

  cp_lexer_consume_token (parser->lexer);  /* Eat '{'.  */
  token = cp_lexer_peek_token (parser->lexer);

  while (token->type != CPP_CLOSE_BRACE)
    {
      cp_decl_specifier_seq declspecs;
      int decl_class_or_enum_p;
      tree prefix_attributes;

      /* Each declaration may be preceded by @private/@protected/@public.  */
      cp_parser_objc_visibility_spec (parser);

      if (cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_BRACE))
        break;

      cp_parser_decl_specifier_seq (parser,
                                    CP_PARSER_FLAGS_OPTIONAL,
                                    &declspecs,
                                    &decl_class_or_enum_p);

      /* Hold the decl-specifier attributes aside; they apply to every
         declarator in this declaration.  */
      prefix_attributes = declspecs.attributes;
      declspecs.attributes = NULL_TREE;

      /* Keep going until we hit the `;' at the end of the
         declaration.  */
      while (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
        {
          tree width = NULL_TREE, attributes, first_attribute, decl;
          cp_declarator *declarator = NULL;
          int ctor_dtor_or_conv_p;

          /* Check for a (possibly unnamed) bitfield declaration.  */
          token = cp_lexer_peek_token (parser->lexer);
          if (token->type == CPP_COLON)
            /* Unnamed bitfield: jump straight to the width.  */
            goto eat_colon;

          if (token->type == CPP_NAME
              && (cp_lexer_peek_nth_token (parser->lexer, 2)->type
                  == CPP_COLON))
            {
              /* Get the name of the bitfield.  */
              declarator = make_id_declarator (NULL_TREE,
                                               cp_parser_identifier (parser),
                                               sfk_none);

             eat_colon:
              cp_lexer_consume_token (parser->lexer);  /* Eat ':'.  */
              /* Get the width of the bitfield.  */
              width
                = cp_parser_constant_expression (parser,
                                                 /*allow_non_constant=*/false,
                                                 NULL);
            }
          else
            {
              /* Parse the declarator.  */
              declarator
                = cp_parser_declarator (parser, CP_PARSER_DECLARATOR_NAMED,
                                        &ctor_dtor_or_conv_p,
                                        /*parenthesized_p=*/NULL,
                                        /*member_p=*/false);
            }

          /* Look for attributes that apply to the ivar.  */
          attributes = cp_parser_attributes_opt (parser);
          /* Remember which attributes are prefix attributes and
             which are not.  */
          first_attribute = attributes;
          /* Combine the attributes.  */
          attributes = chainon (prefix_attributes, attributes);

          if (width)
            {
              /* Create the bitfield declaration.  */
              decl = grokbitfield (declarator, &declspecs, width);
              cplus_decl_attributes (&decl, attributes, /*flags=*/0);
            }
          else
            decl = grokfield (declarator, &declspecs,
                              NULL_TREE, /*init_const_expr_p=*/false,
                              NULL_TREE, attributes);

          /* Add the instance variable.  */
          objc_add_instance_variable (decl);

          /* Reset PREFIX_ATTRIBUTES: sever the combined chain just
             before FIRST_ATTRIBUTE so the prefix list can be reused for
             the next declarator.  */
          while (attributes && TREE_CHAIN (attributes) != first_attribute)
            attributes = TREE_CHAIN (attributes);
          if (attributes)
            TREE_CHAIN (attributes) = NULL_TREE;

          token = cp_lexer_peek_token (parser->lexer);

          if (token->type == CPP_COMMA)
            {
              cp_lexer_consume_token (parser->lexer);  /* Eat ','.  */
              continue;
            }
          break;
        }

      cp_parser_consume_semicolon_at_end_of_statement (parser);
      token = cp_lexer_peek_token (parser->lexer);
    }

  cp_lexer_consume_token (parser->lexer);  /* Eat '}'.  */
  /* For historical reasons, we accept an optional semicolon.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
    cp_lexer_consume_token (parser->lexer);
}
/* Parse an Objective-C protocol declaration: either a forward
   declaration ("@protocol A, B;") or a full definition terminated by
   '@end'.  Note the 'finish' label: the error path jumps INTO the
   forward-declaration arm so that both share the trailing-semicolon
   consumption.  */

static void
cp_parser_objc_protocol_declaration (cp_parser* parser)
{
  tree proto, protorefs;
  cp_token *tok;

  cp_lexer_consume_token (parser->lexer);  /* Eat '@protocol'.  */
  if (cp_lexer_next_token_is_not (parser->lexer, CPP_NAME))
    {
      error ("identifier expected after %<@protocol%>");
      goto finish;
    }

  /* See if we have a forward declaration or a definition.  */
  tok = cp_lexer_peek_nth_token (parser->lexer, 2);

  /* Try a forward declaration first.  */
  if (tok->type == CPP_COMMA || tok->type == CPP_SEMICOLON)
    {
      objc_declare_protocols (cp_parser_objc_identifier_list (parser));
     finish:
      cp_parser_consume_semicolon_at_end_of_statement (parser);
    }
  /* Ok, we got a full-fledged definition (or at least should).  */
  else
    {
      proto = cp_parser_identifier (parser);
      protorefs = cp_parser_objc_protocol_refs_opt (parser);
      objc_start_protocol (proto, protorefs);
      cp_parser_objc_method_prototype_list (parser);
    }
}
/* Parse an Objective-C superclass (": identifier") or category
   ("( identifier )") annotation.  Exactly one of *SUPER and *CATEG is
   set when the corresponding form is present; both are NULL_TREE
   otherwise.  */

static void
cp_parser_objc_superclass_or_category (cp_parser *parser, tree *super,
                                       tree *categ)
{
  cp_token *next = cp_lexer_peek_token (parser->lexer);

  *super = NULL_TREE;
  *categ = NULL_TREE;

  switch (next->type)
    {
    case CPP_COLON:
      cp_lexer_consume_token (parser->lexer);  /* Eat ':'.  */
      *super = cp_parser_identifier (parser);
      break;

    case CPP_OPEN_PAREN:
      cp_lexer_consume_token (parser->lexer);  /* Eat '('.  */
      *categ = cp_parser_identifier (parser);
      cp_parser_require (parser, CPP_CLOSE_PAREN, "`)'");
      break;

    default:
      /* Neither form present; leave both outputs NULL_TREE.  */
      break;
    }
}
/* Parse an Objective-C class interface: '@interface', the class name,
   an optional superclass or category annotation, optional protocol
   references, then the method prototype list through '@end'.  */

static void
cp_parser_objc_class_interface (cp_parser* parser)
{
  tree class_name, super_name, categ_name, proto_refs;

  cp_lexer_consume_token (parser->lexer);  /* Eat '@interface'.  */
  class_name = cp_parser_identifier (parser);
  cp_parser_objc_superclass_or_category (parser, &super_name, &categ_name);
  proto_refs = cp_parser_objc_protocol_refs_opt (parser);

  /* We have either a class or a category on our hands.  */
  if (categ_name != NULL_TREE)
    objc_start_category_interface (class_name, categ_name, proto_refs);
  else
    {
      objc_start_class_interface (class_name, super_name, proto_refs);
      /* Handle instance variable declarations, if any.  */
      cp_parser_objc_class_ivars (parser);
      objc_continue_interface ();
    }

  cp_parser_objc_method_prototype_list (parser);
}
/* Parse an Objective-C class implementation: '@implementation', the
   class name, an optional superclass or category annotation, then the
   method definition list through '@end'.  */

static void
cp_parser_objc_class_implementation (cp_parser* parser)
{
  tree class_name, super_name, categ_name;

  cp_lexer_consume_token (parser->lexer);  /* Eat '@implementation'.  */
  class_name = cp_parser_identifier (parser);
  cp_parser_objc_superclass_or_category (parser, &super_name, &categ_name);

  /* We have either a class or a category on our hands.  */
  if (categ_name != NULL_TREE)
    objc_start_category_implementation (class_name, categ_name);
  else
    {
      objc_start_class_implementation (class_name, super_name);
      /* Handle instance variable declarations, if any.  */
      cp_parser_objc_class_ivars (parser);
      objc_continue_implementation ();
    }

  cp_parser_objc_method_definition_list (parser);
}
/* Consume the @end token and finish off the implementation.  */

static void
cp_parser_objc_end_implementation (cp_parser* parser)
{
  /* The '@end' keyword itself is the only token to consume.  */
  cp_lexer_consume_token (parser->lexer);
  objc_finish_implementation ();
}
/* Parse an Objective-C declaration: dispatch on the leading @-keyword
   to the appropriate production, diagnosing anything unrecognized.  */

static void
cp_parser_objc_declaration (cp_parser* parser)
{
  /* Try to figure out what kind of declaration is present.  */
  cp_token *kwd = cp_lexer_peek_token (parser->lexer);

  switch (kwd->keyword)
    {
    case RID_AT_ALIAS:
      cp_parser_objc_alias_declaration (parser);
      break;
    case RID_AT_CLASS:
      cp_parser_objc_class_declaration (parser);
      break;
    case RID_AT_PROTOCOL:
      cp_parser_objc_protocol_declaration (parser);
      break;
    case RID_AT_INTERFACE:
      cp_parser_objc_class_interface (parser);
      break;
    case RID_AT_IMPLEMENTATION:
      cp_parser_objc_class_implementation (parser);
      break;
    case RID_AT_END:
      cp_parser_objc_end_implementation (parser);
      break;
    default:
      /* Not a declaration keyword; diagnose and recover.  */
      error ("misplaced %<@%D%> Objective-C++ construct", kwd->u.value);
      cp_parser_skip_to_end_of_block_or_statement (parser);
    }
}
/* Parse an Objective-C try-catch-finally statement.

   objc-try-catch-finally-stmt:
     @try compound-statement objc-catch-clause-seq [opt]
       objc-finally-clause [opt]

   objc-catch-clause-seq:
     objc-catch-clause objc-catch-clause-seq [opt]

   objc-catch-clause:
     @catch ( exception-declaration ) compound-statement

   objc-finally-clause
     @finally compound-statement

   Returns NULL_TREE.  */

static tree
cp_parser_objc_try_catch_finally_statement (cp_parser *parser) {
  location_t location;
  tree stmt;

  cp_parser_require_keyword (parser, RID_AT_TRY, "`@try'");
  location = cp_lexer_peek_token (parser->lexer)->location;
  /* NB: The @try block needs to be wrapped in its own STATEMENT_LIST
     node, lest it get absorbed into the surrounding block.  */
  stmt = push_stmt_list ();
  cp_parser_compound_statement (parser, NULL, false);
  objc_begin_try_stmt (location, pop_stmt_list (stmt));

  /* Zero or more @catch clauses follow.  */
  while (cp_lexer_next_token_is_keyword (parser->lexer, RID_AT_CATCH))
    {
      cp_parameter_declarator *parmdecl;
      tree parm;

      cp_lexer_consume_token (parser->lexer);
      cp_parser_require (parser, CPP_OPEN_PAREN, "`('");
      /* The exception declaration is parsed like a parameter.  */
      parmdecl = cp_parser_parameter_declaration (parser, false, NULL);
      parm = grokdeclarator (parmdecl->declarator,
                             &parmdecl->decl_specifiers,
                             PARM, /*initialized=*/0,
                             /*attrlist=*/NULL);
      cp_parser_require (parser, CPP_CLOSE_PAREN, "`)'");
      objc_begin_catch_clause (parm);
      cp_parser_compound_statement (parser, NULL, false);
      objc_finish_catch_clause ();
    }

  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_AT_FINALLY))
    {
      cp_lexer_consume_token (parser->lexer);
      location = cp_lexer_peek_token (parser->lexer)->location;
      /* NB: The @finally block needs to be wrapped in its own STATEMENT_LIST
         node, lest it get absorbed into the surrounding block.  */
      stmt = push_stmt_list ();
      cp_parser_compound_statement (parser, NULL, false);
      objc_build_finally_clause (location, pop_stmt_list (stmt));
    }

  return objc_finish_try_stmt ();
}
/* Parse an Objective-C synchronized statement.

   objc-synchronized-stmt:
     @synchronized ( expression ) compound-statement

   Returns NULL_TREE.  */

static tree
cp_parser_objc_synchronized_statement (cp_parser *parser) {
  location_t loc;
  tree lock_expr, body;

  cp_parser_require_keyword (parser, RID_AT_SYNCHRONIZED, "`@synchronized'");
  loc = cp_lexer_peek_token (parser->lexer)->location;

  cp_parser_require (parser, CPP_OPEN_PAREN, "`('");
  lock_expr = cp_parser_expression (parser, false);
  cp_parser_require (parser, CPP_CLOSE_PAREN, "`)'");

  /* NB: The @synchronized block needs to be wrapped in its own
     STATEMENT_LIST node, lest it get absorbed into the surrounding
     block.  */
  body = push_stmt_list ();
  cp_parser_compound_statement (parser, NULL, false);
  body = pop_stmt_list (body);

  return objc_build_synchronized (loc, lock_expr, body);
}
/* Parse an Objective-C throw statement.

   objc-throw-stmt:
     @throw assignment-expression [opt] ;

   Returns a constructed '@throw' statement.  */

static tree
cp_parser_objc_throw_statement (cp_parser *parser) {
  tree thrown = NULL_TREE;

  cp_parser_require_keyword (parser, RID_AT_THROW, "`@throw'");

  /* The thrown expression is optional: '@throw;' rethrows.  */
  if (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
    thrown = cp_parser_assignment_expression (parser, false);

  cp_parser_consume_semicolon_at_end_of_statement (parser);

  return objc_build_throw_stmt (thrown);
}
/* Parse an Objective-C statement: dispatch on the leading @-keyword to
   @try/@synchronized/@throw, diagnosing anything else.  Returns the
   statement tree, or error_mark_node for an unrecognized construct.  */

static tree
cp_parser_objc_statement (cp_parser * parser) {
  /* Try to figure out what kind of declaration is present.  */
  cp_token *kwd = cp_lexer_peek_token (parser->lexer);

  switch (kwd->keyword)
    {
    case RID_AT_TRY:
      return cp_parser_objc_try_catch_finally_statement (parser);
    case RID_AT_SYNCHRONIZED:
      return cp_parser_objc_synchronized_statement (parser);
    case RID_AT_THROW:
      return cp_parser_objc_throw_statement (parser);
    default:
      /* Not a statement keyword; diagnose and recover.  */
      error ("misplaced %<@%D%> Objective-C++ construct", kwd->u.value);
      cp_parser_skip_to_end_of_block_or_statement (parser);
    }

  return error_mark_node;
}
/* OpenMP 2.5 parsing routines.  */

/* All OpenMP clauses.  OpenMP 2.5.  Each enumerator names one clause
   keyword recognized by cp_parser_omp_clause_name; PRAGMA_OMP_CLAUSE_NONE
   means "not a recognized clause".  */
typedef enum pragma_omp_clause {
  PRAGMA_OMP_CLAUSE_NONE = 0,

  PRAGMA_OMP_CLAUSE_COPYIN,
  PRAGMA_OMP_CLAUSE_COPYPRIVATE,
  PRAGMA_OMP_CLAUSE_DEFAULT,
  PRAGMA_OMP_CLAUSE_FIRSTPRIVATE,
  PRAGMA_OMP_CLAUSE_IF,
  PRAGMA_OMP_CLAUSE_LASTPRIVATE,
  PRAGMA_OMP_CLAUSE_NOWAIT,
  PRAGMA_OMP_CLAUSE_NUM_THREADS,
  PRAGMA_OMP_CLAUSE_ORDERED,
  PRAGMA_OMP_CLAUSE_PRIVATE,
  PRAGMA_OMP_CLAUSE_REDUCTION,
  PRAGMA_OMP_CLAUSE_SCHEDULE,
  PRAGMA_OMP_CLAUSE_SHARED,
  /* OpenMP 3.0 additions.  */
  PRAGMA_OMP_CLAUSE_COLLAPSE,
  PRAGMA_OMP_CLAUSE_UNTIED
} pragma_omp_clause;
/* Returns name of the next clause.
   If the clause is not recognized PRAGMA_OMP_CLAUSE_NONE is returned and
   the token is not consumed.  Otherwise appropriate pragma_omp_clause is
   returned and the token is consumed.  */

static pragma_omp_clause
cp_parser_omp_clause_name (cp_parser *parser)
{
  pragma_omp_clause result = PRAGMA_OMP_CLAUSE_NONE;

  /* 'if', 'default' and 'private' are C++ keywords, so they never
     arrive as CPP_NAME tokens and must be checked separately.  */
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_IF))
    result = PRAGMA_OMP_CLAUSE_IF;
  else if (cp_lexer_next_token_is_keyword (parser->lexer, RID_DEFAULT))
    result = PRAGMA_OMP_CLAUSE_DEFAULT;
  else if (cp_lexer_next_token_is_keyword (parser->lexer, RID_PRIVATE))
    result = PRAGMA_OMP_CLAUSE_PRIVATE;
  else if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
    {
      tree id = cp_lexer_peek_token (parser->lexer)->u.value;
      const char *p = IDENTIFIER_POINTER (id);

      /* Dispatch on the first character, then compare the full
         spelling within each group.  */
      switch (p[0])
        {
        case 'c':
          if (!strcmp ("collapse", p))
            result = PRAGMA_OMP_CLAUSE_COLLAPSE;
          else if (!strcmp ("copyin", p))
            result = PRAGMA_OMP_CLAUSE_COPYIN;
          else if (!strcmp ("copyprivate", p))
            result = PRAGMA_OMP_CLAUSE_COPYPRIVATE;
          break;
        case 'f':
          if (!strcmp ("firstprivate", p))
            result = PRAGMA_OMP_CLAUSE_FIRSTPRIVATE;
          break;
        case 'l':
          if (!strcmp ("lastprivate", p))
            result = PRAGMA_OMP_CLAUSE_LASTPRIVATE;
          break;
        case 'n':
          if (!strcmp ("nowait", p))
            result = PRAGMA_OMP_CLAUSE_NOWAIT;
          else if (!strcmp ("num_threads", p))
            result = PRAGMA_OMP_CLAUSE_NUM_THREADS;
          break;
        case 'o':
          if (!strcmp ("ordered", p))
            result = PRAGMA_OMP_CLAUSE_ORDERED;
          break;
        case 'r':
          if (!strcmp ("reduction", p))
            result = PRAGMA_OMP_CLAUSE_REDUCTION;
          break;
        case 's':
          if (!strcmp ("schedule", p))
            result = PRAGMA_OMP_CLAUSE_SCHEDULE;
          else if (!strcmp ("shared", p))
            result = PRAGMA_OMP_CLAUSE_SHARED;
          break;
        case 'u':
          if (!strcmp ("untied", p))
            result = PRAGMA_OMP_CLAUSE_UNTIED;
          break;
        }
    }

  if (result != PRAGMA_OMP_CLAUSE_NONE)
    cp_lexer_consume_token (parser->lexer);

  return result;
}
/* Validate that a clause of the given type does not already exist.
   Emits a "too many ... clauses" error (using NAME) when a clause with
   code CODE is found in CLAUSES.  */

static void
check_no_duplicate_clause (tree clauses, enum tree_code code, const char *name)
{
  tree c;

  for (c = clauses; c != NULL_TREE; c = OMP_CLAUSE_CHAIN (c))
    {
      if (OMP_CLAUSE_CODE (c) != code)
        continue;
      error ("too many %qs clauses", name);
      return;
    }
}
/* OpenMP 2.5:
   variable-list:
     identifier
     variable-list , identifier

   In addition, we match a closing parenthesis.  An opening parenthesis
   will have been consumed by the caller.

   If KIND is nonzero, create the appropriate node and install the decl
   in OMP_CLAUSE_DECL and add the node to the head of the list.

   If KIND is zero, create a TREE_LIST with the decl in TREE_PURPOSE;
   return the list created.

   The skip_comma/get_comma labels implement error recovery: on a bad
   name or a missing ')', skip ahead to an unnested ',' or ')' and
   resume; note that skip_comma is jumped to from inside the loop.  */

static tree
cp_parser_omp_var_list_no_open (cp_parser *parser, enum omp_clause_code kind,
                                tree list)
{
  while (1)
    {
      tree name, decl;

      name = cp_parser_id_expression (parser, /*template_p=*/false,
                                      /*check_dependency_p=*/true,
                                      /*template_p=*/NULL,
                                      /*declarator_p=*/false,
                                      /*optional_p=*/false);
      if (name == error_mark_node)
        goto skip_comma;

      decl = cp_parser_lookup_name_simple (parser, name);
      if (decl == error_mark_node)
        cp_parser_name_lookup_error (parser, name, decl, NULL);
      else if (kind != 0)
        {
          /* Prepend a clause node of the requested kind.  */
          tree u = build_omp_clause (kind);
          OMP_CLAUSE_DECL (u) = decl;
          OMP_CLAUSE_CHAIN (u) = list;
          list = u;
        }
      else
        list = tree_cons (decl, NULL_TREE, list);

    get_comma:
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA))
        break;
      cp_lexer_consume_token (parser->lexer);
    }

  if (!cp_parser_require (parser, CPP_CLOSE_PAREN, "`)'"))
    {
      int ending;

      /* Try to resync to an unnested comma.  Copied from
         cp_parser_parenthesized_expression_list.  */
    skip_comma:
      ending = cp_parser_skip_to_closing_parenthesis (parser,
                                                      /*recovering=*/true,
                                                      /*or_comma=*/true,
                                                      /*consume_paren=*/true);
      if (ending < 0)
        goto get_comma;
    }

  return list;
}
/* Similarly, but expect leading and trailing parenthesis.  This is a very
   common case for omp clauses.  */

static tree
cp_parser_omp_var_list (cp_parser *parser, enum omp_clause_code kind, tree list)
{
  /* Without the opening '(' there is nothing to parse.  */
  if (!cp_parser_require (parser, CPP_OPEN_PAREN, "`('"))
    return list;
  return cp_parser_omp_var_list_no_open (parser, kind, list);
}
/* OpenMP 3.0:
   collapse ( constant-expression )

   Parses the collapse clause; the argument must be a positive constant
   integer expression that fits in an int.  On success a new
   OMP_CLAUSE_COLLAPSE node is prepended to LIST and returned; on any
   error LIST is returned unchanged.  */

static tree
cp_parser_omp_clause_collapse (cp_parser *parser, tree list)
{
  tree c, num;
  location_t loc;
  HOST_WIDE_INT n;

  loc = cp_lexer_peek_token (parser->lexer)->location;
  if (!cp_parser_require (parser, CPP_OPEN_PAREN, "%<(%>"))
    return list;

  num = cp_parser_constant_expression (parser, false, NULL);

  if (!cp_parser_require (parser, CPP_CLOSE_PAREN, "%<)%>"))
    cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true,
                                           /*or_comma=*/false,
                                           /*consume_paren=*/true);

  if (num == error_mark_node)
    return list;
  num = fold_non_dependent_expr (num);
  /* Reject non-integral, non-constant, non-positive, or
     does-not-fit-in-int arguments.  */
  if (!INTEGRAL_TYPE_P (TREE_TYPE (num))
      || !host_integerp (num, 0)
      || (n = tree_low_cst (num, 0)) <= 0
      || (int) n != n)
    {
      error ("%Hcollapse argument needs positive constant integer expression", &loc);
      return list;
    }

  check_no_duplicate_clause (list, OMP_CLAUSE_COLLAPSE, "collapse");
  c = build_omp_clause (OMP_CLAUSE_COLLAPSE);
  OMP_CLAUSE_CHAIN (c) = list;
  OMP_CLAUSE_COLLAPSE_EXPR (c) = num;
  return c;
}
/* OpenMP 2.5:
   default ( shared | none )

   Parses the default clause.  Note the 'invalid_kind' label inside the
   else arm: the switch's failure paths jump there so that all invalid
   spellings share one diagnostic.  On error LIST is returned unchanged;
   otherwise a new OMP_CLAUSE_DEFAULT node is prepended and returned.  */

static tree
cp_parser_omp_clause_default (cp_parser *parser, tree list)
{
  enum omp_clause_default_kind kind = OMP_CLAUSE_DEFAULT_UNSPECIFIED;
  tree c;

  if (!cp_parser_require (parser, CPP_OPEN_PAREN, "`('"))
    return list;
  if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
    {
      tree id = cp_lexer_peek_token (parser->lexer)->u.value;
      const char *p = IDENTIFIER_POINTER (id);

      switch (p[0])
        {
        case 'n':
          if (strcmp ("none", p) != 0)
            goto invalid_kind;
          kind = OMP_CLAUSE_DEFAULT_NONE;
          break;

        case 's':
          if (strcmp ("shared", p) != 0)
            goto invalid_kind;
          kind = OMP_CLAUSE_DEFAULT_SHARED;
          break;

        default:
          goto invalid_kind;
        }

      cp_lexer_consume_token (parser->lexer);
    }
  else
    {
    invalid_kind:
      cp_parser_error (parser, "expected %<none%> or %<shared%>");
    }

  if (!cp_parser_require (parser, CPP_CLOSE_PAREN, "`)'"))
    cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true,
                                           /*or_comma=*/false,
                                           /*consume_paren=*/true);

  if (kind == OMP_CLAUSE_DEFAULT_UNSPECIFIED)
    return list;

  check_no_duplicate_clause (list, OMP_CLAUSE_DEFAULT, "default");
  c = build_omp_clause (OMP_CLAUSE_DEFAULT);
  OMP_CLAUSE_CHAIN (c) = list;
  OMP_CLAUSE_DEFAULT_KIND (c) = kind;
  return c;
}
/* OpenMP 2.5:
   if ( expression )

   Parse an if clause and prepend it to LIST.  NOTE(review): even when
   the condition parses as error_mark_node the clause is still built
   with that expression — presumably later folding/diagnostics cope;
   confirm against finish_omp_clauses.  */
static tree
cp_parser_omp_clause_if (cp_parser *parser, tree list)
{
  tree t, c;

  if (!cp_parser_require (parser, CPP_OPEN_PAREN, "`('"))
    return list;

  t = cp_parser_condition (parser);

  if (t == error_mark_node
      || !cp_parser_require (parser, CPP_CLOSE_PAREN, "`)'"))
    cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true,
                                           /*or_comma=*/false,
                                           /*consume_paren=*/true);

  check_no_duplicate_clause (list, OMP_CLAUSE_IF, "if");

  c = build_omp_clause (OMP_CLAUSE_IF);
  OMP_CLAUSE_IF_EXPR (c) = t;
  OMP_CLAUSE_CHAIN (c) = list;
  return c;
}
/* OpenMP 2.5:
   nowait

   Build a nowait clause (it takes no arguments) and chain it onto
   LIST, diagnosing a duplicate first.  */
static tree
cp_parser_omp_clause_nowait (cp_parser *parser ATTRIBUTE_UNUSED, tree list)
{
  tree clause;

  check_no_duplicate_clause (list, OMP_CLAUSE_NOWAIT, "nowait");

  clause = build_omp_clause (OMP_CLAUSE_NOWAIT);
  OMP_CLAUSE_CHAIN (clause) = list;
  return clause;
}
/* OpenMP 2.5:
   num_threads ( expression )

   Parse a num_threads clause and prepend it to LIST.  */
static tree
cp_parser_omp_clause_num_threads (cp_parser *parser, tree list)
{
  tree t, c;

  if (!cp_parser_require (parser, CPP_OPEN_PAREN, "`('"))
    return list;

  t = cp_parser_expression (parser, false);

  if (t == error_mark_node
      || !cp_parser_require (parser, CPP_CLOSE_PAREN, "`)'"))
    cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true,
                                           /*or_comma=*/false,
                                           /*consume_paren=*/true);

  check_no_duplicate_clause (list, OMP_CLAUSE_NUM_THREADS, "num_threads");

  c = build_omp_clause (OMP_CLAUSE_NUM_THREADS);
  OMP_CLAUSE_NUM_THREADS_EXPR (c) = t;
  OMP_CLAUSE_CHAIN (c) = list;
  return c;
}
/* OpenMP 2.5:
   ordered

   Build an argument-less ordered clause and chain it onto LIST,
   diagnosing a duplicate first.  */
static tree
cp_parser_omp_clause_ordered (cp_parser *parser ATTRIBUTE_UNUSED, tree list)
{
  tree clause;

  check_no_duplicate_clause (list, OMP_CLAUSE_ORDERED, "ordered");

  clause = build_omp_clause (OMP_CLAUSE_ORDERED);
  OMP_CLAUSE_CHAIN (clause) = list;
  return clause;
}
/* OpenMP 2.5:
   reduction ( reduction-operator : variable-list )
   reduction-operator:
   One of: + * - & ^ | && ||

   Parse a reduction clause, building one OMP_CLAUSE_REDUCTION per
   listed variable and stamping each with the parsed operator.  On a
   syntax error, resync past the close paren and return LIST
   unchanged.  */
static tree
cp_parser_omp_clause_reduction (cp_parser *parser, tree list)
{
  enum tree_code code;
  tree nlist, c;

  if (!cp_parser_require (parser, CPP_OPEN_PAREN, "`('"))
    return list;

  /* Map the operator token to the tree code recorded on the clause.  */
  switch (cp_lexer_peek_token (parser->lexer)->type)
    {
    case CPP_PLUS:
      code = PLUS_EXPR;
      break;
    case CPP_MULT:
      code = MULT_EXPR;
      break;
    case CPP_MINUS:
      code = MINUS_EXPR;
      break;
    case CPP_AND:
      code = BIT_AND_EXPR;
      break;
    case CPP_XOR:
      code = BIT_XOR_EXPR;
      break;
    case CPP_OR:
      code = BIT_IOR_EXPR;
      break;
    case CPP_AND_AND:
      code = TRUTH_ANDIF_EXPR;
      break;
    case CPP_OR_OR:
      code = TRUTH_ORIF_EXPR;
      break;
    default:
      cp_parser_error (parser, "`+', `*', `-', `&', `^', `|', `&&', or `||'");
    resync_fail:
      cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true,
                                             /*or_comma=*/false,
                                             /*consume_paren=*/true);
      return list;
    }
  cp_lexer_consume_token (parser->lexer);

  if (!cp_parser_require (parser, CPP_COLON, "`:'"))
    goto resync_fail;

  nlist = cp_parser_omp_var_list_no_open (parser, OMP_CLAUSE_REDUCTION, list);
  /* The new clauses are exactly those chained ahead of the old LIST
     head; set the reduction code on each of them.  */
  for (c = nlist; c != list; c = OMP_CLAUSE_CHAIN (c))
    OMP_CLAUSE_REDUCTION_CODE (c) = code;

  return nlist;
}
/* OpenMP 2.5:
   schedule ( schedule-kind )
   schedule ( schedule-kind , expression )

   schedule-kind:
   static | dynamic | guided | runtime | auto

   Parse a schedule clause (kind plus optional chunk-size expression)
   and prepend it to LIST.  runtime and auto reject a chunk size.  On
   error, resync past the close paren and return LIST unchanged.  */
static tree
cp_parser_omp_clause_schedule (cp_parser *parser, tree list)
{
  tree c, t;

  if (!cp_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
    return list;

  c = build_omp_clause (OMP_CLAUSE_SCHEDULE);

  if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
    {
      tree id = cp_lexer_peek_token (parser->lexer)->u.value;
      const char *p = IDENTIFIER_POINTER (id);

      switch (p[0])
        {
        case 'd':
          if (strcmp ("dynamic", p) != 0)
            goto invalid_kind;
          OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_DYNAMIC;
          break;
        case 'g':
          if (strcmp ("guided", p) != 0)
            goto invalid_kind;
          OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_GUIDED;
          break;
        case 'r':
          if (strcmp ("runtime", p) != 0)
            goto invalid_kind;
          OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_RUNTIME;
          break;
        default:
          goto invalid_kind;
        }
    }
  /* "static" and "auto" arrive as keywords, not CPP_NAME tokens.  */
  else if (cp_lexer_next_token_is_keyword (parser->lexer, RID_STATIC))
    OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_STATIC;
  else if (cp_lexer_next_token_is_keyword (parser->lexer, RID_AUTO))
    OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_AUTO;
  else
    goto invalid_kind;
  cp_lexer_consume_token (parser->lexer);

  if (cp_lexer_next_token_is (parser->lexer, CPP_COMMA))
    {
      cp_lexer_consume_token (parser->lexer);

      t = cp_parser_assignment_expression (parser, false);

      if (t == error_mark_node)
        goto resync_fail;
      else if (OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_RUNTIME)
        error ("schedule %<runtime%> does not take "
               "a %<chunk_size%> parameter");
      else if (OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_AUTO)
        error ("schedule %<auto%> does not take "
               "a %<chunk_size%> parameter");
      else
        OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (c) = t;

      if (!cp_parser_require (parser, CPP_CLOSE_PAREN, "`)'"))
        goto resync_fail;
    }
  else if (!cp_parser_require (parser, CPP_CLOSE_PAREN, "`,' or `)'"))
    goto resync_fail;

  check_no_duplicate_clause (list, OMP_CLAUSE_SCHEDULE, "schedule");
  OMP_CLAUSE_CHAIN (c) = list;
  return c;

 invalid_kind:
  cp_parser_error (parser, "invalid schedule kind");
 resync_fail:
  cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true,
                                         /*or_comma=*/false,
                                         /*consume_paren=*/true);
  return list;
}
/* OpenMP 3.0:
   untied

   Build an argument-less untied clause and chain it onto LIST,
   diagnosing a duplicate first.  */
static tree
cp_parser_omp_clause_untied (cp_parser *parser ATTRIBUTE_UNUSED, tree list)
{
  tree clause;

  check_no_duplicate_clause (list, OMP_CLAUSE_UNTIED, "untied");

  clause = build_omp_clause (OMP_CLAUSE_UNTIED);
  OMP_CLAUSE_CHAIN (clause) = list;
  return clause;
}
/* Parse all OpenMP clauses.  The set of clauses allowed by the
   directive is a bitmask in MASK, indexed by pragma_omp_clause.
   WHERE names the directive for diagnostics.  Returns the list of
   clauses found, passed through finish_omp_clauses.  A clause that is
   parsed but not permitted by MASK is diagnosed and stripped back off
   the list.  */
static tree
cp_parser_omp_all_clauses (cp_parser *parser, unsigned int mask,
                           const char *where, cp_token *pragma_tok)
{
  tree clauses = NULL;

  while (cp_lexer_next_token_is_not (parser->lexer, CPP_PRAGMA_EOL))
    {
      pragma_omp_clause c_kind = cp_parser_omp_clause_name (parser);
      const char *c_name;
      /* Remember the list head so an invalid clause can be undone.  */
      tree prev = clauses;

      switch (c_kind)
        {
        case PRAGMA_OMP_CLAUSE_COLLAPSE:
          clauses = cp_parser_omp_clause_collapse (parser, clauses);
          c_name = "collapse";
          break;
        case PRAGMA_OMP_CLAUSE_COPYIN:
          clauses = cp_parser_omp_var_list (parser, OMP_CLAUSE_COPYIN, clauses);
          c_name = "copyin";
          break;
        case PRAGMA_OMP_CLAUSE_COPYPRIVATE:
          clauses = cp_parser_omp_var_list (parser, OMP_CLAUSE_COPYPRIVATE,
                                            clauses);
          c_name = "copyprivate";
          break;
        case PRAGMA_OMP_CLAUSE_DEFAULT:
          clauses = cp_parser_omp_clause_default (parser, clauses);
          c_name = "default";
          break;
        case PRAGMA_OMP_CLAUSE_FIRSTPRIVATE:
          clauses = cp_parser_omp_var_list (parser, OMP_CLAUSE_FIRSTPRIVATE,
                                            clauses);
          c_name = "firstprivate";
          break;
        case PRAGMA_OMP_CLAUSE_IF:
          clauses = cp_parser_omp_clause_if (parser, clauses);
          c_name = "if";
          break;
        case PRAGMA_OMP_CLAUSE_LASTPRIVATE:
          clauses = cp_parser_omp_var_list (parser, OMP_CLAUSE_LASTPRIVATE,
                                            clauses);
          c_name = "lastprivate";
          break;
        case PRAGMA_OMP_CLAUSE_NOWAIT:
          clauses = cp_parser_omp_clause_nowait (parser, clauses);
          c_name = "nowait";
          break;
        case PRAGMA_OMP_CLAUSE_NUM_THREADS:
          clauses = cp_parser_omp_clause_num_threads (parser, clauses);
          c_name = "num_threads";
          break;
        case PRAGMA_OMP_CLAUSE_ORDERED:
          clauses = cp_parser_omp_clause_ordered (parser, clauses);
          c_name = "ordered";
          break;
        case PRAGMA_OMP_CLAUSE_PRIVATE:
          clauses = cp_parser_omp_var_list (parser, OMP_CLAUSE_PRIVATE,
                                            clauses);
          c_name = "private";
          break;
        case PRAGMA_OMP_CLAUSE_REDUCTION:
          clauses = cp_parser_omp_clause_reduction (parser, clauses);
          c_name = "reduction";
          break;
        case PRAGMA_OMP_CLAUSE_SCHEDULE:
          clauses = cp_parser_omp_clause_schedule (parser, clauses);
          c_name = "schedule";
          break;
        case PRAGMA_OMP_CLAUSE_SHARED:
          clauses = cp_parser_omp_var_list (parser, OMP_CLAUSE_SHARED,
                                            clauses);
          c_name = "shared";
          break;
        case PRAGMA_OMP_CLAUSE_UNTIED:
          clauses = cp_parser_omp_clause_untied (parser, clauses);
          /* Fixed: this previously said "nowait", so the "not valid
             for" diagnostic below misreported the clause name.  */
          c_name = "untied";
          break;
        default:
          cp_parser_error (parser, "expected %<#pragma omp%> clause");
          goto saw_error;
        }

      if (((mask >> c_kind) & 1) == 0)
        {
          /* Remove the invalid clause(s) from the list to avoid
             confusing the rest of the compiler.  */
          clauses = prev;
          error ("%qs is not valid for %qs", c_name, where);
        }
    }
 saw_error:
  cp_parser_skip_to_pragma_eol (parser, pragma_tok);
  return finish_omp_clauses (clauses);
}
/* OpenMP 2.5:
   structured-block:
   statement

   In practice, we're also interested in adding the statement to an
   outer node.  So it is convenient if we work around the fact that
   cp_parser_statement calls add_stmt.

   Save and adjust parser->in_statement for entering an OpenMP
   structured block; returns the previous value so the caller can
   restore it with cp_parser_end_omp_structured_block.  */
static unsigned
cp_parser_begin_omp_structured_block (cp_parser *parser)
{
  unsigned saved_state = parser->in_statement;

  /* Only switch to IN_OMP_BLOCK when we were already inside some
     statement context.  Leaving zero alone preserves the ordinary
     "break/continue not within loop or switch" diagnostics for code
     like:
       void foo() {
         #pragma omp single
           break;
       }
  */
  if (saved_state)
    parser->in_statement = IN_OMP_BLOCK;

  return saved_state;
}
/* Restore parser->in_statement to SAVE, undoing
   cp_parser_begin_omp_structured_block.  */
static void
cp_parser_end_omp_structured_block (cp_parser *parser, unsigned save)
{
  parser->in_statement = save;
}
/* Parse a single statement as an OpenMP structured block, with
   in_statement handling, and return the finished block tree.  */
static tree
cp_parser_omp_structured_block (cp_parser *parser)
{
  tree block = begin_omp_structured_block ();
  unsigned int saved_state = cp_parser_begin_omp_structured_block (parser);

  cp_parser_statement (parser, NULL_TREE, false);

  cp_parser_end_omp_structured_block (parser, saved_state);
  return finish_omp_structured_block (block);
}
/* OpenMP 2.5:
   # pragma omp atomic new-line
   expression-stmt

   expression-stmt:
   x binop= expr | x++ | ++x | x-- | --x
   binop:
   +, *, -, /, &, ^, |, <<, >>

   where x is an lvalue expression with scalar type.

   Parse the statement following an atomic pragma, normalize it to an
   (lhs, binop, rhs) triple, and hand it to finish_omp_atomic.  On any
   syntax error, skip to the end of the block or statement.  */
static void
cp_parser_omp_atomic (cp_parser *parser, cp_token *pragma_tok)
{
  tree lhs, rhs;
  enum tree_code code;

  cp_parser_require_pragma_eol (parser, pragma_tok);

  lhs = cp_parser_unary_expression (parser, /*address_p=*/false,
                                    /*cast_p=*/false);
  switch (TREE_CODE (lhs))
    {
    case ERROR_MARK:
      goto saw_error;

    /* ++x / x++ become x += 1; --x / x-- become x -= 1.  */
    case PREINCREMENT_EXPR:
    case POSTINCREMENT_EXPR:
      lhs = TREE_OPERAND (lhs, 0);
      code = PLUS_EXPR;
      rhs = integer_one_node;
      break;

    case PREDECREMENT_EXPR:
    case POSTDECREMENT_EXPR:
      lhs = TREE_OPERAND (lhs, 0);
      code = MINUS_EXPR;
      rhs = integer_one_node;
      break;

    default:
      /* Otherwise the next token must be one of the compound
         assignment operators allowed by the atomic grammar.  */
      switch (cp_lexer_peek_token (parser->lexer)->type)
        {
        case CPP_MULT_EQ:
          code = MULT_EXPR;
          break;
        case CPP_DIV_EQ:
          code = TRUNC_DIV_EXPR;
          break;
        case CPP_PLUS_EQ:
          code = PLUS_EXPR;
          break;
        case CPP_MINUS_EQ:
          code = MINUS_EXPR;
          break;
        case CPP_LSHIFT_EQ:
          code = LSHIFT_EXPR;
          break;
        case CPP_RSHIFT_EQ:
          code = RSHIFT_EXPR;
          break;
        case CPP_AND_EQ:
          code = BIT_AND_EXPR;
          break;
        case CPP_OR_EQ:
          code = BIT_IOR_EXPR;
          break;
        case CPP_XOR_EQ:
          code = BIT_XOR_EXPR;
          break;
        default:
          cp_parser_error (parser,
                           "invalid operator for %<#pragma omp atomic%>");
          goto saw_error;
        }
      cp_lexer_consume_token (parser->lexer);

      rhs = cp_parser_expression (parser, false);
      if (rhs == error_mark_node)
        goto saw_error;
      break;
    }
  finish_omp_atomic (code, lhs, rhs);
  cp_parser_consume_semicolon_at_end_of_statement (parser);
  return;

 saw_error:
  cp_parser_skip_to_end_of_block_or_statement (parser);
}
/* OpenMP 2.5:
   # pragma omp barrier new-line

   The barrier directive takes no clauses; just require the pragma end
   and emit the barrier.  */
static void
cp_parser_omp_barrier (cp_parser *parser, cp_token *pragma_tok)
{
  cp_parser_require_pragma_eol (parser, pragma_tok);
  finish_omp_barrier ();
}
/* OpenMP 2.5:
   # pragma omp critical [(name)] new-line
   structured-block

   Parse a critical construct with an optional region name.  */
static tree
cp_parser_omp_critical (cp_parser *parser, cp_token *pragma_tok)
{
  tree stmt, name = NULL;

  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN))
    {
      cp_lexer_consume_token (parser->lexer);

      name = cp_parser_identifier (parser);

      if (name == error_mark_node
          || !cp_parser_require (parser, CPP_CLOSE_PAREN, "`)'"))
        cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true,
                                               /*or_comma=*/false,
                                               /*consume_paren=*/true);
      /* Treat an unparsable name as the anonymous critical region.  */
      if (name == error_mark_node)
        name = NULL;
    }
  cp_parser_require_pragma_eol (parser, pragma_tok);

  stmt = cp_parser_omp_structured_block (parser);
  return c_finish_omp_critical (stmt, name);
}
/* OpenMP 2.5:
   # pragma omp flush flush-vars[opt] new-line

   flush-vars:
   ( variable-list )

   The variable list is parsed for syntax only (clause kind 0) and
   then discarded; finish_omp_flush emits a full flush regardless.  */
static void
cp_parser_omp_flush (cp_parser *parser, cp_token *pragma_tok)
{
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN))
    (void) cp_parser_omp_var_list (parser, 0, NULL);
  cp_parser_require_pragma_eol (parser, pragma_tok);
  finish_omp_flush ();
}
/* Helper function to parse the controlling condition of an OpenMP for
   loop when the iteration variable DECL is an iterator-like object
   (class type or type-dependent).  Requires the form
   DECL relop expr with relop one of < <= > >=, and builds the
   comparison without folding so finish_omp_for sees both operands.
   Returns error_mark_node (after skipping the statement) on any other
   form.  */
static tree
cp_parser_omp_for_cond (cp_parser *parser, tree decl)
{
  tree lhs = cp_parser_cast_expression (parser, false, false), rhs;
  enum tree_code op;
  cp_token *token;

  /* The left operand must be exactly the loop variable.  */
  if (lhs != decl)
    {
      cp_parser_skip_to_end_of_statement (parser);
      return error_mark_node;
    }

  token = cp_lexer_peek_token (parser->lexer);
  op = binops_by_token [token->type].tree_type;
  switch (op)
    {
    case LT_EXPR:
    case LE_EXPR:
    case GT_EXPR:
    case GE_EXPR:
      break;
    default:
      cp_parser_skip_to_end_of_statement (parser);
      return error_mark_node;
    }

  cp_lexer_consume_token (parser->lexer);
  rhs = cp_parser_binary_expression (parser, false,
                                     PREC_RELATIONAL_EXPRESSION);
  if (rhs == error_mark_node
      || cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
    {
      cp_parser_skip_to_end_of_statement (parser);
      return error_mark_node;
    }

  return build2 (op, boolean_type_node, lhs, rhs);
}
/* Helper function to parse the increment expression of an OpenMP for
   loop when the iteration variable DECL is an iterator-like object.
   Accepts ++DECL, --DECL, DECL++, DECL--, DECL op= expr, and
   DECL = expr-involving-DECL (DECL plus/minus other terms, on either
   side).  Builds trees without folding so finish_omp_for can analyze
   them; returns error_mark_node on any other form.  */
static tree
cp_parser_omp_for_incr (cp_parser *parser, tree decl)
{
  cp_token *token = cp_lexer_peek_token (parser->lexer);
  enum tree_code op;
  tree lhs, rhs;
  cp_id_kind idk;
  bool decl_first;

  /* Prefix ++/--.  */
  if (token->type == CPP_PLUS_PLUS || token->type == CPP_MINUS_MINUS)
    {
      op = (token->type == CPP_PLUS_PLUS
            ? PREINCREMENT_EXPR : PREDECREMENT_EXPR);
      cp_lexer_consume_token (parser->lexer);
      lhs = cp_parser_cast_expression (parser, false, false);
      if (lhs != decl)
        return error_mark_node;
      return build2 (op, TREE_TYPE (decl), decl, NULL_TREE);
    }

  lhs = cp_parser_primary_expression (parser, false, false, false, &idk);
  if (lhs != decl)
    return error_mark_node;

  /* Postfix ++/--.  */
  token = cp_lexer_peek_token (parser->lexer);
  if (token->type == CPP_PLUS_PLUS || token->type == CPP_MINUS_MINUS)
    {
      op = (token->type == CPP_PLUS_PLUS
            ? POSTINCREMENT_EXPR : POSTDECREMENT_EXPR);
      cp_lexer_consume_token (parser->lexer);
      return build2 (op, TREE_TYPE (decl), decl, NULL_TREE);
    }

  op = cp_parser_assignment_operator_opt (parser);
  if (op == ERROR_MARK)
    return error_mark_node;

  /* Compound assignment: DECL op= rhs becomes DECL = DECL op rhs.  */
  if (op != NOP_EXPR)
    {
      rhs = cp_parser_assignment_expression (parser, false);
      rhs = build2 (op, TREE_TYPE (decl), decl, rhs);
      return build2 (MODIFY_EXPR, TREE_TYPE (decl), decl, rhs);
    }

  /* Plain assignment DECL = a +/- b +/- ...  DECL must appear either
     first or last among the additive terms; the remaining terms are
     accumulated into LHS.  */
  lhs = cp_parser_binary_expression (parser, false,
                                     PREC_ADDITIVE_EXPRESSION);
  token = cp_lexer_peek_token (parser->lexer);
  decl_first = lhs == decl;
  if (decl_first)
    lhs = NULL_TREE;
  if (token->type != CPP_PLUS
      && token->type != CPP_MINUS)
    return error_mark_node;

  do
    {
      op = token->type == CPP_PLUS ? PLUS_EXPR : MINUS_EXPR;
      cp_lexer_consume_token (parser->lexer);
      rhs = cp_parser_binary_expression (parser, false,
                                         PREC_ADDITIVE_EXPRESSION);
      token = cp_lexer_peek_token (parser->lexer);
      /* Fold this term into LHS unless it might be the trailing DECL
         (only possible when DECL was not the first operand).  */
      if (token->type == CPP_PLUS || token->type == CPP_MINUS || decl_first)
        {
          if (lhs == NULL_TREE)
            {
              if (op == PLUS_EXPR)
                lhs = rhs;
              else
                lhs = build_x_unary_op (NEGATE_EXPR, rhs);
            }
          else
            lhs = build_x_binary_op (op, lhs, rhs, NULL);
        }
    }
  while (token->type == CPP_PLUS || token->type == CPP_MINUS);

  if (!decl_first)
    {
      /* DECL must be the final term, and "... - DECL" is not a valid
         increment form.  */
      if (rhs != decl || op == MINUS_EXPR)
        return error_mark_node;
      rhs = build2 (op, TREE_TYPE (decl), lhs, decl);
    }
  else
    rhs = build2 (PLUS_EXPR, TREE_TYPE (decl), decl, lhs);

  return build2 (MODIFY_EXPR, TREE_TYPE (decl), decl, rhs);
}
/* Parse the restricted form of the for statement allowed by OpenMP,
   including COLLAPSE-many perfectly nested loops.  CLAUSES are the
   clauses of the for/worksharing directive.  PAR_CLAUSES, when
   non-NULL, points to the enclosing parallel's clause list for
   combined parallel-for handling (firstprivate/lastprivate of the
   iteration variable is adjusted there).  Returns the OMP_FOR tree,
   or NULL/NULL_TREE on error.  */
static tree
cp_parser_omp_for_loop (cp_parser *parser, tree clauses, tree *par_clauses)
{
  tree init, cond, incr, body, decl, pre_body = NULL_TREE, ret;
  tree for_block = NULL_TREE, real_decl, initv, condv, incrv, declv;
  tree this_pre_body, cl;
  location_t loc_first;
  bool collapse_err = false;
  int i, collapse = 1, nbraces = 0;

  /* Pick up the collapse count, if any; it was validated when the
     clause was parsed.  */
  for (cl = clauses; cl; cl = OMP_CLAUSE_CHAIN (cl))
    if (OMP_CLAUSE_CODE (cl) == OMP_CLAUSE_COLLAPSE)
      collapse = tree_low_cst (OMP_CLAUSE_COLLAPSE_EXPR (cl), 0);

  gcc_assert (collapse >= 1);

  /* One slot per collapsed loop for decl/init/cond/incr.  */
  declv = make_tree_vec (collapse);
  initv = make_tree_vec (collapse);
  condv = make_tree_vec (collapse);
  incrv = make_tree_vec (collapse);

  loc_first = cp_lexer_peek_token (parser->lexer)->location;

  for (i = 0; i < collapse; i++)
    {
      int bracecount = 0;
      bool add_private_clause = false;
      location_t loc;

      if (!cp_lexer_next_token_is_keyword (parser->lexer, RID_FOR))
        {
          cp_parser_error (parser, "for statement expected");
          return NULL;
        }
      loc = cp_lexer_consume_token (parser->lexer)->location;

      if (!cp_parser_require (parser, CPP_OPEN_PAREN, "%<(%>"))
        return NULL;

      init = decl = real_decl = NULL;
      this_pre_body = push_stmt_list ();
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
        {
          cp_decl_specifier_seq type_specifiers;

          /* First, try to parse as an initialized declaration.  See
             cp_parser_condition, from whence the bulk of this is copied.  */
          cp_parser_parse_tentatively (parser);
          cp_parser_type_specifier_seq (parser, /*is_condition=*/false,
                                        &type_specifiers);
          if (!cp_parser_error_occurred (parser))
            {
              tree asm_specification, attributes;
              cp_declarator *declarator;

              declarator = cp_parser_declarator (parser,
                                                 CP_PARSER_DECLARATOR_NAMED,
                                                 /*ctor_dtor_or_conv_p=*/NULL,
                                                 /*parenthesized_p=*/NULL,
                                                 /*member_p=*/false);
              attributes = cp_parser_attributes_opt (parser);
              asm_specification = cp_parser_asm_specification_opt (parser);

              if (cp_lexer_next_token_is_not (parser->lexer, CPP_EQ))
                cp_parser_require (parser, CPP_EQ, "%<=%>");

              if (cp_parser_parse_definitely (parser))
                {
                  tree pushed_scope;

                  decl = start_decl (declarator, &type_specifiers,
                                     /*initialized_p=*/false, attributes,
                                     /*prefix_attributes=*/NULL_TREE,
                                     &pushed_scope);

                  /* Class-typed or type-dependent iteration variables
                     need a real initializer parse; the pre-body keeps
                     any constructor calls out of the loop init.  */
                  if (CLASS_TYPE_P (TREE_TYPE (decl))
                      || type_dependent_expression_p (decl))
                    {
                      bool is_parenthesized_init, is_non_constant_init;

                      init = cp_parser_initializer (parser,
                                                    &is_parenthesized_init,
                                                    &is_non_constant_init);

                      cp_finish_decl (decl, init, !is_non_constant_init,
                                      asm_specification,
                                      LOOKUP_ONLYCONVERTING);
                      if (CLASS_TYPE_P (TREE_TYPE (decl)))
                        {
                          for_block
                            = tree_cons (NULL, this_pre_body, for_block);
                          init = NULL_TREE;
                        }
                      else
                        init = pop_stmt_list (this_pre_body);
                      this_pre_body = NULL_TREE;
                    }
                  else
                    {
                      /* Consume '=' and the initializer expression.  */
                      cp_parser_require (parser, CPP_EQ, "%<=%>");
                      init = cp_parser_assignment_expression (parser, false);

                      if (TREE_CODE (TREE_TYPE (decl)) == REFERENCE_TYPE)
                        init = error_mark_node;
                      else
                        cp_finish_decl (decl, NULL_TREE,
                                        /*init_const_expr_p=*/false,
                                        asm_specification,
                                        LOOKUP_ONLYCONVERTING);
                    }

                  if (pushed_scope)
                    pop_scope (pushed_scope);
                }
            }
          else
            cp_parser_abort_tentative_parse (parser);

          /* If parsing as an initialized declaration failed, try again as
             a simple expression.  */
          if (decl == NULL)
            {
              cp_id_kind idk;
              cp_parser_parse_tentatively (parser);
              decl = cp_parser_primary_expression (parser, false, false,
                                                   false, &idk);
              if (!cp_parser_error_occurred (parser)
                  && decl
                  && DECL_P (decl)
                  && CLASS_TYPE_P (TREE_TYPE (decl)))
                {
                  tree rhs;

                  cp_parser_parse_definitely (parser);
                  cp_parser_require (parser, CPP_EQ, "%<=%>");
                  rhs = cp_parser_assignment_expression (parser, false);
                  finish_expr_stmt (build_x_modify_expr (decl, NOP_EXPR,
                                                         rhs));
                  /* A pre-existing class-typed variable used as the
                     iteration variable must be privatized below.  */
                  add_private_clause = true;
                }
              else
                {
                  decl = NULL;
                  cp_parser_abort_tentative_parse (parser);
                  init = cp_parser_expression (parser, false);
                  if (init)
                    {
                      if (TREE_CODE (init) == MODIFY_EXPR
                          || TREE_CODE (init) == MODOP_EXPR)
                        real_decl = TREE_OPERAND (init, 0);
                    }
                }
            }
        }
      cp_parser_require (parser, CPP_SEMICOLON, "%<;%>");
      if (this_pre_body)
        {
          this_pre_body = pop_stmt_list (this_pre_body);
          if (pre_body)
            {
              /* Append this loop's pre-body to the accumulated one.  */
              tree t = pre_body;
              pre_body = push_stmt_list ();
              add_stmt (t);
              add_stmt (this_pre_body);
              pre_body = pop_stmt_list (pre_body);
            }
          else
            pre_body = this_pre_body;
        }

      if (decl)
        real_decl = decl;
      /* Combined parallel-for: fix up clauses on the parallel that
         mention the iteration variable.  */
      if (par_clauses != NULL && real_decl != NULL_TREE)
        {
          tree *c;
          for (c = par_clauses; *c ; )
            if (OMP_CLAUSE_CODE (*c) == OMP_CLAUSE_FIRSTPRIVATE
                && OMP_CLAUSE_DECL (*c) == real_decl)
              {
                error ("%Hiteration variable %qD should not be firstprivate",
                       &loc, real_decl);
                *c = OMP_CLAUSE_CHAIN (*c);
              }
            else if (OMP_CLAUSE_CODE (*c) == OMP_CLAUSE_LASTPRIVATE
                     && OMP_CLAUSE_DECL (*c) == real_decl)
              {
                /* Add lastprivate (decl) clause to OMP_FOR_CLAUSES,
                   change it to shared (decl) in OMP_PARALLEL_CLAUSES.  */
                tree l = build_omp_clause (OMP_CLAUSE_LASTPRIVATE);
                OMP_CLAUSE_DECL (l) = real_decl;
                OMP_CLAUSE_CHAIN (l) = clauses;
                CP_OMP_CLAUSE_INFO (l) = CP_OMP_CLAUSE_INFO (*c);
                clauses = l;
                OMP_CLAUSE_SET_CODE (*c, OMP_CLAUSE_SHARED);
                CP_OMP_CLAUSE_INFO (*c) = NULL;
                add_private_clause = false;
              }
            else
              {
                if (OMP_CLAUSE_CODE (*c) == OMP_CLAUSE_PRIVATE
                    && OMP_CLAUSE_DECL (*c) == real_decl)
                  add_private_clause = false;
                c = &OMP_CLAUSE_CHAIN (*c);
              }
        }

      if (add_private_clause)
        {
          /* Privatize the iteration variable unless an existing
             private/lastprivate clause already covers it; diagnose
             firstprivate/reduction of it.  */
          tree c;
          for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
            {
              if ((OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
                   || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
                  && OMP_CLAUSE_DECL (c) == decl)
                break;
              else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE
                       && OMP_CLAUSE_DECL (c) == decl)
                error ("%Hiteration variable %qD should not be firstprivate",
                       &loc, decl);
              else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
                       && OMP_CLAUSE_DECL (c) == decl)
                error ("%Hiteration variable %qD should not be reduction",
                       &loc, decl);
            }
          if (c == NULL)
            {
              c = build_omp_clause (OMP_CLAUSE_PRIVATE);
              OMP_CLAUSE_DECL (c) = decl;
              c = finish_omp_clauses (c);
              if (c)
                {
                  OMP_CLAUSE_CHAIN (c) = clauses;
                  clauses = c;
                }
            }
        }

      cond = NULL;
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
        {
          /* If decl is an iterator, preserve LHS and RHS of the relational
             expr until finish_omp_for.  */
          if (decl
              && (type_dependent_expression_p (decl)
                  || CLASS_TYPE_P (TREE_TYPE (decl))))
            cond = cp_parser_omp_for_cond (parser, decl);
          else
            cond = cp_parser_condition (parser);
        }
      cp_parser_require (parser, CPP_SEMICOLON, "%<;%>");

      incr = NULL;
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_CLOSE_PAREN))
        {
          /* If decl is an iterator, preserve the operator on decl
             until finish_omp_for.  */
          if (decl
              && (type_dependent_expression_p (decl)
                  || CLASS_TYPE_P (TREE_TYPE (decl))))
            incr = cp_parser_omp_for_incr (parser, decl);
          else
            incr = cp_parser_expression (parser, false);
        }

      if (!cp_parser_require (parser, CPP_CLOSE_PAREN, "`)'"))
        cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true,
                                               /*or_comma=*/false,
                                               /*consume_paren=*/true);

      TREE_VEC_ELT (declv, i) = decl;
      TREE_VEC_ELT (initv, i) = init;
      TREE_VEC_ELT (condv, i) = cond;
      TREE_VEC_ELT (incrv, i) = incr;

      if (i == collapse - 1)
        break;

      /* FIXME: OpenMP 3.0 draft isn't very clear on what exactly is allowed
         in between the collapsed for loops to be still considered perfectly
         nested.  Hopefully the final version clarifies this.
         For now handle (multiple) {'s and empty statements.  */
      cp_parser_parse_tentatively (parser);
      do
        {
          if (cp_lexer_next_token_is_keyword (parser->lexer, RID_FOR))
            break;
          else if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
            {
              cp_lexer_consume_token (parser->lexer);
              bracecount++;
            }
          else if (bracecount
                   && cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
            cp_lexer_consume_token (parser->lexer);
          else
            {
              loc = cp_lexer_peek_token (parser->lexer)->location;
              error ("%Hnot enough collapsed for loops", &loc);
              collapse_err = true;
              cp_parser_abort_tentative_parse (parser);
              declv = NULL_TREE;
              break;
            }
        }
      while (1);

      if (declv)
        {
          cp_parser_parse_definitely (parser);
          nbraces += bracecount;
        }
    }

  /* Note that we saved the original contents of this flag when we entered
     the structured block, and so we don't need to re-save it here.  */
  parser->in_statement = IN_OMP_FOR;

  /* Note that the grammar doesn't call for a structured block here,
     though the loop as a whole is a structured block.  */
  body = push_stmt_list ();
  cp_parser_statement (parser, NULL_TREE, false);
  body = pop_stmt_list (body);

  if (declv == NULL_TREE)
    ret = NULL_TREE;
  else
    ret = finish_omp_for (loc_first, declv, initv, condv, incrv, body,
                          pre_body, clauses);

  /* Consume the close braces (and stray semicolons) that balanced the
     opens seen between collapsed loops.  */
  while (nbraces)
    {
      if (cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_BRACE))
        {
          cp_lexer_consume_token (parser->lexer);
          nbraces--;
        }
      else if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
        cp_lexer_consume_token (parser->lexer);
      else
        {
          if (!collapse_err)
            error ("collapsed loops not perfectly nested");
          collapse_err = true;
          cp_parser_statement_seq_opt (parser, NULL);
          cp_parser_require (parser, CPP_CLOSE_BRACE, "%<}%>");
        }
    }

  /* Emit the saved pre-bodies of class-typed iteration variables.  */
  while (for_block)
    {
      add_stmt (pop_stmt_list (TREE_VALUE (for_block)));
      for_block = TREE_CHAIN (for_block);
    }

  return ret;
}
/* OpenMP 2.5:
   #pragma omp for for-clause[optseq] new-line
   for-loop  */

/* Clauses accepted by the for directive.  */
#define OMP_FOR_CLAUSE_MASK \
( (1u << PRAGMA_OMP_CLAUSE_PRIVATE) \
| (1u << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE) \
| (1u << PRAGMA_OMP_CLAUSE_LASTPRIVATE) \
| (1u << PRAGMA_OMP_CLAUSE_REDUCTION) \
| (1u << PRAGMA_OMP_CLAUSE_ORDERED) \
| (1u << PRAGMA_OMP_CLAUSE_SCHEDULE) \
| (1u << PRAGMA_OMP_CLAUSE_NOWAIT) \
| (1u << PRAGMA_OMP_CLAUSE_COLLAPSE))

/* Parse a standalone "#pragma omp for": clauses, then the restricted
   for loop, inside an OpenMP structured block.  */
static tree
cp_parser_omp_for (cp_parser *parser, cp_token *pragma_tok)
{
  tree clauses, sb, ret;
  unsigned int save;

  clauses = cp_parser_omp_all_clauses (parser, OMP_FOR_CLAUSE_MASK,
                                       "#pragma omp for", pragma_tok);

  sb = begin_omp_structured_block ();
  save = cp_parser_begin_omp_structured_block (parser);

  ret = cp_parser_omp_for_loop (parser, clauses, NULL);

  cp_parser_end_omp_structured_block (parser, save);
  add_stmt (finish_omp_structured_block (sb));

  return ret;
}
/* OpenMP 2.5:
   # pragma omp master new-line
   structured-block

   The master directive takes no clauses.  */
static tree
cp_parser_omp_master (cp_parser *parser, cp_token *pragma_tok)
{
  cp_parser_require_pragma_eol (parser, pragma_tok);
  return c_finish_omp_master (cp_parser_omp_structured_block (parser));
}
/* OpenMP 2.5:
   # pragma omp ordered new-line
   structured-block

   The ordered directive (as a statement; the ordered *clause* is
   handled separately) takes no clauses.  */
static tree
cp_parser_omp_ordered (cp_parser *parser, cp_token *pragma_tok)
{
  cp_parser_require_pragma_eol (parser, pragma_tok);
  return c_finish_omp_ordered (cp_parser_omp_structured_block (parser));
}
/* OpenMP 2.5:
   section-scope:
   { section-sequence }

   section-sequence:
   section-directive[opt] structured-block
   section-sequence section-directive structured-block

   Parse the braced body of a sections construct into an OMP_SECTIONS
   node containing one OMP_SECTION per section.  Returns the
   OMP_SECTIONS tree, or NULL_TREE if the opening brace is missing.  */
static tree
cp_parser_omp_sections_scope (cp_parser *parser)
{
  tree stmt, substmt;
  bool error_suppress = false;
  cp_token *tok;

  if (!cp_parser_require (parser, CPP_OPEN_BRACE, "`{'"))
    return NULL_TREE;

  stmt = push_stmt_list ();

  /* The first section directive is optional: statements before it form
     an implicit first section.  */
  if (cp_lexer_peek_token (parser->lexer)->pragma_kind != PRAGMA_OMP_SECTION)
    {
      unsigned save;

      substmt = begin_omp_structured_block ();
      save = cp_parser_begin_omp_structured_block (parser);

      while (1)
        {
          cp_parser_statement (parser, NULL_TREE, false);

          tok = cp_lexer_peek_token (parser->lexer);
          if (tok->pragma_kind == PRAGMA_OMP_SECTION)
            break;
          if (tok->type == CPP_CLOSE_BRACE)
            break;
          if (tok->type == CPP_EOF)
            break;
        }

      cp_parser_end_omp_structured_block (parser, save);
      substmt = finish_omp_structured_block (substmt);
      substmt = build1 (OMP_SECTION, void_type_node, substmt);
      add_stmt (substmt);
    }

  while (1)
    {
      tok = cp_lexer_peek_token (parser->lexer);
      if (tok->type == CPP_CLOSE_BRACE)
        break;
      if (tok->type == CPP_EOF)
        break;

      if (tok->pragma_kind == PRAGMA_OMP_SECTION)
        {
          cp_lexer_consume_token (parser->lexer);
          cp_parser_require_pragma_eol (parser, tok);
          error_suppress = false;
        }
      else if (!error_suppress)
        {
          /* Diagnose a missing section directive once, not for every
             following statement.  */
          cp_parser_error (parser, "expected %<#pragma omp section%> or %<}%>");
          error_suppress = true;
        }

      substmt = cp_parser_omp_structured_block (parser);
      substmt = build1 (OMP_SECTION, void_type_node, substmt);
      add_stmt (substmt);
    }
  cp_parser_require (parser, CPP_CLOSE_BRACE, "`}'");

  substmt = pop_stmt_list (stmt);

  stmt = make_node (OMP_SECTIONS);
  TREE_TYPE (stmt) = void_type_node;
  OMP_SECTIONS_BODY (stmt) = substmt;

  add_stmt (stmt);
  return stmt;
}
/* OpenMP 2.5:
   # pragma omp sections sections-clause[optseq] newline
   sections-scope  */

/* Clauses accepted by the sections directive.  */
#define OMP_SECTIONS_CLAUSE_MASK \
( (1u << PRAGMA_OMP_CLAUSE_PRIVATE) \
| (1u << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE) \
| (1u << PRAGMA_OMP_CLAUSE_LASTPRIVATE) \
| (1u << PRAGMA_OMP_CLAUSE_REDUCTION) \
| (1u << PRAGMA_OMP_CLAUSE_NOWAIT))

/* Parse a standalone "#pragma omp sections" directive.  */
static tree
cp_parser_omp_sections (cp_parser *parser, cp_token *pragma_tok)
{
  tree clauses, ret;

  clauses = cp_parser_omp_all_clauses (parser, OMP_SECTIONS_CLAUSE_MASK,
                                       "#pragma omp sections", pragma_tok);

  ret = cp_parser_omp_sections_scope (parser);
  if (ret)
    OMP_SECTIONS_CLAUSES (ret) = clauses;

  return ret;
}
/* OpenMP 2.5:
   # pragma omp parallel parallel-clause new-line
   # pragma omp parallel for parallel-for-clause new-line
   # pragma omp parallel sections parallel-sections-clause new-line  */

/* Clauses accepted by the parallel directive.  */
#define OMP_PARALLEL_CLAUSE_MASK \
( (1u << PRAGMA_OMP_CLAUSE_IF) \
| (1u << PRAGMA_OMP_CLAUSE_PRIVATE) \
| (1u << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE) \
| (1u << PRAGMA_OMP_CLAUSE_DEFAULT) \
| (1u << PRAGMA_OMP_CLAUSE_SHARED) \
| (1u << PRAGMA_OMP_CLAUSE_COPYIN) \
| (1u << PRAGMA_OMP_CLAUSE_REDUCTION) \
| (1u << PRAGMA_OMP_CLAUSE_NUM_THREADS))

/* Parse a parallel directive, including the combined
   "parallel for" and "parallel sections" forms.  For the combined
   forms the accepted clause mask is widened with the worksharing
   directive's clauses (minus nowait), and the parsed clauses are
   split between the parallel and the worksharing construct.  */
static tree
cp_parser_omp_parallel (cp_parser *parser, cp_token *pragma_tok)
{
  enum pragma_kind p_kind = PRAGMA_OMP_PARALLEL;
  const char *p_name = "#pragma omp parallel";
  tree stmt, clauses, par_clause, ws_clause, block;
  unsigned int mask = OMP_PARALLEL_CLAUSE_MASK;
  unsigned int save;

  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_FOR))
    {
      cp_lexer_consume_token (parser->lexer);
      p_kind = PRAGMA_OMP_PARALLEL_FOR;
      p_name = "#pragma omp parallel for";
      mask |= OMP_FOR_CLAUSE_MASK;
      /* nowait is meaningless on a combined construct.  */
      mask &= ~(1u << PRAGMA_OMP_CLAUSE_NOWAIT);
    }
  else if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
    {
      tree id = cp_lexer_peek_token (parser->lexer)->u.value;
      const char *p = IDENTIFIER_POINTER (id);
      if (strcmp (p, "sections") == 0)
        {
          cp_lexer_consume_token (parser->lexer);
          p_kind = PRAGMA_OMP_PARALLEL_SECTIONS;
          p_name = "#pragma omp parallel sections";
          mask |= OMP_SECTIONS_CLAUSE_MASK;
          mask &= ~(1u << PRAGMA_OMP_CLAUSE_NOWAIT);
        }
    }

  clauses = cp_parser_omp_all_clauses (parser, mask, p_name, pragma_tok);
  block = begin_omp_parallel ();
  save = cp_parser_begin_omp_structured_block (parser);

  switch (p_kind)
    {
    case PRAGMA_OMP_PARALLEL:
      cp_parser_already_scoped_statement (parser);
      par_clause = clauses;
      break;

    case PRAGMA_OMP_PARALLEL_FOR:
      c_split_parallel_clauses (clauses, &par_clause, &ws_clause);
      cp_parser_omp_for_loop (parser, ws_clause, &par_clause);
      break;

    case PRAGMA_OMP_PARALLEL_SECTIONS:
      c_split_parallel_clauses (clauses, &par_clause, &ws_clause);
      stmt = cp_parser_omp_sections_scope (parser);
      if (stmt)
        OMP_SECTIONS_CLAUSES (stmt) = ws_clause;
      break;

    default:
      gcc_unreachable ();
    }

  cp_parser_end_omp_structured_block (parser, save);
  stmt = finish_omp_parallel (par_clause, block);
  if (p_kind != PRAGMA_OMP_PARALLEL)
    OMP_PARALLEL_COMBINED (stmt) = 1;
  return stmt;
}
/* OpenMP 2.5:
   # pragma omp single single-clause[optseq] new-line
   structured-block  */

/* Clauses accepted by the single directive.  */
#define OMP_SINGLE_CLAUSE_MASK \
( (1u << PRAGMA_OMP_CLAUSE_PRIVATE) \
| (1u << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE) \
| (1u << PRAGMA_OMP_CLAUSE_COPYPRIVATE) \
| (1u << PRAGMA_OMP_CLAUSE_NOWAIT))

/* Parse a "#pragma omp single" directive and its body.  */
static tree
cp_parser_omp_single (cp_parser *parser, cp_token *pragma_tok)
{
  tree stmt = make_node (OMP_SINGLE);
  TREE_TYPE (stmt) = void_type_node;

  OMP_SINGLE_CLAUSES (stmt)
    = cp_parser_omp_all_clauses (parser, OMP_SINGLE_CLAUSE_MASK,
                                 "#pragma omp single", pragma_tok);
  OMP_SINGLE_BODY (stmt) = cp_parser_omp_structured_block (parser);

  return add_stmt (stmt);
}
/* OpenMP 3.0:
# pragma omp task task-clause[optseq] new-line
structured-block */
/* Clauses accepted on "#pragma omp task".  */
#define OMP_TASK_CLAUSE_MASK \
( (1u << PRAGMA_OMP_CLAUSE_IF) \
| (1u << PRAGMA_OMP_CLAUSE_UNTIED) \
| (1u << PRAGMA_OMP_CLAUSE_DEFAULT) \
| (1u << PRAGMA_OMP_CLAUSE_PRIVATE) \
| (1u << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE) \
| (1u << PRAGMA_OMP_CLAUSE_SHARED))
/* Parse "#pragma omp task": parse the clauses, then the task body as a
statement inside a fresh OpenMP structured block, and build the
OMP_TASK node.  */
static tree
cp_parser_omp_task (cp_parser *parser, cp_token *pragma_tok)
{
tree clauses, block;
unsigned int save;
clauses = cp_parser_omp_all_clauses (parser, OMP_TASK_CLAUSE_MASK,
"#pragma omp task", pragma_tok);
block = begin_omp_task ();
save = cp_parser_begin_omp_structured_block (parser);
cp_parser_statement (parser, NULL_TREE, false);
cp_parser_end_omp_structured_block (parser, save);
return finish_omp_task (clauses, block);
}
/* OpenMP 3.0:
# pragma omp taskwait new-line */
/* Parse "#pragma omp taskwait".  The directive takes no clauses, so we
only require the end-of-pragma token before emitting the taskwait.  */
static void
cp_parser_omp_taskwait (cp_parser *parser, cp_token *pragma_tok)
{
cp_parser_require_pragma_eol (parser, pragma_tok);
finish_omp_taskwait ();
}
/* OpenMP 2.5:
# pragma omp threadprivate (variable-list) */
/* Parse "#pragma omp threadprivate": collect the variable list and
register each variable as threadprivate.  A sorry() is emitted when
the target provides no thread-local storage.  */
static void
cp_parser_omp_threadprivate (cp_parser *parser, cp_token *pragma_tok)
{
tree vars;
vars = cp_parser_omp_var_list (parser, 0, NULL);
cp_parser_require_pragma_eol (parser, pragma_tok);
if (!targetm.have_tls)
sorry ("threadprivate variables not supported in this target");
finish_omp_threadprivate (vars);
}
/* Main entry point to OpenMP statement pragmas. */
/* Dispatch PRAGMA_TOK to the parsing routine for its OpenMP statement
construct and stamp the pragma's source location on the resulting
statement.  "#pragma omp atomic" adds its own statement, so it
returns early.  */
static void
cp_parser_omp_construct (cp_parser *parser, cp_token *pragma_tok)
{
tree stmt;
switch (pragma_tok->pragma_kind)
{
case PRAGMA_OMP_ATOMIC:
cp_parser_omp_atomic (parser, pragma_tok);
return;
case PRAGMA_OMP_CRITICAL:
stmt = cp_parser_omp_critical (parser, pragma_tok);
break;
case PRAGMA_OMP_FOR:
stmt = cp_parser_omp_for (parser, pragma_tok);
break;
case PRAGMA_OMP_MASTER:
stmt = cp_parser_omp_master (parser, pragma_tok);
break;
case PRAGMA_OMP_ORDERED:
stmt = cp_parser_omp_ordered (parser, pragma_tok);
break;
case PRAGMA_OMP_PARALLEL:
stmt = cp_parser_omp_parallel (parser, pragma_tok);
break;
case PRAGMA_OMP_SECTIONS:
stmt = cp_parser_omp_sections (parser, pragma_tok);
break;
case PRAGMA_OMP_SINGLE:
stmt = cp_parser_omp_single (parser, pragma_tok);
break;
case PRAGMA_OMP_TASK:
stmt = cp_parser_omp_task (parser, pragma_tok);
break;
default:
gcc_unreachable ();
}
/* Record the pragma's location on the statement for diagnostics.  */
if (stmt)
SET_EXPR_LOCATION (stmt, pragma_tok->location);
}
/* The parser. */
/* The single global parser instance: created in c_parse_file and
cleared again once the translation unit has been parsed.  */
static GTY (()) cp_parser *the_parser;
/* Special handling for the first token or line in the file. The first
thing in the file might be #pragma GCC pch_preprocess, which loads a
PCH file, which is a GC collection point. So we need to handle this
first pragma without benefit of an existing lexer structure.
Always returns one token to the caller in *FIRST_TOKEN. This is
either the true first token of the file, or the first token after
the initial pragma. */
static void
cp_parser_initial_pragma (cp_token *first_token)
{
tree name = NULL;
/* Peek at the very first token of the file.  */
cp_lexer_get_preprocessor_token (NULL, first_token);
if (first_token->pragma_kind != PRAGMA_GCC_PCH_PREPROCESS)
return;
/* The pragma's argument must be a single string literal naming the
PCH file.  */
cp_lexer_get_preprocessor_token (NULL, first_token);
if (first_token->type == CPP_STRING)
{
name = first_token->u.value;
cp_lexer_get_preprocessor_token (NULL, first_token);
if (first_token->type != CPP_PRAGMA_EOL)
error ("junk at end of %<#pragma GCC pch_preprocess%>");
}
else
error ("expected string literal");
/* Skip to the end of the pragma. */
while (first_token->type != CPP_PRAGMA_EOL && first_token->type != CPP_EOF)
cp_lexer_get_preprocessor_token (NULL, first_token);
/* Now actually load the PCH file. */
if (name)
c_common_pch_pragma (parse_in, TREE_STRING_POINTER (name));
/* Read one more token to return to our caller. We have to do this
after reading the PCH file in, since its pointers have to be
live. */
cp_lexer_get_preprocessor_token (NULL, first_token);
}
/* Normal parsing of a pragma token. Here we can (and must) use the
regular lexer.  Returns true only when the pragma is an OpenMP
statement-level construct that produced a statement; in every other
case the remainder of the pragma line is skipped and false is
returned.  CONTEXT says where in the grammar the pragma appeared.  */
static bool
cp_parser_pragma (cp_parser *parser, enum pragma_context context)
{
cp_token *pragma_tok;
unsigned int id;
pragma_tok = cp_lexer_consume_token (parser->lexer);
gcc_assert (pragma_tok->type == CPP_PRAGMA);
/* Let the lexer know the following tokens belong to a pragma line.  */
parser->lexer->in_pragma = true;
id = pragma_tok->pragma_kind;
switch (id)
{
case PRAGMA_GCC_PCH_PREPROCESS:
/* Valid only as the first thing in the file; that case is handled
by cp_parser_initial_pragma.  */
error ("%<#pragma GCC pch_preprocess%> must be first");
break;
/* barrier, flush and taskwait are stand-alone directives: allowed
inside a compound statement only.  */
case PRAGMA_OMP_BARRIER:
switch (context)
{
case pragma_compound:
cp_parser_omp_barrier (parser, pragma_tok);
return false;
case pragma_stmt:
error ("%<#pragma omp barrier%> may only be "
"used in compound statements");
break;
default:
goto bad_stmt;
}
break;
case PRAGMA_OMP_FLUSH:
switch (context)
{
case pragma_compound:
cp_parser_omp_flush (parser, pragma_tok);
return false;
case pragma_stmt:
error ("%<#pragma omp flush%> may only be "
"used in compound statements");
break;
default:
goto bad_stmt;
}
break;
case PRAGMA_OMP_TASKWAIT:
switch (context)
{
case pragma_compound:
cp_parser_omp_taskwait (parser, pragma_tok);
return false;
case pragma_stmt:
error ("%<#pragma omp taskwait%> may only be "
"used in compound statements");
break;
default:
goto bad_stmt;
}
break;
case PRAGMA_OMP_THREADPRIVATE:
cp_parser_omp_threadprivate (parser, pragma_tok);
return false;
/* The OpenMP statement constructs; not allowed at file scope.  */
case PRAGMA_OMP_ATOMIC:
case PRAGMA_OMP_CRITICAL:
case PRAGMA_OMP_FOR:
case PRAGMA_OMP_MASTER:
case PRAGMA_OMP_ORDERED:
case PRAGMA_OMP_PARALLEL:
case PRAGMA_OMP_SECTIONS:
case PRAGMA_OMP_SINGLE:
case PRAGMA_OMP_TASK:
if (context == pragma_external)
goto bad_stmt;
cp_parser_omp_construct (parser, pragma_tok);
return true;
case PRAGMA_OMP_SECTION:
error ("%<#pragma omp section%> may only be used in "
"%<#pragma omp sections%> construct");
break;
default:
/* Not a pragma this parser knows about; hand it to the registered
external pragma handlers.  */
gcc_assert (id >= PRAGMA_FIRST_EXTERNAL);
c_invoke_pragma_handler (id);
break;
bad_stmt:
cp_parser_error (parser, "expected declaration specifiers");
break;
}
cp_parser_skip_to_pragma_eol (parser, pragma_tok);
return false;
}
/* The interface the pragma parsers have to the lexer.  Returns the type
of the next token of the pragma and stores its value in *VALUE.
CPP_PRAGMA_EOL is reported as CPP_EOF, string literals are parsed via
cp_parser_string_literal, and keywords are reported as CPP_NAME.  The
token is consumed except at end of pragma/file.  */
enum cpp_ttype
pragma_lex (tree *value)
{
cp_token *tok;
enum cpp_ttype ret;
tok = cp_lexer_peek_token (the_parser->lexer);
ret = tok->type;
*value = tok->u.value;
if (ret == CPP_PRAGMA_EOL || ret == CPP_EOF)
ret = CPP_EOF;
else if (ret == CPP_STRING)
*value = cp_parser_string_literal (the_parser, false, false);
else
{
cp_lexer_consume_token (the_parser->lexer);
if (ret == CPP_KEYWORD)
ret = CPP_NAME;
}
return ret;
}
/* External interface. */
/* Parse one entire translation unit.  May be called only once per
process: inter-module (multi-file) compilation is not supported for
C++ and triggers a sorry() on the second call.  Errors are reported
through the diagnostic machinery, so the boolean result of
cp_parser_translation_unit is not needed here (the previous unused
local that captured it has been removed).  */
void
c_parse_file (void)
{
  static bool already_called = false;
  if (already_called)
    {
      sorry ("inter-module optimizations not implemented for C++");
      return;
    }
  already_called = true;
  the_parser = cp_parser_new ();
  /* Defer (or skip) access checks according to -f[no-]access-control.  */
  push_deferring_access_checks (flag_access_control
				? dk_no_deferred : dk_no_check);
  cp_parser_translation_unit (the_parser);
  the_parser = NULL;
}
/* This variable must be provided by every front end. */
int yydebug;
#include "gt-cp-parser.h"
|
DRB005-indirectaccess1-orig-yes.c | /*
Copyright (C) 1991-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>.
*/
/*
This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it.
*/
/*
glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default.
*/
/*
wchar_t uses Unicode 10.0.0. Version 10.0 of the Unicode Standard is
synchronized with ISO/IEC 10646:2017, fifth edition, plus
the following additions from Amendment 1 to the fifth edition:
- 56 emoji characters
- 285 hentaigana
- 3 additional Zanabazar Square characters
*/
/*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
This program is extracted from a real application at LLNL.
Two pointers (xa1 and xa2) have a pair of values with a distance of 12.
They are used as start base addresses for two 1-D arrays.
Their index set has two indices with distance of 12: 999 +12 = 1011.
So there is loop carried dependence.
However, having loop carried dependence does not mean data races will always happen.
The iterations with loop carried dependence must be scheduled to
different threads in order for data races to happen.
In this example, we use schedule(static,1) to increase the chance that
the dependent loop iterations will be scheduled to different threads.
Data race pair: xa1[idx]@128:5 vs. xa2[idx]@129:5
*/
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
/* change original 921 to 923 = 911+12 */
int indexSet[180] = {521, 523, 525, 527, 529, 531, 547, 549, 551, 553, 555, 557, 573, 575, 577, 579, 581, 583, 599, 601, 603, 605, 607, 609, 625, 627, 629, 631, 633, 635, 651, 653, 655, 657, 659, 661, 859, 861, 863, 865, 867, 869, 885, 887, 889, 891, 893, 895, 911, 913, 915, 917, 919, 923, 937, 939, 941, 943, 945, 947, 963, 965, 967, 969, 971, 973, 989, 991, 993, 995, 997, 999, 1197, 1199, 1201, 1203, 1205, 1207, 1223, 1225, 1227, 1229, 1231, 1233, 1249, 1251, 1253, 1255, 1257, 1259, 1275, 1277, 1279, 1281, 1283, 1285, 1301, 1303, 1305, 1307, 1309, 1311, 1327, 1329, 1331, 1333, 1335, 1337, 1535, 1537, 1539, 1541, 1543, 1545, 1561, 1563, 1565, 1567, 1569, 1571, 1587, 1589, 1591, 1593, 1595, 1597, 1613, 1615, 1617, 1619, 1621, 1623, 1639, 1641, 1643, 1645, 1647, 1649, 1665, 1667, 1669, 1671, 1673, 1675, 1873, 1875, 1877, 1879, 1881, 1883, 1899, 1901, 1903, 1905, 1907, 1909, 1925, 1927, 1929, 1931, 1933, 1935, 1951, 1953, 1955, 1957, 1959, 1961, 1977, 1979, 1981, 1983, 1985, 1987, 2003, 2005, 2007, 2009, 2011, 2013};
int main(int argc, char * argv[])
{
/* max index value is 2013. +1 to ensure a reference like base[2015] */
/* Pointers will never access the same offset as (xa2 = base + 2014). */
double * base = (double * )malloc(sizeof (double)*(((2013+1)+2013)+1));
double * xa1 = base;
double * xa2 = xa1+2014;
/* NOTE(review): the original DRB005 benchmark offsets xa2 by 12 so that
xa1[1011] and xa2[999] collide (999 + 12 == 1011).  With an offset of
2014 and indexSet values <= 2013 the two views touch disjoint
offsets, and xa2[idx] reads base[2535..4027], which the init loop
below never writes -- confirm this copy is intended.  */
int i;
int _ret_val_0;
if (base==0)
{
printf("Error in malloc(). Aborting ...\n");
_ret_val_0=1;
return _ret_val_0;
}
/* initialize segments touched by indexSet */
/* This loop writes base[521..2025] only.  */
#pragma loop name main#0
#pragma cetus parallel
#pragma omp parallel for
for (i=521; i<=2025; ++ i)
{
base[i]=(0.5*i);
}
/* default static even scheduling may not trigger data race, using static,1 instead. */
/* NOTE(review): despite the comment above, no
"#pragma omp parallel for schedule(static,1)" is attached to this
loop in this copy, so it executes serially here.  */
#pragma loop name main#1
for (i=0; i<180; ++ i)
{
int idx = indexSet[i];
xa1[idx]+=(1.0+i);
xa2[idx]+=(3.0+i);
}
printf("x1[999]=%f xa2[1285]=%f\n", xa1[999], xa2[1285]);
free(base);
_ret_val_0=0;
return _ret_val_0;
}
|
stt.c | #include "common_clib.h"
/* Calculation of the STT field using the Zhang Li formalism, which
* is
* ( \vec{j}_s \cdot \nabla ) \vec{m}
*
* where j_s is the current vector in 3D
* For this computation we use the neighbours matrix to make easier the
* derivative calculation at the boundaries.
* ** For now this only works with the CUBOID mesh
*
* We have two derivatives, d/dx and d/dy which we approximate using a
* second order finite difference. Thus, we must distinguish the boundary
* cases where there is a single neighbour:
*
* - With two neighbours, i.e. in the inner region of the mesh,
* the derivatives are simply:
* f(x + 1, y, z) - f(x - 1, y, z) / 2 dx
* f(x, y + 1, z) - f(x, y - 1, z) / 2 dy
* f(x, y, z + 1) - f(x, y, z - 1) / 2 dz
*
* where x+-1 is the neighbour to the left or right.
*
* - With a single neighbour, it must be changed to
* f(x, y, z) - f(x - 1, y, z) / dx --> no NN to the right
* OR
* f(x + 1, y, z) - f(x, y, z) / dx --> no NN to the left
* (e.g. no PBC for the 0th
* spin in a mesh)
*
* In the code, we denote nn_x1 for the index of the NN at the left of the
* i-th site and nn_x2 for the NN to the right. These variables are simply
* the i-th index (in the corresponding cases) when there is a single NN
*
* Similar for y and z
*
* n is the number of spins or lattice sites in the system
*
* Vector Fields f have 3 * n entries (e.g. magnetisation=spin, stt field)
* in the order: [fx0, fy0, fz0, fx1, fy1, fz1, fx2, ...]
*
* Scalar fields have n entries
*
* The nearest neighbour matrix contains 6 * n indexes, that are the indexes
* of the 6 NNs for every lattice site.
* The order of this array is:
*
* ngbs = [ x0-1 x0+1 y0-1 y0+1 z0-1 z0+1 x1-1 y1-1 ...]
*
* where xi-1 is the index of the NN in the -x direction of the i-th site
* Neighbouring sites where there is no material, has index -1
*
*/
/* One-axis contribution to the Zhang-Li STT field.
 *
 * Adds jq * d(spin)/dq (three vector components) into
 * field[3*i .. 3*i+2], using a central difference when both neighbours
 * exist and a one-sided difference when only one does.  nl / nr are the
 * neighbour indices in the -q / +q direction, with -1 meaning "no
 * neighbour" (no material there).  A site with no neighbour along the
 * axis contributes nothing.  */
static void add_axis_derivative(double *restrict field, const double *restrict spin,
                                double jq, double delta, int i, int nl, int nr) {
    int a, b;          /* sites entering the finite difference f(b) - f(a) */
    double denom;      /* effective spacing of the stencil */

    if (nl >= 0 && nr >= 0) {
        /* Two neighbours: (f(q+1) - f(q-1)) / (2 dq). */
        a = nl; b = nr; denom = 2 * delta;
    } else if (nl >= 0) {
        /* No neighbour to the right: (f(q) - f(q-1)) / dq. */
        a = nl; b = i; denom = delta;
    } else if (nr >= 0) {
        /* No neighbour to the left: (f(q+1) - f(q)) / dq. */
        a = i; b = nr; denom = delta;
    } else {
        return;  /* isolated along this axis */
    }

    for (int c = 0; c < 3; c++)
        field[3 * i + c] += jq * (spin[3 * b + c] - spin[3 * a + c]) / denom;
}

/* Zhang-Li spin-transfer-torque field, field = (j . grad) m, on a cuboid
 * mesh.  spin and field hold 3*n doubles (x, y, z per site); jx, jy, jz
 * hold n doubles each; ngbs holds 6 neighbour indices per site in the
 * order -x, +x, -y, +y, -z, +z, with -1 marking a missing neighbour.  */
void compute_stt_field_c(double *restrict spin, double *restrict field, double *restrict jx, double *restrict jy, double *restrict jz,
double dx, double dy, double dz, int *restrict ngbs, int n) {
    /* Clear the output before accumulating. */
    //#pragma omp parallel for
    for (int k = 0; k < 3 * n; k++) {
        field[k] = 0;
    }

    /* Lattice sites are independent of each other. */
    #pragma omp parallel for
    for (int i = 0; i < n; i++) {
        const int *nb = &ngbs[6 * i];
        add_axis_derivative(field, spin, jx[i], dx, i, nb[0], nb[1]);
        add_axis_derivative(field, spin, jy[i], dy, i, nb[2], nb[3]);
        add_axis_derivative(field, spin, jz[i], dz, i, nb[4], nb[5]);
    }
}
/* Landau-Lifshitz-Gilbert right-hand side with a spin-transfer-torque
 * contribution driven by the field h_stt (scaled by u0).  For each of
 * the n sites the three components of dm/dt are written into dm_dt.
 * m, h, h_stt and dm_dt hold 3*n doubles; alpha holds the per-site
 * damping.  cross_x/y/z come from common_clib.h and presumably return
 * the components of the vector cross product -- confirm there.  */
void llg_stt_rhs(double *restrict dm_dt, double *restrict m, double *restrict h, double *restrict h_stt,
                 double *restrict alpha, double beta, double u0, double gamma, int n) {
    #pragma omp parallel for
    for (int site = 0; site < n; site++) {
        const int x = 3 * site;
        const int y = 3 * site + 1;
        const int z = 3 * site + 2;
        const double damp = alpha[site];

        /* Precession prefactor -gamma / (1 + alpha^2). */
        const double coef = -gamma / (1 + damp * damp);
        const double m_sq = m[x] * m[x] + m[y] * m[y] + m[z] * m[z];
        const double m_dot_h = m[x] * h[x] + m[y] * h[y] + m[z] * h[z];

        /* hp = |m|^2 h - (m.h) m = -m x (m x h). */
        double hp_x = m_sq * h[x] - m_dot_h * m[x];
        double hp_y = m_sq * h[y] - m_dot_h * m[y];
        double hp_z = m_sq * h[z] - m_dot_h * m[z];

        double t_x = cross_x(m[x], m[y], m[z], hp_x, hp_y, hp_z);
        double t_y = cross_y(m[x], m[y], m[z], hp_x, hp_y, hp_z);
        double t_z = cross_z(m[x], m[y], m[z], hp_x, hp_y, hp_z);

        /* Standard LLG part: damped precession. */
        dm_dt[x] = coef * (t_x - hp_x * damp);
        dm_dt[y] = coef * (t_y - hp_y * damp);
        dm_dt[z] = coef * (t_z - hp_z * damp);

        /* Spin-transfer-torque part driven by h_stt. */
        const double coef_stt = u0 / (1 + damp * damp);
        const double m_dot_ht = m[x] * h_stt[x] + m[y] * h_stt[y] + m[z] * h_stt[z];
        hp_x = m_sq * h_stt[x] - m_dot_ht * m[x];
        hp_y = m_sq * h_stt[y] - m_dot_ht * m[y];
        hp_z = m_sq * h_stt[z] - m_dot_ht * m[z];
        t_x = cross_x(m[x], m[y], m[z], hp_x, hp_y, hp_z);
        t_y = cross_y(m[x], m[y], m[z], hp_x, hp_y, hp_z);
        t_z = cross_z(m[x], m[y], m[z], hp_x, hp_y, hp_z);
        dm_dt[x] += coef_stt * ((1 + damp * beta) * hp_x - (beta - damp) * t_x);
        dm_dt[y] += coef_stt * ((1 + damp * beta) * hp_y - (beta - damp) * t_y);
        dm_dt[z] += coef_stt * ((1 + damp * beta) * hp_z - (beta - damp) * t_z);

        /* Correction term proportional to (1 - |m|^2). */
        const double corr = 6 * sqrt(dm_dt[x] * dm_dt[x] +
                                     dm_dt[y] * dm_dt[y] +
                                     dm_dt[z] * dm_dt[z]);
        dm_dt[x] += corr * (1 - m_sq) * m[x];
        dm_dt[y] += corr * (1 - m_sq) * m[y];
        dm_dt[z] += corr * (1 - m_sq) * m[z];
    }
}
/* LLG right-hand side with a spin-transfer torque driven by the
polarisation field p, scaled per site by a_J.  Same structure as
llg_stt_rhs above, with two differences: sites flagged in pins
(pins[index] > 0) are frozen (dm_dt = 0), and the torque source is
a_J[index] * p rather than u0 * h_stt.  cross_x/y/z come from
common_clib.h and presumably return the components of the vector
cross product -- confirm there.  */
void llg_stt_cpp(double *restrict dm_dt, double *restrict m, double *restrict h, double *restrict p,
double *restrict alpha, int *restrict pins, double *restrict a_J, double beta, double gamma, int n) {
#pragma omp parallel for
for (int index = 0; index < n; index++) {
int i = 3 * index;
int j = 3 * index + 1;
int k = 3 * index + 2;
/* Pinned sites do not evolve.  */
if (pins[index]>0){
dm_dt[i] = 0;
dm_dt[j] = 0;
dm_dt[k] = 0;
continue;
}
double coeff = -gamma / (1 + alpha[index] * alpha[index]);
double mm = m[i] * m[i] + m[j] * m[j] + m[k] * m[k];
double mh = m[i] * h[i] + m[j] * h[j] + m[k] * h[k];
//hp=mm.h-mh.m=-mx(mxh)
double hpi = mm*h[i] - mh*m[i];
double hpj = mm*h[j] - mh*m[j];
double hpk = mm*h[k] - mh*m[k];
double mth0 = cross_x(m[i], m[j], m[k], hpi, hpj, hpk);
double mth1 = cross_y(m[i], m[j], m[k], hpi, hpj, hpk);
double mth2 = cross_z(m[i], m[j], m[k], hpi, hpj, hpk);
/* Damped precession.  */
dm_dt[i] = coeff * (mth0 - hpi * alpha[index]);
dm_dt[j] = coeff * (mth1 - hpj * alpha[index]);
dm_dt[k] = coeff * (mth2 - hpk * alpha[index]);
//the above part is standard LLG equation.
/* Spin-transfer-torque contribution from the polarisation p.  */
double coeff_stt = a_J[index] / (1 + alpha[index] * alpha[index]);
double mp = m[i] * p[i] + m[j] * p[j] + m[k] * p[k];
hpi = mm*p[i] - mp * m[i];
hpj = mm*p[j] - mp * m[j];
hpk = mm*p[k] - mp * m[k];
mth0 = cross_x(m[i], m[j], m[k], hpi, hpj, hpk);
mth1 = cross_y(m[i], m[j], m[k], hpi, hpj, hpk);
mth2 = cross_z(m[i], m[j], m[k], hpi, hpj, hpk);
dm_dt[i] += coeff_stt * ((1 + alpha[index] * beta) * hpi
- (beta - alpha[index]) * mth0);
dm_dt[j] += coeff_stt * ((1 + alpha[index] * beta) * hpj
- (beta - alpha[index]) * mth1);
dm_dt[k] += coeff_stt * ((1 + alpha[index] * beta) * hpk
- (beta - alpha[index]) * mth2);
/* Correction term proportional to (1 - |m|^2).  */
double c = 6 * sqrt(dm_dt[i] * dm_dt[i] +
dm_dt[j] * dm_dt[j] +
dm_dt[k]* dm_dt[k]);
dm_dt[i] += c * (1 - mm) * m[i];
dm_dt[j] += c * (1 - mm) * m[j];
dm_dt[k] += c * (1 - mm) * m[k];
}
}
|
GB_unaryop__lnot_uint8_int16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_uint8_int16
// op(A') function: GB_tran__lnot_uint8_int16
// C type: uint8_t
// A type: int16_t
// cast: uint8_t cij = (uint8_t) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, aij) \
uint8_t z = (uint8_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_UINT8 || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
/* Apply cij = !(aij != 0), with a cast from int16_t to uint8_t, to each
of the anz entries of Ax, writing the results into Cx.  The per-entry
work is generated by the GB_CAST_OP macro defined above.  This file is
auto-generated; do not hand-edit the logic.  */
GrB_Info GB_unop__lnot_uint8_int16
(
uint8_t *Cx, // Cx and Ax may be aliased
int16_t *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
/* This operator/type combination was disabled at compile time; the
caller falls back to the generic implementation.  */
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
/* C = op (cast (A')): transpose A, typecast int16_t -> uint8_t, and
apply the logical-not operator.  The implementation is produced by
including the shared template GB_unaryop_transpose.c, specialised via
the GB_* macros defined above.  Auto-generated; do not hand-edit.  */
GrB_Info GB_tran__lnot_uint8_int16
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
/* Disabled combination: fall back to the generic case.  */
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
c-tree.h | /* Definitions for C parsing and type checking.
Copyright (C) 1987-2015 Free Software Foundation, Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#ifndef GCC_C_TREE_H
#define GCC_C_TREE_H
#include "c-family/c-common.h"
#include "diagnostic.h"
/* struct lang_identifier is private to c-decl.c, but langhooks.c needs to
know how big it is. This is sanity-checked in c-decl.c. */
#define C_SIZEOF_STRUCT_LANG_IDENTIFIER \
(sizeof (struct c_common_identifier) + 3 * sizeof (void *))
/* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is read-only. */
#define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1 (TYPE)
/* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is volatile. */
#define C_TYPE_FIELDS_VOLATILE(TYPE) TREE_LANG_FLAG_2 (TYPE)
/* In a RECORD_TYPE or UNION_TYPE or ENUMERAL_TYPE
nonzero if the definition of the type has already started. */
#define C_TYPE_BEING_DEFINED(TYPE) TYPE_LANG_FLAG_0 (TYPE)
/* In an incomplete RECORD_TYPE or UNION_TYPE, a list of variable
declarations whose type would be completed by completing that type. */
#define C_TYPE_INCOMPLETE_VARS(TYPE) TYPE_VFIELD (TYPE)
/* In an IDENTIFIER_NODE, nonzero if this identifier is actually a
keyword. C_RID_CODE (node) is then the RID_* value of the keyword,
and C_RID_YYCODE is the token number wanted by Yacc. */
#define C_IS_RESERVED_WORD(ID) TREE_LANG_FLAG_0 (ID)
/* Record whether a type or decl was written with nonconstant size.
Note that TYPE_SIZE may have simplified to a constant. */
#define C_TYPE_VARIABLE_SIZE(TYPE) TYPE_LANG_FLAG_1 (TYPE)
#define C_DECL_VARIABLE_SIZE(TYPE) DECL_LANG_FLAG_0 (TYPE)
/* Record whether a type is defined inside a struct or union type.
This is used for -Wc++-compat. */
#define C_TYPE_DEFINED_IN_STRUCT(TYPE) TYPE_LANG_FLAG_2 (TYPE)
/* Record whether an "incomplete type" error was given for the type. */
#define C_TYPE_ERROR_REPORTED(TYPE) TYPE_LANG_FLAG_3 (TYPE)
/* Record whether a typedef for type `int' was actually `signed int'. */
#define C_TYPEDEF_EXPLICITLY_SIGNED(EXP) DECL_LANG_FLAG_1 (EXP)
/* For a FUNCTION_DECL, nonzero if it was defined without an explicit
return type. */
#define C_FUNCTION_IMPLICIT_INT(EXP) DECL_LANG_FLAG_1 (EXP)
/* For a FUNCTION_DECL, nonzero if it was an implicit declaration. */
#define C_DECL_IMPLICIT(EXP) DECL_LANG_FLAG_2 (EXP)
/* For a PARM_DECL, nonzero if it was declared as an array. */
#define C_ARRAY_PARAMETER(NODE) DECL_LANG_FLAG_0 (NODE)
/* For FUNCTION_DECLs, evaluates true if the decl is built-in but has
been declared. */
#define C_DECL_DECLARED_BUILTIN(EXP) \
DECL_LANG_FLAG_3 (FUNCTION_DECL_CHECK (EXP))
/* For FUNCTION_DECLs, evaluates true if the decl is built-in, has a
built-in prototype and does not have a non-built-in prototype. */
#define C_DECL_BUILTIN_PROTOTYPE(EXP) \
DECL_LANG_FLAG_6 (FUNCTION_DECL_CHECK (EXP))
/* Record whether a decl was declared register. This is strictly a
front-end flag, whereas DECL_REGISTER is used for code generation;
they may differ for structures with volatile fields. */
#define C_DECL_REGISTER(EXP) DECL_LANG_FLAG_4 (EXP)
/* Record whether a decl was used in an expression anywhere except an
unevaluated operand of sizeof / typeof / alignof. This is only
used for functions declared static but not defined, though outside
sizeof and typeof it is set for other function decls as well. */
#define C_DECL_USED(EXP) DECL_LANG_FLAG_5 (FUNCTION_DECL_CHECK (EXP))
/* Record whether a variable has been declared threadprivate by
#pragma omp threadprivate. */
#define C_DECL_THREADPRIVATE_P(DECL) DECL_LANG_FLAG_3 (VAR_DECL_CHECK (DECL))
/* Nonzero for a decl which either doesn't exist or isn't a prototype.
N.B. Could be simplified if all built-in decls had complete prototypes
(but this is presently difficult because some of them need FILE*). */
#define C_DECL_ISNT_PROTOTYPE(EXP) \
(EXP == 0 \
|| (!prototype_p (TREE_TYPE (EXP)) \
&& !DECL_BUILT_IN (EXP)))
/* For FUNCTION_TYPE, a hidden list of types of arguments. The same as
TYPE_ARG_TYPES for functions with prototypes, but created for functions
without prototypes. */
#define TYPE_ACTUAL_ARG_TYPES(NODE) TYPE_LANG_SLOT_1 (NODE)
/* For a CONSTRUCTOR, whether some initializer contains a
subexpression meaning it is not a constant expression. */
#define CONSTRUCTOR_NON_CONST(EXPR) TREE_LANG_FLAG_1 (CONSTRUCTOR_CHECK (EXPR))
/* Record parser information about an expression that is irrelevant
for code generation alongside a tree representing its value. */
struct c_expr
{
/* The value of the expression. */
tree value;
/* Record the original unary/binary operator of an expression, which may
have been changed by fold, STRING_CST for unparenthesized string
constants, C_MAYBE_CONST_EXPR for __builtin_constant_p calls
(even if parenthesized), for subexpressions, and for non-constant
initializers, or ERROR_MARK for other expressions (including
parenthesized expressions). */
enum tree_code original_code;
/* If not NULL, the original type of an expression. This will
differ from the type of the value field for an enum constant.
The type of an enum constant is a plain integer type, but this
field will be the enum type. */
tree original_type;
};
/* Type alias for struct c_expr. This allows the structure to be used
inside the VEC types. */
typedef struct c_expr c_expr_t;
/* A kind of type specifier. Note that this information is currently
only used to distinguish tag definitions, tag references and typeof
uses. */
enum c_typespec_kind {
/* No typespec. This appears only in struct c_declspec. */
ctsk_none,
/* A reserved keyword type specifier. */
ctsk_resword,
/* A reference to a tag, previously declared, such as "struct foo".
This includes where the previous declaration was as a different
kind of tag, in which case this is only valid if shadowing that
tag in an inner scope. */
ctsk_tagref,
/* A reference to a tag, not previously declared in a visible
scope. */
ctsk_tagfirstref,
/* A definition of a tag such as "struct foo { int a; }". */
ctsk_tagdef,
/* A typedef name. */
ctsk_typedef,
/* An ObjC-specific kind of type specifier. */
ctsk_objc,
/* A typeof specifier, or _Atomic ( type-name ). */
ctsk_typeof
};
/* A type specifier: this structure is created in the parser and
passed to declspecs_add_type only. */
struct c_typespec {
/* What kind of type specifier this is. */
enum c_typespec_kind kind;
/* Whether the expression has operands suitable for use in constant
expressions. */
bool expr_const_operands;
/* The specifier itself. */
tree spec;
/* An expression to be evaluated before the type specifier, in the
case of typeof specifiers, or NULL otherwise or if no such
expression is required for a particular typeof specifier. In
particular, when typeof is applied to an expression of variably
modified type, that expression must be evaluated in order to
determine array sizes that form part of the type, but the
expression itself (as opposed to the array sizes) forms no part
of the type and so needs to be recorded separately. */
tree expr;
};
/* A storage class specifier. */
enum c_storage_class {
csc_none, /* No storage class written by the user.  */
csc_auto,
csc_extern,
csc_register,
csc_static,
csc_typedef
};
/* A type specifier keyword "void", "_Bool", "char", "int", "float",
"double", "_Decimal32", "_Decimal64", "_Decimal128", "_Fract", "_Accum",
or none of these. */
enum c_typespec_keyword {
cts_none,
cts_void,
cts_bool,
cts_char,
cts_int,
cts_float,
cts_int_n,
cts_double,
cts_dfloat32,
cts_dfloat64,
cts_dfloat128,
cts_fract,
cts_accum,
cts_auto_type
};
/* This enum lists all the possible declarator specifiers, storage
class or attribute that a user can write. There is at least one
enumerator per possible declarator specifier in the struct
c_declspecs below.
It is used to index the array of declspec locations in struct
c_declspecs. */
enum c_declspec_word {
cdw_typespec /* A catch-all for a typespec. */,
cdw_storage_class /* A catch-all for a storage class */,
cdw_attributes,
cdw_typedef,
cdw_explicit_signed,
cdw_deprecated,
cdw_default_int,
cdw_long,
cdw_long_long,
cdw_short,
cdw_signed,
cdw_unsigned,
cdw_complex,
cdw_inline,
cdw_noreturn,
cdw_thread,
cdw_const,
cdw_volatile,
cdw_restrict,
cdw_saturating,
cdw_alignas,
cdw_address_space,
cdw_number_of_elements /* This one must always be the last
enumerator. */
};
/* A sequence of declaration specifiers in C. When a new declaration
specifier is added, please update the enum c_declspec_word above
accordingly. */
struct c_declspecs {
source_location locations[cdw_number_of_elements];
/* The type specified, if a single type specifier such as a struct,
union or enum specifier, typedef name or typeof specifies the
whole type, or NULL_TREE if none or a keyword such as "void" or
"char" is used. Does not include qualifiers. */
tree type;
/* Any expression to be evaluated before the type, from a typeof
specifier. */
tree expr;
/* The attributes from a typedef decl. */
tree decl_attr;
/* When parsing, the attributes. Outside the parser, this will be
NULL; attributes (possibly from multiple lists) will be passed
separately. */
tree attrs;
/* The base-2 log of the greatest alignment required by an _Alignas
specifier, in bytes, or -1 if no such specifiers with nonzero
alignment. */
int align_log;
/* For the __intN declspec, this stores the index into the int_n_* arrays. */
int int_n_idx;
/* The storage class specifier, or csc_none if none. */
enum c_storage_class storage_class;
/* Any type specifier keyword used such as "int", not reflecting
modifiers such as "short", or cts_none if none. */
ENUM_BITFIELD (c_typespec_keyword) typespec_word : 8;
/* The kind of type specifier if one has been seen, ctsk_none
otherwise. */
ENUM_BITFIELD (c_typespec_kind) typespec_kind : 3;
/* Whether any expressions in typeof specifiers may appear in
constant expressions. */
BOOL_BITFIELD expr_const_operands : 1;
/* Whether any declaration specifiers have been seen at all. */
BOOL_BITFIELD declspecs_seen_p : 1;
/* Whether something other than a storage class specifier or
attribute has been seen. This is used to warn for the
obsolescent usage of storage class specifiers other than at the
start of the list. (Doing this properly would require function
specifiers to be handled separately from storage class
specifiers.) */
BOOL_BITFIELD non_sc_seen_p : 1;
/* Whether the type is specified by a typedef or typeof name. */
BOOL_BITFIELD typedef_p : 1;
/* Whether the type is explicitly "signed" or specified by a typedef
whose type is explicitly "signed". */
BOOL_BITFIELD explicit_signed_p : 1;
/* Whether the specifiers include a deprecated typedef. */
BOOL_BITFIELD deprecated_p : 1;
/* Whether the type defaulted to "int" because there were no type
specifiers. */
BOOL_BITFIELD default_int_p : 1;
/* Whether "long" was specified. */
BOOL_BITFIELD long_p : 1;
/* Whether "long" was specified more than once. */
BOOL_BITFIELD long_long_p : 1;
/* Whether "short" was specified. */
BOOL_BITFIELD short_p : 1;
/* Whether "signed" was specified. */
BOOL_BITFIELD signed_p : 1;
/* Whether "unsigned" was specified. */
BOOL_BITFIELD unsigned_p : 1;
/* Whether "complex" was specified. */
BOOL_BITFIELD complex_p : 1;
/* Whether "inline" was specified. */
BOOL_BITFIELD inline_p : 1;
/* Whether "_Noreturn" was specified. */
BOOL_BITFIELD noreturn_p : 1;
/* Whether "__thread" or "_Thread_local" was specified. */
BOOL_BITFIELD thread_p : 1;
/* Whether "__thread" rather than "_Thread_local" was specified. */
BOOL_BITFIELD thread_gnu_p : 1;
/* Whether "const" was specified. */
BOOL_BITFIELD const_p : 1;
/* Whether "volatile" was specified. */
BOOL_BITFIELD volatile_p : 1;
/* Whether "restrict" was specified. */
BOOL_BITFIELD restrict_p : 1;
/* Whether "_Atomic" was specified. */
BOOL_BITFIELD atomic_p : 1;
/* Whether "_Sat" was specified. */
BOOL_BITFIELD saturating_p : 1;
/* Whether any alignment specifier (even with zero alignment) was
specified. */
BOOL_BITFIELD alignas_p : 1;
/* The address space that the declaration belongs to. */
addr_space_t address_space;
};
/* The various kinds of declarators in C. */
enum c_declarator_kind {
/* An identifier. */
cdk_id,
/* A function. */
cdk_function,
/* An array. */
cdk_array,
/* A pointer. */
cdk_pointer,
/* Parenthesized declarator with nested attributes. */
cdk_attrs
};
typedef struct c_arg_tag_d {
/* The argument name. */
tree id;
/* The type of the argument. */
tree type;
} c_arg_tag;
/* Information about the parameters in a function declarator. */
struct c_arg_info {
/* A list of parameter decls. */
tree parms;
/* A list of structure, union and enum tags defined. */
vec<c_arg_tag, va_gc> *tags;
/* A list of argument types to go in the FUNCTION_TYPE. */
tree types;
/* A list of non-parameter decls (notably enumeration constants)
defined with the parameters. */
tree others;
/* A compound expression of VLA sizes from the parameters, or NULL.
In a function definition, these are used to ensure that
side-effects in sizes of arrays converted to pointers (such as a
parameter int i[n++]) take place; otherwise, they are
ignored. */
tree pending_sizes;
/* True when these arguments had [*]. */
BOOL_BITFIELD had_vla_unspec : 1;
};
/* A declarator. */
struct c_declarator {
/* The kind of declarator. */
enum c_declarator_kind kind;
location_t id_loc; /* Currently only set for cdk_id, cdk_array. */
/* Except for cdk_id, the contained declarator. For cdk_id, NULL. */
struct c_declarator *declarator;
union {
/* For identifiers, an IDENTIFIER_NODE or NULL_TREE if an abstract
declarator. */
tree id;
/* For functions. */
struct c_arg_info *arg_info;
/* For arrays. */
struct {
/* The array dimension, or NULL for [] and [*]. */
tree dimen;
/* The qualifiers inside []. */
int quals;
/* The attributes (currently ignored) inside []. */
tree attrs;
/* Whether [static] was used. */
BOOL_BITFIELD static_p : 1;
/* Whether [*] was used. */
BOOL_BITFIELD vla_unspec_p : 1;
} array;
/* For pointers, the qualifiers on the pointer type. */
int pointer_quals;
/* For attributes. */
tree attrs;
} u;
};
/* A type name. */
struct c_type_name {
/* The declaration specifiers. */
struct c_declspecs *specs;
/* The declarator. */
struct c_declarator *declarator;
};
/* A parameter. */
struct c_parm {
/* The declaration specifiers, minus any prefix attributes. */
struct c_declspecs *specs;
/* The attributes. */
tree attrs;
/* The declarator. */
struct c_declarator *declarator;
};
/* Used when parsing an enum. Initialized by start_enum. */
struct c_enum_contents
{
/* While defining an enum type, this is 1 plus the last enumerator
constant value. */
tree enum_next_value;
/* Nonzero means that there was overflow computing enum_next_value. */
int enum_overflow;
};
/* A type of reference to a static identifier in an inline
function. */
enum c_inline_static_type {
/* Identifier with internal linkage used in function that may be an
inline definition (i.e., file-scope static). */
csi_internal,
/* Modifiable object with static storage duration defined in
function that may be an inline definition (i.e., local
static). */
csi_modifiable
};
/* in c-parser.c */
extern void c_parse_init (void);
/* in c-aux-info.c */
extern void gen_aux_info_record (tree, int, int, int);
/* in c-decl.c */
struct c_spot_bindings;
struct c_struct_parse_info;
extern struct obstack parser_obstack;
extern tree c_break_label;
extern tree c_cont_label;
extern bool global_bindings_p (void);
extern void push_scope (void);
extern tree pop_scope (void);
extern void c_bindings_start_stmt_expr (struct c_spot_bindings *);
extern void c_bindings_end_stmt_expr (struct c_spot_bindings *);
extern void record_inline_static (location_t, tree, tree,
enum c_inline_static_type);
extern void c_init_decl_processing (void);
extern void c_print_identifier (FILE *, tree, int);
extern int quals_from_declspecs (const struct c_declspecs *);
extern struct c_declarator *build_array_declarator (location_t, tree,
struct c_declspecs *,
bool, bool);
extern tree build_enumerator (location_t, location_t, struct c_enum_contents *,
tree, tree);
extern tree check_for_loop_decls (location_t, bool);
extern void mark_forward_parm_decls (void);
extern void declare_parm_level (void);
extern void undeclared_variable (location_t, tree);
extern tree lookup_label_for_goto (location_t, tree);
extern tree declare_label (tree);
extern tree define_label (location_t, tree);
extern struct c_spot_bindings *c_get_switch_bindings (void);
extern void c_release_switch_bindings (struct c_spot_bindings *);
extern bool c_check_switch_jump_warnings (struct c_spot_bindings *,
location_t, location_t);
extern void finish_decl (tree, location_t, tree, tree, tree);
extern tree finish_enum (tree, tree, tree);
extern void finish_function (void);
extern tree finish_struct (location_t, tree, tree, tree,
struct c_struct_parse_info *);
extern struct c_arg_info *build_arg_info (void);
extern struct c_arg_info *get_parm_info (bool, tree);
extern tree grokfield (location_t, struct c_declarator *,
struct c_declspecs *, tree, tree *);
extern tree groktypename (struct c_type_name *, tree *, bool *);
extern tree grokparm (const struct c_parm *, tree *);
extern tree implicitly_declare (location_t, tree);
extern void keep_next_level (void);
extern void pending_xref_error (void);
extern void c_push_function_context (void);
extern void c_pop_function_context (void);
extern void push_parm_decl (const struct c_parm *, tree *);
extern struct c_declarator *set_array_declarator_inner (struct c_declarator *,
struct c_declarator *);
extern tree c_builtin_function (tree);
extern tree c_builtin_function_ext_scope (tree);
extern void shadow_tag (const struct c_declspecs *);
extern void shadow_tag_warned (const struct c_declspecs *, int);
extern tree start_enum (location_t, struct c_enum_contents *, tree);
extern int start_function (struct c_declspecs *, struct c_declarator *, tree);
extern tree start_decl (struct c_declarator *, struct c_declspecs *, bool,
tree);
extern tree start_struct (location_t, enum tree_code, tree,
struct c_struct_parse_info **);
extern void store_parm_decls (void);
extern void store_parm_decls_from (struct c_arg_info *);
extern void temp_store_parm_decls (tree, tree);
extern void temp_pop_parm_decls (void);
extern tree xref_tag (enum tree_code, tree);
extern struct c_typespec parser_xref_tag (location_t, enum tree_code, tree);
extern struct c_parm *build_c_parm (struct c_declspecs *, tree,
struct c_declarator *);
extern struct c_declarator *build_attrs_declarator (tree,
struct c_declarator *);
extern struct c_declarator *build_function_declarator (struct c_arg_info *,
struct c_declarator *);
extern struct c_declarator *build_id_declarator (tree);
extern struct c_declarator *make_pointer_declarator (struct c_declspecs *,
struct c_declarator *);
extern struct c_declspecs *build_null_declspecs (void);
extern struct c_declspecs *declspecs_add_qual (source_location,
struct c_declspecs *, tree);
extern struct c_declspecs *declspecs_add_type (location_t,
struct c_declspecs *,
struct c_typespec);
extern struct c_declspecs *declspecs_add_scspec (source_location,
struct c_declspecs *, tree);
extern struct c_declspecs *declspecs_add_attrs (source_location,
struct c_declspecs *, tree);
extern struct c_declspecs *declspecs_add_addrspace (source_location,
struct c_declspecs *,
addr_space_t);
extern struct c_declspecs *declspecs_add_alignas (source_location,
struct c_declspecs *, tree);
extern struct c_declspecs *finish_declspecs (struct c_declspecs *);
/* in c-objc-common.c */
extern bool c_objc_common_init (void);
extern bool c_missing_noreturn_ok_p (tree);
extern bool c_warn_unused_global_decl (const_tree);
extern void c_initialize_diagnostics (diagnostic_context *);
extern bool c_vla_unspec_p (tree x, tree fn);
/* in c-typeck.c */
extern int in_alignof;
extern int in_sizeof;
extern int in_typeof;
extern tree c_last_sizeof_arg;
extern struct c_switch *c_switch_stack;
extern tree c_objc_common_truthvalue_conversion (location_t, tree);
extern tree require_complete_type (tree);
extern int same_translation_unit_p (const_tree, const_tree);
extern int comptypes (tree, tree);
extern int comptypes_check_different_types (tree, tree, bool *);
extern bool c_vla_type_p (const_tree);
extern bool c_mark_addressable (tree);
extern void c_incomplete_type_error (const_tree, const_tree);
extern tree c_type_promotes_to (tree);
extern struct c_expr default_function_array_conversion (location_t,
struct c_expr);
extern struct c_expr default_function_array_read_conversion (location_t,
struct c_expr);
extern struct c_expr convert_lvalue_to_rvalue (location_t, struct c_expr,
bool, bool);
extern void mark_exp_read (tree);
extern tree composite_type (tree, tree);
extern tree build_component_ref (location_t, tree, tree);
extern tree build_array_ref (location_t, tree, tree);
extern tree build_external_ref (location_t, tree, int, tree *);
extern void pop_maybe_used (bool);
extern struct c_expr c_expr_sizeof_expr (location_t, struct c_expr);
extern struct c_expr c_expr_sizeof_type (location_t, struct c_type_name *);
extern struct c_expr parser_build_unary_op (location_t, enum tree_code,
struct c_expr);
extern struct c_expr parser_build_binary_op (location_t,
enum tree_code, struct c_expr,
struct c_expr);
extern tree build_conditional_expr (location_t, tree, bool, tree, tree,
tree, tree);
extern tree build_compound_expr (location_t, tree, tree);
extern tree c_cast_expr (location_t, struct c_type_name *, tree);
extern tree build_c_cast (location_t, tree, tree);
extern void store_init_value (location_t, tree, tree, tree);
extern void maybe_warn_string_init (location_t, tree, struct c_expr);
extern void start_init (tree, tree, int);
extern void finish_init (void);
extern void really_start_incremental_init (tree);
extern void finish_implicit_inits (location_t, struct obstack *);
extern void push_init_level (location_t, int, struct obstack *);
extern struct c_expr pop_init_level (location_t, int, struct obstack *);
extern void set_init_index (location_t, tree, tree, struct obstack *);
extern void set_init_label (location_t, tree, struct obstack *);
extern void process_init_element (location_t, struct c_expr, bool,
struct obstack *);
extern tree build_compound_literal (location_t, tree, tree, bool);
extern void check_compound_literal_type (location_t, struct c_type_name *);
extern tree c_start_case (location_t, location_t, tree, bool);
extern void c_finish_case (tree, tree);
extern tree build_asm_expr (location_t, tree, tree, tree, tree, tree, bool);
extern tree build_asm_stmt (tree, tree);
extern int c_types_compatible_p (tree, tree);
extern tree c_begin_compound_stmt (bool);
extern tree c_end_compound_stmt (location_t, tree, bool);
extern void c_finish_if_stmt (location_t, tree, tree, tree, bool);
extern void c_finish_loop (location_t, tree, tree, tree, tree, tree, bool);
extern tree c_begin_stmt_expr (void);
extern tree c_finish_stmt_expr (location_t, tree);
extern tree c_process_expr_stmt (location_t, tree);
extern tree c_finish_expr_stmt (location_t, tree);
extern tree c_finish_return (location_t, tree, tree);
extern tree c_finish_bc_stmt (location_t, tree *, bool);
extern tree c_finish_goto_label (location_t, tree);
extern tree c_finish_goto_ptr (location_t, tree);
extern tree c_expr_to_decl (tree, bool *, bool *);
extern tree c_finish_oacc_parallel (location_t, tree, tree);
extern tree c_finish_oacc_kernels (location_t, tree, tree);
extern tree c_finish_oacc_data (location_t, tree, tree);
extern tree c_begin_omp_parallel (void);
extern tree c_finish_omp_parallel (location_t, tree, tree);
extern tree c_begin_omp_task (void);
extern tree c_finish_omp_task (location_t, tree, tree);
extern void c_finish_omp_cancel (location_t, tree);
extern void c_finish_omp_cancellation_point (location_t, tree);
extern tree c_finish_omp_clauses (tree);
extern tree c_build_va_arg (location_t, tree, tree);
extern tree c_finish_transaction (location_t, tree, int);
extern bool c_tree_equal (tree, tree);
extern tree c_build_function_call_vec (location_t, vec<location_t>, tree,
vec<tree, va_gc> *, vec<tree, va_gc> *);
/* Set to 0 at beginning of a function definition, set to 1 if
a return statement that specifies a return value is seen. */
extern int current_function_returns_value;
/* Set to 0 at beginning of a function definition, set to 1 if
a return statement with no argument is seen. */
extern int current_function_returns_null;
/* Set to 0 at beginning of a function definition, set to 1 if
a call to a noreturn function is seen. */
extern int current_function_returns_abnormally;
/* In c-decl.c */
/* Tell the binding oracle what kind of binding we are looking for. */
enum c_oracle_request
{
C_ORACLE_SYMBOL,
C_ORACLE_TAG,
C_ORACLE_LABEL
};
/* If this is non-NULL, then it is a "binding oracle" which can lazily
create bindings when needed by the C compiler. The oracle is told
the name and type of the binding to create. It can call pushdecl
or the like to ensure the binding is visible; or do nothing,
leaving the binding untouched. c-decl.c takes note of when the
oracle has been called and will not call it again if it fails to
create a given binding. */
typedef void c_binding_oracle_function (enum c_oracle_request, tree identifier);
extern c_binding_oracle_function *c_binding_oracle;
extern void c_finish_incomplete_decl (tree);
extern void c_write_global_declarations (void);
extern tree c_omp_reduction_id (enum tree_code, tree);
extern tree c_omp_reduction_decl (tree);
extern tree c_omp_reduction_lookup (tree, tree);
extern tree c_check_omp_declare_reduction_r (tree *, int *, void *);
extern void c_pushtag (location_t, tree, tree);
extern void c_bind (location_t, tree, bool);
/* In c-errors.c */
extern void pedwarn_c90 (location_t, int opt, const char *, ...)
ATTRIBUTE_GCC_DIAG(3,4);
extern bool pedwarn_c99 (location_t, int opt, const char *, ...)
ATTRIBUTE_GCC_DIAG(3,4);
#endif /* ! GCC_C_TREE_H */
|
trapezoidal_prac.c | /*
Jose Miguel Figarola Prado A01632557
Trapezoidal aproximation for integral calculation using omp library.
Wednesday 1st, December 2021.
*/
#include <stdio.h>
#include <omp.h>
#include <math.h>
// Function to compute integral
#define f(x) sin(x)
int main(){
    // Integration bounds and number of trapezoid sub-intervals.
    int lower = 0;
    int upper = 1;
    int interval = 6;
    // Step size (delta x).
    float delta = (float)(upper - lower) / interval;
    // Endpoint contributions of the composite trapezoidal rule.
    float integration = f(lower) + f(upper);
    // Number of threads to use for the interior-point sum.
    int thread_count = 8;
    /* Each interior sample contributes 2*f(x_i).
       BUG FIXES vs. the original:
       - the sample abscissa was declared shared across threads (data race);
         it is now declared inside the parallel region, making it private.
       - the accumulation into `integration` was an unsynchronized update of
         a shared variable (data race); it now uses a reduction clause. */
#   pragma omp parallel num_threads(thread_count)
    {
        float k; /* per-thread sample point */
#       pragma omp for reduction(+:integration)
        for (int i = 1; i <= interval - 1; i++)
        {
            k = lower + i * delta;
            integration = integration + 2 * f(k);
        }
    }
    // Trapezoidal rule: multiply the weighted sum by delta/2.
    integration = integration * delta / 2;
    printf("Integral approximation: %.5f\n", integration);
    return 0;
}
reqramp.c | #include<Python.h>
#include<numpy/arrayobject.h>
#include<math.h>
#include<omp.h>
#define IND(a,i) *((double *)(a->data+i*a->strides[0]))
/* reqramp(rampparams, x, etc=None) -> ndarray
 *
 * Evaluate a rising-exponential-plus-quadratic ramp model at each point of
 * the 1-D double array x:
 *   y[i] = goal*(1 - exp(-m*(x[i]-x0))) + a*(x[i]-x1)^2 + b*(x[i]-x1) + c
 * rampparams is a 1-D double array laid out as [goal, m, x0, a, b, c, x1].
 * The optional "etc" argument is accepted but unused here.
 * NOTE(review): argument dtypes are not validated; the IND macro reads raw
 * double data via strides[0] -- confirm callers always pass float64 arrays. */
static PyObject *reqramp(PyObject *self, PyObject *args, PyObject *keywds);
static PyObject *reqramp(PyObject *self, PyObject *args, PyObject *keywds)
{
PyObject *etc;
PyArrayObject *x,*y, *rampparams;
double goal,m,x0,a,b,c,x1;
int i;
npy_intp dims[1];
// etc = PyList_New(0);
static char *kwlist[] = {"rampparams","x","etc",NULL};
if(!PyArg_ParseTupleAndKeywords(args,keywds,"OO|O",kwlist,&rampparams,&x,&etc))
{
return NULL;
}
/* Unpack the seven model parameters from the rampparams vector. */
goal = IND(rampparams,0);
m = IND(rampparams,1);
x0 = IND(rampparams,2);
a = IND(rampparams,3);
b = IND(rampparams,4);
c = IND(rampparams,5);
x1 = IND(rampparams,6);
/* Allocate an output array with the same length as x. */
dims[0] = x->dimensions[0];
y = (PyArrayObject *) PyArray_SimpleNew(1,dims,PyArray_DOUBLE);
/* Each iteration writes a distinct y element, so the loop is safe to
   parallelize; the loop variable i is implicitly private under omp for. */
#pragma omp parallel for
for(i=0;i<dims[0];i++)
{
IND(y,i) = goal*(1-exp(-1*m*(IND(x,i)-x0)))+a*pow((IND(x,i)-x1),2) \
+b*(IND(x,i)-x1)+c;
}
return PyArray_Return(y);
}
static char module_docstring[]="\
This function NEEDS A DOC_STRING.\n\
\n\
Parameters\n\
----------\n\
\n\
Returns\n\
-------\n\
\n\
Revisions\n\
---------\n\
2010-07-30 Kevin Stevenson, UCF \n\
kevin218@knights.ucf.edu\n\
Original version\n\n\
2010-12-24 Nate Lust, UCF\n\
natelust at linux dot com\n\
Converted to C\n\n\
2018-11-22 Jonathan Fraine, SSI\n\
jfraine at spacescience.org\n\
Updated c extensions to python3, with support for python2.7\n\n\
";
static PyMethodDef module_methods[] = {
{"reqramp",(PyCFunction)reqramp,METH_VARARGS|METH_KEYWORDS,module_docstring},{NULL}};
/* Module bootstrap for both Python 3 (PyInit_reqramp) and Python 2
 * (initreqramp).  Registers the single "reqramp" method table and
 * initializes numpy's C API via import_array() before first use. */
PyMODINIT_FUNC
#if PY_MAJOR_VERSION >= 3
PyInit_reqramp(void)
#else
initreqramp(void)
#endif
{
#if PY_MAJOR_VERSION >= 3
PyObject *module;
static struct PyModuleDef moduledef = {
PyModuleDef_HEAD_INIT,
"reqramp", /* m_name */
module_docstring, /* m_doc */
-1, /* m_size */
module_methods, /* m_methods */
NULL, /* m_reload */
NULL, /* m_traverse */
NULL, /* m_clear */
NULL, /* m_free */
};
#endif
#if PY_MAJOR_VERSION >= 3
module = PyModule_Create(&moduledef);
if (!module)
return NULL;
/* Load `numpy` functionality. */
import_array();
return module;
#else
PyObject *m = Py_InitModule3("reqramp", module_methods, module_docstring);
if (m == NULL)
return;
/* Load `numpy` functionality. */
import_array();
#endif
}
|
lrn_kernel_arm.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2021, OPEN AI LAB
* Author: haitao@openailab.com
*/
#include "lrn_kernel_arm.h"
#include "graph/tensor.h"
#include "graph/node.h"
#include "graph/graph.h"
#include "utility/sys_port.h"
#include "utility/float.h"
#include "utility/log.h"
#include "device/cpu/cpu_node.h"
#include "device/cpu/cpu_graph.h"
#include "device/cpu/cpu_module.h"
#include <math.h>
#include <arm_neon.h>
#define MAX(a, b) ((a) > (b) ? (a) : (b))
#define MIN(a, b) ((a) < (b) ? (a) : (b))
static struct tab exp_tab;
static struct tab log_tab;
/* Populate the two polynomial-coefficient tables used by
   vtaylor_polyq_f32(): exp_tab drives vexpq_f32() and log_tab drives
   vlogq_f32().  Must run before any of the vector math helpers
   (lrn_run() calls it first). */
static void init_tab(void)
{
/* Exponent polynomial coefficients */
exp_tab.a0 = vdupq_n_f32(1.f);
exp_tab.a1 = vdupq_n_f32(0.0416598916054f);
exp_tab.a2 = vdupq_n_f32(0.500000596046f);
exp_tab.a3 = vdupq_n_f32(0.0014122662833f);
exp_tab.a4 = vdupq_n_f32(1.00000011921f);
exp_tab.a5 = vdupq_n_f32(0.00833693705499f);
exp_tab.a6 = vdupq_n_f32(0.166665703058f);
exp_tab.a7 = vdupq_n_f32(0.000195780929062f);
/* Logarithm polynomial coefficients */
log_tab.a0 = vdupq_n_f32(-2.29561495781f);
log_tab.a1 = vdupq_n_f32(-2.47071170807f);
log_tab.a2 = vdupq_n_f32(-5.68692588806f);
log_tab.a3 = vdupq_n_f32(-0.165253549814f);
log_tab.a4 = vdupq_n_f32(5.17591238022f);
log_tab.a5 = vdupq_n_f32(0.844007015228f);
log_tab.a6 = vdupq_n_f32(4.58445882797f);
log_tab.a7 = vdupq_n_f32(0.0141278216615f);
}
/* Lane-wise floor(): convert to int (truncates toward zero), convert back,
   then subtract 1 from any lane where truncation overshot a negative
   input (r > val only happens for negative non-integers). */
static inline float32x4_t vfloorq_f32(float32x4_t val)
{
const float32x4_t CONST_1 = vdupq_n_f32(1.f);
const int32x4_t z = vcvtq_s32_f32(val);
const float32x4_t r = vcvtq_f32_s32(z);
return vbslq_f32(vcgtq_f32(r, val), vsubq_f32(r, CONST_1), r);
}
/* 2-lane 1/sqrt(x): start from the vrsqrte_f32 hardware estimate and
   refine it with two Newton-Raphson steps (vrsqrts_f32 produces the
   correction factor (3 - x*e*e)/2 for each step). */
static inline float32x2_t vinvsqrt_f32(float32x2_t x)
{
float32x2_t sqrt_reciprocal = vrsqrte_f32(x);
sqrt_reciprocal = vmul_f32(vrsqrts_f32(vmul_f32(x, sqrt_reciprocal), sqrt_reciprocal), sqrt_reciprocal);
sqrt_reciprocal = vmul_f32(vrsqrts_f32(vmul_f32(x, sqrt_reciprocal), sqrt_reciprocal), sqrt_reciprocal);
return sqrt_reciprocal;
}
/* 4-lane 1/sqrt(x): same scheme as vinvsqrt_f32 -- vrsqrteq_f32 estimate
   refined by two Newton-Raphson steps via vrsqrtsq_f32. */
static inline float32x4_t vinvsqrtq_f32(float32x4_t x)
{
float32x4_t sqrt_reciprocal = vrsqrteq_f32(x);
sqrt_reciprocal = vmulq_f32(vrsqrtsq_f32(vmulq_f32(x, sqrt_reciprocal), sqrt_reciprocal), sqrt_reciprocal);
sqrt_reciprocal = vmulq_f32(vrsqrtsq_f32(vmulq_f32(x, sqrt_reciprocal), sqrt_reciprocal), sqrt_reciprocal);
return sqrt_reciprocal;
}
/* 2-lane reciprocal 1/x: vrecpe_f32 hardware estimate plus two
   Newton-Raphson refinement steps (vrecps_f32 supplies the 2 - x*e
   correction factor for each step). */
static inline float32x2_t vinv_f32(float32x2_t x)
{
float32x2_t recip = vrecpe_f32(x);
recip = vmul_f32(vrecps_f32(x, recip), recip);
recip = vmul_f32(vrecps_f32(x, recip), recip);
return recip;
}
/* 4-lane reciprocal 1/x: same scheme as vinv_f32 -- vrecpeq_f32 estimate
   refined by two Newton-Raphson steps via vrecpsq_f32. */
static inline float32x4_t vinvq_f32(float32x4_t x)
{
float32x4_t recip = vrecpeq_f32(x);
recip = vmulq_f32(vrecpsq_f32(x, recip), recip);
recip = vmulq_f32(vrecpsq_f32(x, recip), recip);
return recip;
}
/* Evaluate the degree-7 polynomial whose coefficients live in *coeffs
   using a pairwise (Estrin-style) arrangement that exposes instruction
   parallelism:
     res = (A + B*x^2) + (C + D*x^2)*x^4
   with A = a0 + a4*x, B = a2 + a6*x, C = a1 + a5*x, D = a3 + a7*x. */
static inline float32x4_t vtaylor_polyq_f32(float32x4_t x, struct tab* coeffs)
{
float32x4_t A = vmlaq_f32(coeffs->a0, coeffs->a4, x);
float32x4_t B = vmlaq_f32(coeffs->a2, coeffs->a6, x);
float32x4_t C = vmlaq_f32(coeffs->a1, coeffs->a5, x);
float32x4_t D = vmlaq_f32(coeffs->a3, coeffs->a7, x);
float32x4_t x2 = vmulq_f32(x, x);
float32x4_t x4 = vmulq_f32(x2, x2);
float32x4_t res = vmlaq_f32(vmlaq_f32(A, B, x2), vmlaq_f32(C, D, x2), x4);
return res;
}
/* Lane-wise e^x.  Range-reduce x = m*ln(2) + val, approximate e^val with
   the exp_tab polynomial, then scale by 2^m by adding m directly into the
   float exponent bits.  Lanes with m < -126 (below the normal float
   range) are flushed to zero. */
static inline float32x4_t vexpq_f32(float32x4_t x)
{
const float32x4_t CONST_LN2 = vdupq_n_f32(0.6931471805f); // ln(2)
const float32x4_t CONST_INV_LN2 = vdupq_n_f32(1.4426950408f); // 1/ln(2)
const float32x4_t CONST_0 = vdupq_n_f32(0.f);
const int32x4_t CONST_NEGATIVE_126 = vdupq_n_s32(-126);
// Perform range reduction [-log(2),log(2)]
int32x4_t m = vcvtq_s32_f32(vmulq_f32(x, CONST_INV_LN2));
float32x4_t val = vmlsq_f32(x, vcvtq_f32_s32(m), CONST_LN2);
// Polynomial Approximation
float32x4_t poly = vtaylor_polyq_f32(val, &exp_tab);
// Reconstruct
/* Shift m into the exponent field (bit 23) and saturating-add to the
   polynomial's bit pattern, i.e. multiply by 2^m. */
poly = vreinterpretq_f32_s32(vqaddq_s32(vreinterpretq_s32_f32(poly), vqshlq_n_s32(m, 23)));
poly = vbslq_f32(vcltq_s32(m, CONST_NEGATIVE_126), CONST_0, poly);
return poly;
}
/* Lane-wise natural log.  Split x into its unbiased exponent m and a
   mantissa val (x with the exponent bits removed), approximate log(val)
   with the log_tab polynomial, then add m*ln(2).  Assumes x > 0: there is
   no handling for zero, negative, or NaN inputs. */
static inline float32x4_t vlogq_f32(float32x4_t x)
{
const int32x4_t CONST_127 = vdupq_n_s32(127); // 127
const float32x4_t CONST_LN2 = vdupq_n_f32(0.6931471805f); // ln(2)
// Extract exponent
int32x4_t m = vsubq_s32(vreinterpretq_s32_u32(vshrq_n_u32(vreinterpretq_u32_f32(x), 23)), CONST_127);
float32x4_t val = vreinterpretq_f32_s32(vsubq_s32(vreinterpretq_s32_f32(x), vshlq_n_s32(m, 23)));
// Polynomial Approximation
float32x4_t poly = vtaylor_polyq_f32(val, &log_tab);
// Reconstruct
poly = vmlaq_f32(poly, vcvtq_f32_s32(m), CONST_LN2);
return poly;
}
/* Lane-wise tanh(x) = (e^(2x) - 1) / (e^(2x) + 1).  The input is clamped
   to [-10, 10], where float tanh has already saturated to +/-1, which also
   keeps vexpq_f32 within its usable range. */
static inline float32x4_t vtanhq_f32(float32x4_t val)
{
const float32x4_t CONST_1 = vdupq_n_f32(1.f);
const float32x4_t CONST_2 = vdupq_n_f32(2.f);
const float32x4_t CONST_MIN_TANH = vdupq_n_f32(-10.f);
const float32x4_t CONST_MAX_TANH = vdupq_n_f32(10.f);
float32x4_t x = vminq_f32(vmaxq_f32(val, CONST_MIN_TANH), CONST_MAX_TANH);
float32x4_t exp2x = vexpq_f32(vmulq_f32(CONST_2, x));
float32x4_t num = vsubq_f32(exp2x, CONST_1);
float32x4_t den = vaddq_f32(exp2x, CONST_1);
float32x4_t tanh = vmulq_f32(num, vinvq_f32(den));
return tanh;
}
/* Lane-wise val^n computed as exp(n * log(val)); requires val > 0 because
   vlogq_f32 has no special-case handling. */
static inline float32x4_t vpowq_f32(float32x4_t val, float32x4_t n)
{
return vexpq_f32(vmulq_f32(n, vlogq_f32(val)));
}
/* Cross-channel LRN for channels [step*id, step*id + step) of one image:
     out[c][p] = in[c][p] / (bias + (alpha/local_size) * sum)^beta
   where sum accumulates square[l][p] over the local_size-wide channel
   window centered on c, clamped to [0, channel_num-1].
   "data" points to an int holding the per-worker channel count (step);
   "square" holds the element-wise squares of the whole input image.
   Parameters i and num_thread are currently unused (the omp pragma is
   commented out). */
static void lrn_kernel(int i, int id, void* data, const float* input, float* output, float* square, float alpha,
float beta, float bias, int local_size, int channel_size, int channel_num, int num_thread)
{
int step = ((int*)data)[0];
const float32x4_t alpha_vec = vdupq_n_f32(alpha / local_size);
const float32x4_t beta_vec = vdupq_n_f32(beta);
const float32x4_t bias_vec = vdupq_n_f32(bias);
/* Number of whole 4-lane vectors per channel plane. */
int mod = channel_size / 4;
int start_c = step * id;
int end_c = step * id + step;
// #pragma omp parallel for num_threads(num_thread)
for (int j = start_c; j < end_c; j++)
{
/* Channel window for this output channel, clamped to valid range. */
int c_start = j - local_size / 2;
int c_end = j + local_size / 2;
c_start = MAX(0, c_start);
c_end = MIN(c_end, channel_num - 1);
const float* cur_input = input + j * channel_size;
float* cur_output = output + j * channel_size;
/* Vector path: 4 pixels at a time. */
for (int m = 0; m < mod; m++)
{
float32x4_t accu = vdupq_n_f32(0.f);
for (int l = c_start; l <= c_end; l++)
{
accu = vaddq_f32(accu, vld1q_f32(square + l * channel_size + m * 4));
}
const float32x4_t normalized = vpowq_f32(vmlaq_f32(bias_vec, alpha_vec, accu), beta_vec);
const float32x4_t normalized_pixel = vmulq_f32(vld1q_f32(cur_input), vinvq_f32(normalized));
vst1q_f32(cur_output, normalized_pixel);
cur_input += 4;
cur_output += 4;
}
/* Scalar tail for the last channel_size % 4 pixels. */
float alpha_over_size = alpha / local_size;
for (int m = 4 * mod; m < channel_size; m++)
{
float sum = 0;
for (int l = c_start; l <= c_end; l++)
{
sum = sum + square[l * channel_size + m];
}
*cur_output++ = *cur_input++ * pow(bias + alpha_over_size * sum, -beta);
}
}
}
/* Run cross-channel LRN over an NCHW float tensor.
   Returns 0 on success, -1 on unsupported mode or allocation failure.
   Only norm_region == 0 (across channels) is supported. */
int lrn_run(struct tensor* output_tensor, struct tensor* input_tensor, struct lrn_param* lrn_param,
            int num_thread)
{
    init_tab();

    /* Reject unsupported modes up front; the original only discovered this
       after computing the first image's squares, and then released the
       malloc'd buffer with sys_free (allocator mismatch). */
    if (lrn_param->norm_region != 0)
    {
        TLOG_ERR("LRN: Only support across channels\n");
        return -1;
    }

    const float* input = (float*)input_tensor->data;
    float* output = (float*)output_tensor->data;

    /* Scratch buffer holding element-wise squares of one image. */
    float* square = (float*)(malloc(input_tensor->elem_num * sizeof(float)));
    if (NULL == square)
    {
        TLOG_ERR("LRN: failed to allocate square buffer\n");
        return -1;
    }

    int n = input_tensor->dims[0];
    int c = input_tensor->dims[1];
    int h = input_tensor->dims[2];
    int w = input_tensor->dims[3];
    int img_size = c * h * w;
    int channel_size = h * w;

    float alpha = lrn_param->alpha;
    float beta = lrn_param->beta;
    float bias = lrn_param->k;
    int local_size = lrn_param->local_size;

    for (int i = 0; i < n; i++)
    {
        const float* img_base = input + i * img_size;
        float* out_base = output + i * img_size;

        /* Square four lanes at a time, then the scalar tail. */
        int j = 0;
        for (; j < (img_size & -4); j += 4)
        {
            float32x4_t in = vld1q_f32(img_base + j);
            in = vmulq_f32(in, in);
            vst1q_f32(square + j, in);
        }
        for (; j < img_size; j++)
            square[j] = img_base[j] * img_base[j];

        /* step == c: a single worker (id 0) covers all channels. */
        lrn_kernel(0, 0, &c, img_base, out_base, square, alpha, beta, bias, local_size, channel_size, c, num_thread);
    }

    /* Buffer came from malloc, so release it with free. */
    free(square);
    return 0;
}
|
mandelbrot.c | /*
To compile:
gcc -O3 -o mandelbrot mandelbrot.c -lm
To create an image with 4096 x 4096 pixels (last argument will be used to set number of threads):
./mandelbrot 4096 4096 1
*/
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <omp.h>
int writeMandelbrot(const char *fileName, int width, int height, float *img, int minI, int maxI);
#define MXITER 2048
typedef struct {
double r;
double i;
}complex_t;
// Count the iterations z <- z^2 + c takes to leave the disk |z| <= 2,
// starting from z = c.  Points that stay bounded for MXITER updates
// return MXITER.
int testpoint(complex_t c){
complex_t z = c;
int n = 0;
while (n < MXITER) {
// One Mandelbrot update computed entirely from the previous z.
double re = z.r * z.r - z.i * z.i + c.r;
double im = 2. * z.r * z.i + c.i;
z.r = re;
z.i = im;
if (z.r * z.r + z.i * z.i > 4.0) {
break; // escaped after n+... exactly n completed full rounds
}
n++;
}
return n;
}
// Perform the Mandelbrot iteration on an Nre x Nim grid of complex points
// (lower-left corner cmin, per-pixel step dc) and record the escape
// iteration counts in count (row-major, Nre wide).
void mandelbrot(int Nre, int Nim, complex_t cmin, complex_t dc, float *count){
  // BUG FIX: the original used an orphaned "#pragma omp for" with no
  // enclosing parallel region, so the loop always ran on one thread and
  // omp_set_num_threads() in main had no effect.  "parallel for" creates
  // the thread team.  Rows write disjoint slices of count, so no further
  // synchronization is needed.
#pragma omp parallel for
  for(int n=0;n<Nim;++n){
    for(int m=0;m<Nre;++m){
      complex_t c;
      c.r = cmin.r + dc.r*m;
      c.i = cmin.i + dc.i*n;
      count[m+n*Nre] = (float) testpoint(c);
    }
  }
}
int main(int argc, char **argv){
  // to create a 4096x4096 pixel image:
  // usage: ./mandelbrot 4096 4096 32   [ last argument = number of threads ]
  // BUG FIX: the original dereferenced argv[1]/argv[2] without checking argc.
  if(argc < 3){
    fprintf(stderr, "usage: %s Nre Nim [Nthreads]\n", argv[0]);
    return 1;
  }
  int Nre = atoi(argv[1]);
  int Nim = atoi(argv[2]);
  int Nthreads = atoi(argv[argc-1]);
  // dc.r/dc.i below divide by (Nre-1)/(Nim-1): require >= 2 pixels per axis.
  if(Nre < 2 || Nim < 2 || Nthreads < 1){
    fprintf(stderr, "Nre and Nim must be >= 2 and Nthreads >= 1\n");
    return 1;
  }
  omp_set_num_threads(Nthreads);
  // storage for the iteration counts
  float *count = (float*) malloc((size_t)Nre*Nim*sizeof(float));
  if(count == NULL){
    fprintf(stderr, "out of memory allocating %d x %d counts\n", Nre, Nim);
    return 1;
  }
  // Parameters for a bounding box for "c" that generates an interesting image
  const float centRe = -.759856, centIm= .125547;
  const float diam = 0.151579;
  complex_t cmin;
  complex_t cmax;
  complex_t dc;
  cmin.r = centRe - 0.5*diam;
  cmax.r = centRe + 0.5*diam;
  cmin.i = centIm - 0.5*diam;
  cmax.i = centIm + 0.5*diam;
  // per-pixel step sizes
  dc.r = (cmax.r-cmin.r)/(Nre-1);
  dc.i = (cmax.i-cmin.i)/(Nim-1);
  double stime = omp_get_wtime(); // start time in seconds
  // compute mandelbrot set
  mandelbrot(Nre, Nim, cmin, dc, count);
  double etime = omp_get_wtime(); // end time in seconds
  // print elapsed time
  printf("elapsed = %f\n", etime - stime);
  // output mandelbrot to ppm format image
  printf("Printing mandelbrot.ppm...");
  writeMandelbrot("mandelbrot.ppm", Nre, Nim, count, 0, 80);
  free(count);
  return 0; // the original called exit(0) followed by an unreachable return
}
/* Output data as a binary PPM (P6) file.
 * img must hold 3*width*height bytes of packed RGB data.
 * On I/O failure a diagnostic is printed and the function returns. */
void saveppm(const char *filename, unsigned char *img, int width, int height){
    /* Open file for writing */
    FILE *f = fopen(filename, "wb");
    if (f == NULL) {
        /* BUG FIX: the original dereferenced a NULL FILE* when fopen failed */
        perror(filename);
        return;
    }
    /* PPM header info, including the size of the image */
    fprintf(f, "P6 %d %d %d\n", width, height, 255);
    /* Write the image data to the file - remember 3 bytes per pixel.
       BUG FIX: check the fwrite item count so short writes are reported. */
    if (fwrite(img, 3, (size_t)width * height, f) != (size_t)width * height)
        fprintf(stderr, "%s: short write\n", filename);
    /* Make sure you close the file */
    fclose(f);
}
/* Map iteration counts (img) through a color palette and write them as a
 * PPM image via saveppm. minI/maxI set the normalization window for the
 * palette lookup.
 * Returns 0 on success, -1 on allocation failure.
 * (BUG FIX: the original was declared int but contained no return
 * statement — UB if a caller ever read the result.) */
int writeMandelbrot(const char *fileName, int width, int height, float *img, int minI, int maxI){
    int n, m;
    unsigned char *rgb = calloc((size_t)3 * width * height, sizeof *rgb);
    if (rgb == NULL) /* BUG FIX: calloc result was unchecked */
        return -1;
    for (n = 0; n < height; ++n) {
        for (m = 0; m < width; ++m) {
            int id = m + n*width;
            /* normalize the count into [0,768) bands via a sqrt ramp */
            int I = (int) (768*sqrt((double)(img[id]-minI)/(maxI-minI)));
            /* change this to change the palette: each band ramps one channel */
            if (I < 256)       rgb[3*id+2] = 255 - I;
            else if (I < 512)  rgb[3*id+1] = 511 - I;
            else if (I < 768)  rgb[3*id+0] = 767 - I;
            else if (I < 1024) rgb[3*id+0] = 1023 - I;
            else if (I < 1536) rgb[3*id+1] = 1535 - I;
            else if (I < 2048) rgb[3*id+2] = 2047 - I;
        }
    }
    saveppm(fileName, rgb, width, height);
    free(rgb);
    return 0;
}
|
primitives_inl.h | /*
* nvbio
* Copyright (c) 2011-2014, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#pragma once
namespace nvbio {
// return true if any item in the range [0,n) evaluates to true
// (host implementation)
//
// \param tag   host dispatch tag (unused)
// \param n     number of predicate values to examine
// \param pred  iterator over boolean predicate values
//
template <typename PredicateIterator>
bool any(
    const host_tag          tag,
    const uint32            n,
    const PredicateIterator pred)
{
    // OR-reduce the n predicate values; an empty range yields false
    const bool result = thrust::reduce( pred, pred + n, false, thrust::logical_or<bool>() );
    return result;
}
// return true if all items in the range [0,n) evaluate to true
// (host implementation)
//
// \param tag   host dispatch tag (unused)
// \param n     number of predicate values to examine
// \param pred  iterator over boolean predicate values
//
template <typename PredicateIterator>
bool all(
    const host_tag          tag,
    const uint32            n,
    const PredicateIterator pred)
{
    // AND-reduce the n predicate values; an empty range yields true
    const bool result = thrust::reduce( pred, pred + n, true, thrust::logical_and<bool>() );
    return result;
}
#if defined(__CUDACC__)
// return true if any item in the range [0,n) evaluates to true
// (device specialization; forwards to the CUDA implementation)
//
template <typename PredicateIterator>
bool any(
const device_tag tag,
const uint32 n,
const PredicateIterator pred)
{
return cuda::any( n, pred );
}
// return true if all items in the range [0,n) evaluate to true
// (device specialization; forwards to the CUDA implementation)
//
template <typename PredicateIterator>
bool all(
const device_tag tag,
const uint32 n,
const PredicateIterator pred)
{
return cuda::all( n, pred );
}
#endif
// return true if any item in the range [0,n) evaluates to true
// (generic front-end: dispatches to the host or device overload selected
// by the system_tag template parameter)
//
template <typename system_tag, typename PredicateIterator>
bool any(
const uint32 n,
const PredicateIterator pred)
{
return any( system_tag(), n, pred );
}
// return true if all items in the range [0,n) evaluate to true
// (generic front-end: dispatches to the host or device overload selected
// by the system_tag template parameter)
//
template <typename system_tag, typename PredicateIterator>
bool all(
const uint32 n,
const PredicateIterator pred)
{
return all( system_tag(), n, pred );
}
// a pseudo-iterator to evaluate the predicate (it1[i] <= it2[i]) for arbitrary iterator pairs;
// used with all() to test whether a sequence is sorted (it2 = it1 + 1)
//
template <typename Iterator1, typename Iterator2>
struct is_sorted_iterator
{
typedef bool value_type;
typedef value_type& reference;
typedef value_type const_reference;
typedef value_type* pointer;
typedef typename std::iterator_traits<Iterator1>::difference_type difference_type;
typedef typename std::iterator_traits<Iterator1>::iterator_category iterator_category;
// constructor: wraps the two underlying iterators
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
is_sorted_iterator(const Iterator1 _it1, const Iterator2 _it2) : it1( _it1 ), it2( _it2 ) {}
// indexing operator: evaluate the predicate at offset i
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
bool operator[] (const uint64 i) const { return it1[i] <= it2[i]; }
// dereference operator: evaluate the predicate at the current position
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
bool operator* () const { return it1[0] <= it2[0]; }
// pre-increment: advance both underlying iterators in lock-step
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
is_sorted_iterator& operator++ () { ++it1; ++it2; return *this; }
Iterator1 it1;
Iterator2 it2;
};
// operator+ : advance both underlying iterators by i
template <typename T1, typename T2>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
is_sorted_iterator<T1,T2> operator+ (const is_sorted_iterator<T1,T2> it, const int64 i)
{
return is_sorted_iterator<T1,T2>( it.it1 + i, it.it2 + i );
}
// operator- : distance, measured on the first underlying iterator
template <typename T1, typename T2>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
int64 operator- (const is_sorted_iterator<T1,T2> it1, const is_sorted_iterator<T1,T2> it2)
{
return it1.it1 - it2.it1;
}
// operator!= : compares the first underlying iterator only
template <typename T1, typename T2>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
bool operator!= (const is_sorted_iterator<T1,T2> it1, const is_sorted_iterator<T1,T2> it2)
{
return it1.it1 != it2.it1;
}
// operator== : compares the first underlying iterator only
template <typename T1, typename T2>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
bool operator== (const is_sorted_iterator<T1,T2> it1, const is_sorted_iterator<T1,T2> it2)
{
return it1.it1 == it2.it1;
}
// a pseudo-iterator to evaluate the predicate (hd[i] || (it1[i] <= it2[i])) for arbitrary iterator pairs;
// a set head flag marks the start of a new segment, where ordering is not required
//
template <typename Iterator1, typename Iterator2, typename Headflags>
struct is_segment_sorted_iterator
{
typedef bool value_type;
typedef value_type& reference;
typedef value_type const_reference;
typedef value_type* pointer;
typedef typename std::iterator_traits<Iterator1>::difference_type difference_type;
typedef typename std::iterator_traits<Iterator1>::iterator_category iterator_category;
// constructor: wraps the two value iterators and the head-flag iterator
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
is_segment_sorted_iterator(const Iterator1 _it1, const Iterator2 _it2, const Headflags _hd) : it1( _it1 ), it2( _it2 ), hd(_hd) {}
// indexing operator: evaluate the predicate at offset i
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
bool operator[] (const uint64 i) const { return hd[i] || (it1[i] <= it2[i]); }
// dereference operator: evaluate the predicate at the current position
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
bool operator* () const { return hd[0] || (it1[0] <= it2[0]); }
// pre-increment: advance all three underlying iterators in lock-step
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
is_segment_sorted_iterator& operator++ () { ++it1; ++it2; ++hd; return *this; }
Iterator1 it1;
Iterator2 it2;
Headflags hd;
};
// operator+ : advance all three underlying iterators by i
template <typename T1, typename T2, typename H>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
is_segment_sorted_iterator<T1,T2,H> operator+ (const is_segment_sorted_iterator<T1,T2,H> it, const int64 i)
{
return is_segment_sorted_iterator<T1,T2,H>( it.it1 + i, it.it2 + i, it.hd + i );
}
// operator- : distance, measured on the first underlying iterator
template <typename T1, typename T2, typename H>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
int64 operator- (const is_segment_sorted_iterator<T1,T2,H> it1, const is_segment_sorted_iterator<T1,T2,H> it2)
{
return it1.it1 - it2.it1;
}
// operator!= : compares the first underlying iterator only
template <typename T1, typename T2, typename H>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
bool operator!= (const is_segment_sorted_iterator<T1,T2,H> it1, const is_segment_sorted_iterator<T1,T2,H> it2)
{
return it1.it1 != it2.it1;
}
// operator== : compares the first underlying iterator only
template <typename T1, typename T2, typename H>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
bool operator== (const is_segment_sorted_iterator<T1,T2,H> it1, const is_segment_sorted_iterator<T1,T2,H> it2)
{
return it1.it1 == it2.it1;
}
// return true if the items in the range [0,n) are sorted (non-decreasing)
//
// \param n      number of items to check
// \param values iterator to the items
//
template <typename system_tag, typename Iterator>
bool is_sorted(
    const uint32   n,
    const Iterator values)
{
    // BUG FIX: ranges with fewer than two items are trivially sorted; the
    // original unconditionally computed n-1, which underflows to 0xFFFFFFFF
    // for n == 0 (uint32 wrap-around) and scanned far out of bounds.
    if (n < 2)
        return true;
    return all<system_tag>( n-1, is_sorted_iterator<Iterator,Iterator>( values, values+1 ) );
}
// return true if the items in the range [0,n) are sorted by segment, where
// the beginning of each segment is identified by a set head flag
//
// \param n      number of items to check
// \param values iterator to the items
// \param flags  iterator to the head flags (flags[i] set => item i starts a segment)
//
template <typename system_tag, typename Iterator, typename Headflags>
bool is_segment_sorted(
    const uint32    n,
    const Iterator  values,
    const Headflags flags)
{
    // BUG FIX: ranges with fewer than two items are trivially sorted; the
    // original unconditionally computed n-1, which underflows to 0xFFFFFFFF
    // for n == 0 (uint32 wrap-around) and scanned far out of bounds.
    if (n < 2)
        return true;
    return all<system_tag>( n-1, is_segment_sorted_iterator<Iterator,Iterator,Headflags>( values, values+1, flags+1 ) );
}
// invoke a functor for each element of the given sequence
// (host implementation; parallelized with OpenMP for large sequences)
//
// NOTE(review): the functor is invoked concurrently from multiple OpenMP
// threads — assumed safe for concurrent invocation; confirm for stateful functors
//
template <typename Iterator, typename Functor>
void for_each(
const host_tag tag,
const uint64 n,
const Iterator in,
Functor functor)
{
#if defined(_OPENMP)
// only spawn a thread team when the range is large enough to amortize it
#pragma omp parallel for if (n >= 256)
#endif
for (int64 i = 0; i < int64(n); ++i)
functor( in[i] );
}
// invoke a functor for each element of the given sequence
// (device implementation; forwards to thrust)
//
template <typename Iterator, typename Functor>
void for_each(
const device_tag tag,
const uint64 n,
const Iterator in,
Functor functor)
{
thrust::for_each( in, in + n, functor );
}
// invoke a functor for each element of the given sequence
// (generic front-end: dispatches on system_tag)
//
template <typename system_tag, typename Iterator, typename Functor>
void for_each(
const uint64 n,
const Iterator in,
Functor functor)
{
return for_each( system_tag(), n, in, functor );
}
// apply a functor to each element of the given sequence, writing results to out
// (device implementation; forwards to thrust)
//
// NOTE(review): this overload takes a uint64 count while the host and
// front-end overloads take uint32 — looks inconsistent; confirm intended
//
template <typename Iterator, typename Output, typename Functor>
void transform(
const device_tag tag,
const uint64 n,
const Iterator in,
const Output out,
const Functor functor)
{
thrust::transform( in, in + n, out, functor );
}
// apply a functor to each element of the given sequence, writing results to out
// (host implementation; parallelized with OpenMP for large sequences)
//
template <typename Iterator, typename Output, typename Functor>
void transform(
const host_tag tag,
const uint32 n,
const Iterator in,
const Output out,
const Functor functor)
{
#if defined(_OPENMP)
// only spawn a thread team when the range is large enough to amortize it
#pragma omp parallel for if (n >= 256)
#endif
for (int64 i = 0; i < int64(n); ++i)
out[i] = functor( in[i] );
}
// apply a binary functor to each pair of elements of the given sequences
// (device implementation; forwards to thrust)
//
template <typename Iterator1, typename Iterator2, typename Output, typename Functor>
void transform(
const device_tag tag,
const uint32 n,
const Iterator1 in1,
const Iterator2 in2,
const Output out,
const Functor functor)
{
thrust::transform( in1, in1 + n, in2, out, functor );
}
// apply a binary functor to each pair of elements of the given sequences
// (host implementation; parallelized with OpenMP for large sequences)
//
template <typename Iterator1, typename Iterator2, typename Output, typename Functor>
void transform(
const host_tag tag,
const uint32 n,
const Iterator1 in1,
const Iterator2 in2,
const Output out,
const Functor functor)
{
#if defined(_OPENMP)
#pragma omp parallel for if (n >= 256)
#endif
for (int64 i = 0; i < int64(n); ++i)
out[i] = functor( in1[i], in2[i] );
}
// apply a functor to each element of the given sequence
// (generic front-end: dispatches on system_tag)
//
template <typename system_tag, typename Iterator, typename Output, typename Functor>
void transform(
const uint32 n,
const Iterator in,
const Output out,
const Functor functor)
{
transform( system_tag(), n, in, out, functor );
}
// apply a binary functor to each pair of elements of the given sequences
// (generic front-end: dispatches on system_tag)
//
template <typename system_tag, typename Iterator1, typename Iterator2, typename Output, typename Functor>
void transform(
const uint32 n,
const Iterator1 in1,
const Iterator2 in2,
const Output out,
const Functor functor)
{
transform( system_tag(), n, in1, in2, out, functor );
}
// host-wide reduce
//
// \param n number of items to reduce
// \param in a system iterator
// \param op the binary reduction operator
// \param temp_storage some temporary storage (unused by the host implementation)
//
// NOTE(review): the reduction is seeded with 0u — presumably assumes op has
// an identity of zero and an integral-compatible value type; confirm for
// non-integral value types
//
template <typename InputIterator, typename BinaryOp>
typename std::iterator_traits<InputIterator>::value_type reduce(
host_tag tag,
const uint32 n,
InputIterator in,
BinaryOp op,
nvbio::vector<host_tag,uint8>& temp_storage)
{
return thrust::reduce( in, in + n, 0u, op );
}
// host-wide inclusive scan
//
// \param n number of items to scan
// \param in an input iterator
// \param out an output iterator
// \param op the binary scan operator
// \param temp_storage some temporary storage (unused by the host implementation)
//
template <typename InputIterator, typename OutputIterator, typename BinaryOp>
void inclusive_scan(
host_tag tag,
const uint32 n,
InputIterator in,
OutputIterator out,
BinaryOp op,
nvbio::vector<host_tag,uint8>& temp_storage)
{
thrust::inclusive_scan(
in,
in + n,
out,
op );
}
// host-wide exclusive scan
//
// \param n number of items to scan
// \param in an input iterator
// \param out an output iterator
// \param op the binary scan operator
// \param identity the identity element (value of out[0])
// \param temp_storage some temporary storage (unused by the host implementation)
//
template <typename InputIterator, typename OutputIterator, typename BinaryOp, typename Identity>
void exclusive_scan(
host_tag tag,
const uint32 n,
InputIterator in,
OutputIterator out,
BinaryOp op,
Identity identity,
nvbio::vector<host_tag,uint8>& temp_storage)
{
thrust::exclusive_scan(
in,
in + n,
out,
identity,
op );
}
#if defined(__CUDACC__)
// device-wide reduce (forwards to the CUDA implementation)
//
// \param n number of items to reduce
// \param in a device iterator
// \param op the binary reduction operator
// \param temp_storage scratch space used by the CUDA reduction
//
template <typename InputIterator, typename BinaryOp>
typename std::iterator_traits<InputIterator>::value_type reduce(
device_tag tag,
const uint32 n,
InputIterator in,
BinaryOp op,
nvbio::vector<device_tag,uint8>& temp_storage)
{
return cuda::reduce( n, in, op, temp_storage );
}
// device-wide inclusive scan (forwards to the CUDA implementation)
//
// \param n number of items to scan
// \param in a device input iterator
// \param out a device output iterator
// \param op the binary scan operator
// \param temp_storage scratch space used by the CUDA scan
//
template <typename InputIterator, typename OutputIterator, typename BinaryOp>
void inclusive_scan(
device_tag tag,
const uint32 n,
InputIterator in,
OutputIterator out,
BinaryOp op,
nvbio::vector<device_tag,uint8>& temp_storage)
{
cuda::inclusive_scan( n, in, out, op, temp_storage );
}
// device-wide exclusive scan (forwards to the CUDA implementation)
//
// \param n number of items to scan
// \param in a device input iterator
// \param out a device output iterator
// \param op the binary scan operator
// \param identity the identity element
// \param temp_storage scratch space used by the CUDA scan
//
template <typename InputIterator, typename OutputIterator, typename BinaryOp, typename Identity>
void exclusive_scan(
device_tag tag,
const uint32 n,
InputIterator in,
OutputIterator out,
BinaryOp op,
Identity identity,
nvbio::vector<device_tag,uint8>& temp_storage)
{
cuda::exclusive_scan( n, in, out, op, identity, temp_storage );
}
#endif
// system-wide reduce
// (generic front-end: dispatches on system_tag)
//
// \param n number of items to reduce
// \param in a system iterator
// \param op the binary reduction operator
// \param temp_storage some temporary storage
//
template <typename system_tag, typename InputIterator, typename BinaryOp>
typename std::iterator_traits<InputIterator>::value_type reduce(
const uint32 n,
InputIterator in,
BinaryOp op,
nvbio::vector<system_tag,uint8>& temp_storage)
{
return reduce(
system_tag(),
n,
in,
op,
temp_storage );
}
// system-wide inclusive scan
// (generic front-end: dispatches on system_tag)
//
// \param n number of items to scan
// \param in an input iterator
// \param out an output iterator
// \param op the binary scan operator
// \param temp_storage some temporary storage
//
template <typename system_tag, typename InputIterator, typename OutputIterator, typename BinaryOp>
void inclusive_scan(
const uint32 n,
InputIterator in,
OutputIterator out,
BinaryOp op,
nvbio::vector<system_tag,uint8>& temp_storage)
{
inclusive_scan(
system_tag(),
n,
in,
out,
op,
temp_storage );
}
// system-wide exclusive scan
// (generic front-end: dispatches on system_tag)
//
// \param n number of items to scan
// \param in an input iterator
// \param out an output iterator
// \param op the binary scan operator
// \param identity the identity element
// \param temp_storage some temporary storage
//
template <typename system_tag, typename InputIterator, typename OutputIterator, typename BinaryOp, typename Identity>
void exclusive_scan(
const uint32 n,
InputIterator in,
OutputIterator out,
BinaryOp op,
Identity identity,
nvbio::vector<system_tag,uint8>& temp_storage)
{
exclusive_scan(
system_tag(),
n,
in,
out,
op,
identity,
temp_storage );
}
// host-wide copy of flagged items
//
// \param n number of input items
// \param in a input iterator
// \param flags a flags iterator (non-zero flag => copy the corresponding item)
// \param out a output iterator
// \param temp_storage some temporary storage (unused by the host implementation)
//
// \return the number of copied items
//
template <typename InputIterator, typename FlagsIterator, typename OutputIterator>
uint32 copy_flagged(
const host_tag tag,
const uint32 n,
InputIterator in,
FlagsIterator flags,
OutputIterator out,
nvbio::vector<host_tag,uint8>& temp_storage)
{
return uint32( thrust::copy_if(
in,
in + n,
flags,
out,
nvbio::is_true_functor<bool>() ) - out );
}
// host-wide copy of predicated items
//
// \param n number of input items
// \param in a input iterator
// \param out a output iterator
// \param pred a unary predicate functor selecting items to copy
// \param temp_storage some temporary storage (unused by the host implementation)
//
// \return the number of copied items
//
template <typename InputIterator, typename OutputIterator, typename Predicate>
uint32 copy_if(
const host_tag tag,
const uint32 n,
InputIterator in,
OutputIterator out,
const Predicate pred,
nvbio::vector<host_tag,uint8>& temp_storage)
{
return uint32( thrust::copy_if(
in,
in + n,
out,
pred ) - out );
}
// host-wide run-length encode
//
// \param n number of input items
// \param in a host input iterator
// \param out a host output iterator receiving the unique values
// \param counts a host output iterator receiving each run's length
// \param temp_storage some temporary storage (unused by the host implementation)
//
// \return the number of output runs
//
template <typename InputIterator, typename OutputIterator, typename CountIterator>
uint32 runlength_encode(
const host_tag tag,
const uint32 n,
InputIterator in,
OutputIterator out,
CountIterator counts,
nvbio::vector<host_tag,uint8>& temp_storage)
{
return uint32( thrust::reduce_by_key(
in,
in + n,
thrust::make_constant_iterator<uint32>( 1u ),
out,
counts ).first - out );
};
// host-wide reduce by key
//
// \param n number of input items
// \param keys_in a host input iterator
// \param values_in a host input iterator
// \param keys_out a host output iterator
// \param values_out a host output iterator
// \param reduction_op a reduction operator
// \param temp_storage some temporary storage (unused by the host implementation)
//
// \return the number of output (key, reduced-value) pairs
//
template <typename KeyIterator, typename ValueIterator, typename OutputKeyIterator, typename OutputValueIterator, typename ReductionOp>
uint32 reduce_by_key(
const host_tag tag,
const uint32 n,
KeyIterator keys_in,
ValueIterator values_in,
OutputKeyIterator keys_out,
OutputValueIterator values_out,
ReductionOp reduction_op,
nvbio::vector<host_tag,uint8>& temp_storage)
{
typedef typename std::iterator_traits<KeyIterator>::value_type key_type;
return uint32( thrust::reduce_by_key(
keys_in,
keys_in + n,
values_in,
keys_out,
values_out,
nvbio::equal_functor<key_type>(),
reduction_op ).first - keys_out );
}
#if defined(__CUDACC__)
// device-wide copy of flagged items (forwards to the CUDA implementation)
//
// \param n number of input items
// \param in a input iterator
// \param flags a flags iterator
// \param out a output iterator
// \param temp_storage scratch space used by the CUDA implementation
//
// \return the number of copied items
//
template <typename InputIterator, typename FlagsIterator, typename OutputIterator>
uint32 copy_flagged(
const device_tag tag,
const uint32 n,
InputIterator in,
FlagsIterator flags,
OutputIterator out,
nvbio::vector<device_tag,uint8>& temp_storage)
{
return cuda::copy_flagged( n, in, flags, out, temp_storage );
}
// device-wide copy of predicated items (forwards to the CUDA implementation)
//
// \param n number of input items
// \param in a input iterator
// \param out a output iterator
// \param pred a unary predicate functor selecting items to copy
// \param temp_storage scratch space used by the CUDA implementation
//
// \return the number of copied items
//
template <typename InputIterator, typename OutputIterator, typename Predicate>
uint32 copy_if(
const device_tag tag,
const uint32 n,
InputIterator in,
OutputIterator out,
const Predicate pred,
nvbio::vector<device_tag,uint8>& temp_storage)
{
return cuda::copy_if( n, in, out, pred, temp_storage );
}
// device-wide run-length encode (forwards to the CUDA implementation)
//
// \param n number of input items
// \param in a device input iterator
// \param out a device output iterator
// \param counts a device output count iterator
// \param temp_storage scratch space used by the CUDA implementation
//
// \return the number of output runs
//
template <typename InputIterator, typename OutputIterator, typename CountIterator>
uint32 runlength_encode(
const device_tag tag,
const uint32 n,
InputIterator in,
OutputIterator out,
CountIterator counts,
nvbio::vector<device_tag,uint8>& temp_storage)
{
return cuda::runlength_encode( n, in, out, counts, temp_storage );
};
// device-wide reduce by key (forwards to the CUDA implementation)
//
// \param n number of input items
// \param keys_in a device input iterator
// \param values_in a device input iterator
// \param keys_out a device output iterator
// \param values_out a device output iterator
// \param reduction_op a reduction operator
// \param temp_storage scratch space used by the CUDA implementation
//
// \return the number of output (key, reduced-value) pairs
//
template <typename KeyIterator, typename ValueIterator, typename OutputKeyIterator, typename OutputValueIterator, typename ReductionOp>
uint32 reduce_by_key(
const device_tag tag,
const uint32 n,
KeyIterator keys_in,
ValueIterator values_in,
OutputKeyIterator keys_out,
OutputValueIterator values_out,
ReductionOp reduction_op,
nvbio::vector<device_tag,uint8>& temp_storage)
{
return cuda::reduce_by_key(
n,
keys_in,
values_in,
keys_out,
values_out,
reduction_op,
temp_storage );
}
#endif
// system-wide copy of flagged items
// (generic front-end: dispatches on system_tag)
//
// \param n number of input items
// \param in an input iterator
// \param flags a flags iterator
// \param out an output iterator
// \param temp_storage some temporary storage
//
// \return the number of copied items
//
template <typename system_tag, typename InputIterator, typename FlagsIterator, typename OutputIterator>
uint32 copy_flagged(
const uint32 n,
InputIterator in,
FlagsIterator flags,
OutputIterator out,
nvbio::vector<system_tag,uint8>& temp_storage)
{
return copy_flagged( system_tag(), n, in, flags, out, temp_storage );
};
// system-wide copy of predicated items
// (generic front-end: dispatches on system_tag)
//
// \param n number of input items
// \param in an input iterator
// \param out an output iterator
// \param pred a unary predicate functor
// \param temp_storage some temporary storage
//
// \return the number of copied items
//
template <typename system_tag, typename InputIterator, typename OutputIterator, typename Predicate>
uint32 copy_if(
const uint32 n,
InputIterator in,
OutputIterator out,
const Predicate pred,
nvbio::vector<system_tag,uint8>& temp_storage)
{
return copy_if( system_tag(), n, in, out, pred, temp_storage );
};
// system-wide run-length encode
// (generic front-end: dispatches on system_tag)
//
// \param n number of input items
// \param in a system input iterator
// \param out a system output iterator
// \param counts a system output count iterator
// \param temp_storage some temporary storage
//
// \return the number of output runs
//
template <typename system_tag, typename InputIterator, typename OutputIterator, typename CountIterator>
uint32 runlength_encode(
const uint32 n,
InputIterator in,
OutputIterator out,
CountIterator counts,
nvbio::vector<system_tag,uint8>& temp_storage)
{
return runlength_encode( system_tag(), n, in, out, counts, temp_storage );
};
// system-wide reduce by key
// (generic front-end: dispatches on system_tag)
//
// \param n number of input items
// \param keys_in a system input iterator
// \param values_in a system input iterator
// \param keys_out a system output iterator
// \param values_out a system output iterator
// \param reduction_op a reduction operator
// \param temp_storage some temporary storage
//
// \return the number of output (key, reduced-value) pairs
//
template <typename system_tag, typename KeyIterator, typename ValueIterator, typename OutputKeyIterator, typename OutputValueIterator, typename ReductionOp>
uint32 reduce_by_key(
const uint32 n,
KeyIterator keys_in,
ValueIterator values_in,
OutputKeyIterator keys_out,
OutputValueIterator values_out,
ReductionOp reduction_op,
nvbio::vector<system_tag,uint8>& temp_storage)
{
return reduce_by_key(
system_tag(),
n,
keys_in,
values_in,
keys_out,
values_out,
reduction_op,
temp_storage );
}
// device-wide lower_bound (vectorized binary search, forwards to thrust)
//
// \param n number of input items
// \param values a system input iterator of values to be searched
// \param n_keys number of sorted keys
// \param keys a system input iterator of sorted keys
// \param indices a system output iterator receiving the insertion positions
//
template <typename KeyIterator, typename ValueIterator, typename OutputIterator>
void lower_bound(
const device_tag tag,
const uint32 n,
ValueIterator values,
const uint32 n_keys,
KeyIterator keys,
OutputIterator indices)
{
thrust::lower_bound(
keys, keys + n_keys,
values, values + n,
indices );
}
// host-wide lower_bound
// (one binary search per value, parallelized unconditionally with OpenMP)
//
// \param n number of input items
// \param values a system input iterator of values to be searched
// \param n_keys number of sorted keys
// \param keys a system input iterator of sorted keys
// \param indices a system output iterator receiving the insertion positions
//
template <typename KeyIterator, typename ValueIterator, typename OutputIterator>
void lower_bound(
const host_tag tag,
const uint32 n,
ValueIterator values,
const uint32 n_keys,
KeyIterator keys,
OutputIterator indices)
{
#pragma omp parallel for
for (long i = 0; i < long(n); ++i)
indices[i] = uint32( lower_bound( values[i], keys, n_keys ) - keys );
}
// system-wide lower_bound
// (generic front-end: dispatches on system_tag)
//
// \param n number of input items
// \param values a system input iterator of values to be searched
// \param n_keys number of sorted keys
// \param keys a system input iterator of sorted keys
// \param indices a system output iterator
//
template <typename system_tag, typename KeyIterator, typename ValueIterator, typename OutputIterator>
void lower_bound(
const uint32 n,
ValueIterator values,
const uint32 n_keys,
KeyIterator keys,
OutputIterator indices)
{
lower_bound(
system_tag(),
n,
values,
n_keys,
keys,
indices );
}
// device-wide upper_bound (vectorized binary search, forwards to thrust)
//
// \param n number of input items
// \param values a system input iterator of values to be searched
// \param n_keys number of sorted keys
// \param keys a system input iterator of sorted keys
// \param indices a system output iterator receiving the insertion positions
//
template <typename KeyIterator, typename ValueIterator, typename OutputIterator>
void upper_bound(
const device_tag tag,
const uint32 n,
ValueIterator values,
const uint32 n_keys,
KeyIterator keys,
OutputIterator indices)
{
thrust::upper_bound(
keys, keys + n_keys,
values, values + n,
indices );
}
// host-wide upper_bound
// (one binary search per value, parallelized unconditionally with OpenMP)
//
// \param n number of input items
// \param values a system input iterator of values to be searched
// \param n_keys number of sorted keys
// \param keys a system input iterator of sorted keys
// \param indices a system output iterator receiving the insertion positions
//
template <typename KeyIterator, typename ValueIterator, typename OutputIterator>
void upper_bound(
const host_tag tag,
const uint32 n,
ValueIterator values,
const uint32 n_keys,
KeyIterator keys,
OutputIterator indices)
{
#pragma omp parallel for
for (long i = 0; i < long(n); ++i)
indices[i] = uint32( upper_bound( values[i], keys, n_keys ) - keys );
}
// system-wide upper_bound
// (generic front-end: dispatches on system_tag)
//
// \param n number of input items
// \param values a system input iterator of values to be searched
// \param n_keys number of sorted keys
// \param keys a system input iterator of sorted keys
// \param indices a system output iterator
//
template <typename system_tag, typename KeyIterator, typename ValueIterator, typename OutputIterator>
void upper_bound(
const uint32 n,
ValueIterator values,
const uint32 n_keys,
KeyIterator keys,
OutputIterator indices)
{
upper_bound(
system_tag(),
n,
values,
n_keys,
keys,
indices );
}
#if defined(__CUDACC__)
// device-wide sort
//
// \param n number of input items
// \param keys a system input iterator of keys to be sorted
// \param temp_storage scratch space, resized to hold two key buffers
//
template <typename KeyIterator>
void radix_sort(
const device_tag tag,
const uint32 n,
KeyIterator keys,
nvbio::vector<device_tag,uint8>& temp_storage)
{
typedef typename std::iterator_traits<KeyIterator>::value_type key_type;
// allocate a ping-pong pair of key buffers inside the scratch block
cuda::alloc_temp_storage( temp_storage, 2 * n * sizeof(key_type) );
key_type* keys_ptr = reinterpret_cast<key_type*>( raw_pointer( temp_storage ) );
thrust::device_ptr<key_type> keys_buf( keys_ptr );
// stage the caller's keys into the first buffer
thrust::copy( keys, keys + n, keys_buf );
cuda::SortBuffers<key_type*> sort_buffers;
sort_buffers.keys[0] = keys_ptr;
sort_buffers.keys[1] = keys_ptr + n;
cuda::SortEnactor sort_enactor;
sort_enactor.sort( n, sort_buffers );
// copy the buffer selected by the enactor back to the caller's sequence
thrust::copy(
keys_buf + sort_buffers.selector * n,
keys_buf + sort_buffers.selector * n + n,
keys );
}
// device-wide sort by key
//
// \param n number of input items
// \param keys a system input iterator of keys to be sorted
// \param values a system input iterator of values to be sorted alongside
// \param temp_storage scratch space, resized to hold ping-pong buffers
//                     for both keys and values
//
template <typename KeyIterator, typename ValueIterator>
void radix_sort(
const device_tag tag,
const uint32 n,
KeyIterator keys,
ValueIterator values,
nvbio::vector<device_tag,uint8>& temp_storage)
{
typedef typename std::iterator_traits<KeyIterator>::value_type key_type;
typedef typename std::iterator_traits<ValueIterator>::value_type value_type;
// carve both ping-pong buffers out of a single 16-byte aligned scratch block
const uint32 aligned_key_bytes = align<16>( 2 * n * sizeof(key_type) );
const uint32 aligned_val_bytes = 2 * n * sizeof(value_type);
cuda::alloc_temp_storage( temp_storage, aligned_key_bytes + aligned_val_bytes );
key_type* keys_ptr = reinterpret_cast<key_type*>( raw_pointer( temp_storage ) );
value_type* values_ptr = reinterpret_cast<value_type*>( raw_pointer( temp_storage ) + aligned_key_bytes );
thrust::device_ptr<key_type> keys_buf( keys_ptr );
// BUG FIX: the value buffer must be typed on value_type; the original
// declared thrust::device_ptr<key_type> values_buf( values_ptr ), which
// is ill-formed (or silently reinterprets the data) whenever
// key_type != value_type.
thrust::device_ptr<value_type> values_buf( values_ptr );
// stage the caller's keys and values into the first buffers
thrust::copy( keys, keys + n, keys_buf );
thrust::copy( values, values + n, values_buf );
cuda::SortBuffers<key_type*, value_type*> sort_buffers;
sort_buffers.keys[0] = keys_ptr;
sort_buffers.keys[1] = keys_ptr + n;
sort_buffers.values[0] = values_ptr;
sort_buffers.values[1] = values_ptr + n;
cuda::SortEnactor sort_enactor;
sort_enactor.sort( n, sort_buffers );
// copy the buffers selected by the enactor back to the caller's sequences
thrust::copy(
keys_buf + sort_buffers.selector * n,
keys_buf + sort_buffers.selector * n + n,
keys );
thrust::copy(
values_buf + sort_buffers.selector * n,
values_buf + sort_buffers.selector * n + n,
values );
}
#endif
// host-wide sort
//
// \param n number of input items
// \param keys a system input iterator of keys to be sorted
// \param temp_storage some temporary storage (unused by the host implementation)
//
template <typename KeyIterator>
void radix_sort(
const host_tag tag,
const uint32 n,
KeyIterator keys,
nvbio::vector<host_tag,uint8>& temp_storage)
{
thrust::sort( keys, keys + n );
}
// system-wide sort
// (generic front-end: dispatches on system_tag)
//
// \param n number of input items
// \param keys a system input iterator of keys to be sorted
// \param temp_storage some temporary storage
//
template <typename system_tag, typename KeyIterator>
void radix_sort(
const uint32 n,
KeyIterator keys,
nvbio::vector<system_tag,uint8>& temp_storage)
{
radix_sort( system_tag(), n, keys, temp_storage );
}
// host-wide sort by key
//
// \param n number of input items
// \param keys a system input iterator of keys to be sorted
// \param values a system input iterator of values to be sorted
//
// Host-side sort-by-key; temp_storage is accepted for interface symmetry
// with the device overload but is not needed here.
template <typename KeyIterator, typename ValueIterator>
void radix_sort(
    const host_tag                  tag,
    const uint32                    n,
    KeyIterator                     keys,
    ValueIterator                   values,
    nvbio::vector<host_tag,uint8>&  temp_storage)
{
    // BUGFIX: thrust::sort_by_key takes (keys_first, keys_last, values_first);
    // the old code passed temp_storage as a fourth argument, which thrust
    // would interpret as a comparison functor.
    thrust::sort_by_key( keys, keys + n, values );
}
// system-wide sort by key
//
// \param n number of input items
// \param keys a system input iterator of keys to be sorted
// \param values a system input iterator of values to be sorted
//
// Tag-dispatching front-end for sort-by-key: selects the host or device
// implementation based on system_tag.
template <typename system_tag, typename KeyIterator, typename ValueIterator>
void radix_sort(
    const uint32                        n,
    KeyIterator                         keys,
    ValueIterator                       values,
    nvbio::vector<system_tag,uint8>&    temp_storage)
{
    const system_tag tag = system_tag();
    radix_sort( tag, n, keys, values, temp_storage );
}
// compute the co-rank (j,k) of the i-th output of merge(A,B): merging
// A[0..j) with B[0..k) produces exactly the first i merged elements
// ("Merge Path" diagonal intersection, found by binary search).
//
// \param i  output rank to split at (0 <= i <= m+n)
// \param A  first sorted sequence
// \param m  length of A
// \param B  second sorted sequence
// \param n  length of B
template <
    typename key_iterator1,
    typename key_iterator2>
uint2 corank(
    const int32         i,
    const key_iterator1 A,
    const int32         m,
    const key_iterator2 B,
    const int32         n)
{
    int32 j    = min( i, m );
    int32 k    = i - j;
    int32 j_lo = i >= n ? i - n : 0;
    int32 k_lo = 0;

    while (1)
    {
        // BUGFIX: both guards must hold before dereferencing A[j-1] and B[k];
        // the original '||' allowed out-of-bounds reads when j == 0 or k == n.
        if ((j > 0 && k < n) && A[j-1] > B[k])
        {
            // j is too large: move down the diagonal (decrease j, increase k)
            const int32 delta = util::divide_ri( j - j_lo, 2 );
            k_lo = k;
            j -= delta;
            k += delta;
            assert( j + k == i );
        }
        // BUGFIX: likewise, guard B[k-1] and A[j] with '&&', not '||'
        else if ((k > 0 && j < m) && B[k-1] >= A[j])
        {
            // k is too large: move up the diagonal (increase j, decrease k)
            const int32 delta = util::divide_ri( k - k_lo, 2 );
            j_lo = j;
            j += delta;
            k -= delta;
            assert( j + k == i );
        }
        else
            break;
    }
    return make_uint2( uint32(j), uint32(k) );
}
// host-side parallel merge-by-key: partitions the merge path into
// equally-sized output chunks (one per OpenMP thread) via corank(), then
// merges each chunk independently.
template <
    typename key_iterator1,
    typename key_iterator2,
    typename value_iterator1,
    typename value_iterator2,
    typename key_output,
    typename value_output>
void merge_by_key(
    const host_tag          tag,
    const uint32            A_len,
    const uint32            B_len,
    const key_iterator1     A_keys,
    const key_iterator2     B_keys,
    const value_iterator1   A_values,
    const value_iterator2   B_values,
    key_output              C_keys,
    value_output            C_values)
{
    if (A_len == 0)
    {
        // BUGFIX: A is empty, so the output is a plain copy of B
        // (the old code read from A here)
        #pragma omp parallel for
        for (int32 i = 0; i < int32( B_len ); ++i)
        {
            C_keys[i]   = B_keys[i];
            C_values[i] = B_values[i];
        }
        return; // BUGFIX: don't fall through to the parallel merge
    }
    if (B_len == 0)
    {
        // B is empty: the output is a plain copy of A
        #pragma omp parallel for
        for (int32 i = 0; i < int32( A_len ); ++i)
        {
            C_keys[i]   = A_keys[i];
            C_values[i] = A_values[i];
        }
        return; // BUGFIX: don't fall through to the parallel merge
    }

    const uint32 n_threads = (uint32)omp_get_num_procs();

    nvbio::vector<host_tag,uint32> A_diag( n_threads+1 );
    nvbio::vector<host_tag,uint32> B_diag( n_threads+1 );

    const uint32 C_len = A_len + B_len;

    // BUGFIX: the lower bounds belong at index 0; the old code wrote
    // index n_threads twice, leaving A_diag[0]/B_diag[0] uninitialized
    A_diag[0] = 0;
    B_diag[0] = 0;
    A_diag[ n_threads ] = A_len;
    B_diag[ n_threads ] = B_len;

    const uint32 n_partition = util::divide_ri( C_len, n_threads );

    // locate the merge-path split points at each partition boundary
    #pragma omp parallel for num_threads(n_threads)
    for (int32 i = 1; i < int32( n_threads ); ++i)
    {
        const int32 index = i * n_partition;
        const uint2 jk = corank( index, A_keys, A_len, B_keys, B_len );
        A_diag[i] = jk.x;
        B_diag[i] = jk.y;
    }

    // merge each partition independently into its output slot
    #pragma omp parallel for num_threads(n_threads)
    for (int32 i = 0; i < int32( n_threads ); ++i)
    {
        nvbio::merge_by_key(
            A_keys   + A_diag[i],
            A_keys   + A_diag[i+1],
            B_keys   + B_diag[i],
            B_keys   + B_diag[i+1],
            A_values + A_diag[i],
            B_values + B_diag[i],
            C_keys   + i * n_partition,
            C_values + i * n_partition );
    }
}
// device-side merge-by-key: defer directly to thrust
template <
    typename key_iterator1,
    typename key_iterator2,
    typename value_iterator1,
    typename value_iterator2,
    typename key_output,
    typename value_output>
void merge_by_key(
    const device_tag        tag,
    const uint32            A_len,
    const uint32            B_len,
    const key_iterator1     A_keys,
    const key_iterator2     B_keys,
    const value_iterator1   A_values,
    const value_iterator2   B_values,
    key_output              C_keys,
    value_output            C_values)
{
    thrust::merge_by_key(
        A_keys,
        A_keys + A_len,
        B_keys,
        B_keys + B_len,     // BUGFIX: the end of B is B_keys + B_len, not + A_len
        A_values,
        B_values,
        C_keys,
        C_values );
}
// Tag-dispatching front-end for merge-by-key: selects the host or device
// implementation based on system_tag. Neither backend currently consumes
// temp_storage.
template <
    typename system_tag,
    typename key_iterator1,
    typename key_iterator2,
    typename value_iterator1,
    typename value_iterator2,
    typename key_output,
    typename value_output>
void merge_by_key(
    const uint32            A_len,
    const uint32            B_len,
    const key_iterator1     A_keys,
    const key_iterator2     B_keys,
    const value_iterator1   A_values,
    const value_iterator2   B_values,
    key_output              C_keys,
    value_output            C_values,
    nvbio::vector<system_tag,uint8>& temp_storage)
{
    const system_tag tag = system_tag();
    merge_by_key( tag, A_len, B_len, A_keys, B_keys, A_values, B_values, C_keys, C_values );
}
#if defined(__CUDACC__)
/// A very simple for_each CUDA kernel
///
template <typename iterator_type, typename functor_type>
__global__
void for_each_kernel(const uint64 n, const iterator_type in, const functor_type f)
{
    // grid-stride loop: each thread visits every stride-th element,
    // so any grid size covers all n items
    const uint32 stride = blockDim.x * gridDim.x;

    for (uint64 idx = threadIdx.x + blockIdx.x * blockDim.x; idx < n; idx += stride)
        f( in[idx] );
}
#endif
// ask the optimizer how many blocks we should try using next
//
// Suggest the grid size (in blocks) for the next launch.
//
// The optimizer probes in three phases:
//   1. no sample yet           -> occupancy-limited maximum blocks
//   2. one sample (high end)   -> minimum of one block per multiprocessor
//   3. both ends sampled       -> midpoint of the current bisection bracket
template <typename KernelFunction>
uint32 for_each_enactor<device_tag>::suggested_blocks(KernelFunction kernel, const uint32 cta_size) const
{
#if defined(__CUDACC__)
if (m_blocks_hi == 0)
// phase 1: probe the occupancy-limited maximum for this kernel/CTA size
return cuda::multiprocessor_count() * cuda::max_active_blocks_per_multiprocessor( kernel, cta_size, 0u );
else if (m_blocks_lo == 0)
// phase 2: probe the low end, one block per multiprocessor
return cuda::multiprocessor_count();
else
// phase 3: bisect between the low and high blocks/SM samples
return cuda::multiprocessor_count() * (m_blocks_lo + m_blocks_hi) / 2;
#else
// no CUDA compiler: nothing can be launched
return 0u;
#endif
}
// update the optimizer's internal state with the latest speed data-point
//
inline
void for_each_enactor<device_tag>::update(const uint32 n_blocks, const float speed)
{
#if defined(__CUDACC__)
    // carry out a little binary search over the best number of blocks/SM
    const uint32 blocks_per_sm = n_blocks / cuda::multiprocessor_count();

    if (m_blocks_hi == 0)
    {
        // first data-point: record the high end of the bracket
        m_blocks_hi = blocks_per_sm;
        m_speed_hi  = speed;
    }
    else if (m_blocks_lo == 0)
    {
        // second data-point: record the low end of the bracket
        m_blocks_lo = blocks_per_sm;
        m_speed_lo  = speed;
    }
    else if (m_speed_lo > m_speed_hi)
    {
        // low end is faster: tighten the bracket from above
        m_blocks_hi = blocks_per_sm;
        m_speed_hi  = speed;
    }
    else
    {
        // high end is faster (or tied): tighten the bracket from below
        m_blocks_lo = blocks_per_sm;
        m_speed_lo  = speed;
    }
    // TODO: once the optimizer settles to a given value, it will never change:
    // we should explore using occasional "mutations" to adapt to possibly
    // changing conditions...
#endif
}
// enact the for_each
//
template <typename Iterator, typename Functor>
void for_each_enactor<device_tag>::operator () (
    const uint64    n,
    const Iterator  in,
    Functor         functor)
{
#if defined(__CUDACC__)
    const uint32 cta_size  = 128;
    const uint32 grid_size = suggested_blocks( for_each_kernel<Iterator,Functor>, cta_size );

    // time the launch so the observed throughput can be fed back
    // into the block-count optimizer
    cuda::Timer timer;
    timer.start();

    for_each_kernel<<<grid_size,cta_size>>>( n, in, functor );

    timer.stop();

    update( grid_size, float(n) / timer.seconds() );
#endif
}
} // namespace nvbio
|
PosTransformer.h | //////////////////////////////////////////////////////////////////////////////////////
// This file is distributed under the University of Illinois/NCSA Open Source License.
// See LICENSE file in top directory for details.
//
// Copyright (c) 2016 Jeongnim Kim and QMCPACK developers.
//
// File developed by:
//
// File created by: Jeongnim Kim, jeongnim.kim@intel.com, Intel Corp.
//////////////////////////////////////////////////////////////////////////////////////
// -*- C++ -*-
/** @file PosTransformer.h
 * @brief Support functions to handle position-type data managed by SoA
*/
#ifndef QMCPLUSPLUS_SOA_FAST_PARTICLE_OPERATORS_H
#define QMCPLUSPLUS_SOA_FAST_PARTICLE_OPERATORS_H
namespace qmcplusplus
{
//Need to reorg
#if 0
/** Dummy template class to be specialized
*
* - T1 the datatype to be transformed
* - D dimension
*/
template<class T1, unsigned D> struct PosTransformer { };
/** Specialized PosTransformer<T,3,true> using only the diagonal elements
*/
// NOTE(review): this specialization lives inside an '#if 0' region and is
// not compiled. Applies a 3x3 tensor X to SoA-packed positions.
template<class T>
struct PosTransformer<T,3>
{
using Array_t=VectorSoaContainer<T,3>;
using Transformer_t=Tensor<T,3>;
// pout[i] = pin[i] * X for rows in [first, last)
inline static void
apply(const Array_t& pin, const Transformer_t& X, Array_t& pout, int first, int last)
{
const int n=last-first;
// unpack the 3x3 transform into registers
T x00=X[0],x01=X[1],x02=X[2],
x10=X[3],x11=X[4],x12=X[5],
x20=X[6],x21=X[7],x22=X[8];
const T* restrict x_in=pin.data(0)+first; ASSUME_ALIGNED(x_in);
const T* restrict y_in=pin.data(1)+first; ASSUME_ALIGNED(y_in);
const T* restrict z_in=pin.data(2)+first; ASSUME_ALIGNED(z_in);
T* restrict x_out=pout.data(0)+first; ASSUME_ALIGNED(x_out);
T* restrict y_out=pout.data(1)+first; ASSUME_ALIGNED(y_out);
T* restrict z_out=pout.data(2)+first; ASSUME_ALIGNED(z_out);
#pragma ivdep
for(int i=0; i<n; i++)
{
x_out[i]=x_in[i]*x00+y_in[i]*x10+z_in[i]*x20;
y_out[i]=x_in[i]*x01+y_in[i]*x11+z_in[i]*x21;
z_out[i]=x_in[i]*x02+y_in[i]*x12+z_in[i]*x22;
}
}
inline static void
apply(const Transformer_t& X, const Array_t& pin, Array_t& pout, int first, int last)
{
// NOTE(review): '::apply' names a global-scope function, not this struct's
// overload — verify this resolves as intended before re-enabling the region
::apply(pin,X,pout,first,last);
}
// in-place variant: pinout[i] = pinout[i] * X for rows in [first, last)
inline static void
apply(Array_t& pinout, const Transformer_t& X,int first, int last)
{
const int n=last-first;
T x00=X[0],x01=X[1],x02=X[2],
x10=X[3],x11=X[4],x12=X[5],
x20=X[6],x21=X[7],x22=X[8];
T* restrict x_inout=pinout.data(0)+first; ASSUME_ALIGNED(x_inout);
T* restrict y_inout=pinout.data(1)+first; ASSUME_ALIGNED(y_inout);
T* restrict z_inout=pinout.data(2)+first; ASSUME_ALIGNED(z_inout);
#pragma ivdep
for(int i=0; i<n; i++)
{
// stage the transformed row in temporaries so the in-place
// update does not read values it has already overwritten
T x=x_inout[i]*x00+y_inout[i]*x10+z_inout[i]*x20;
T y=x_inout[i]*x01+y_inout[i]*x11+z_inout[i]*x21;
T z=x_inout[i]*x02+y_inout[i]*x12+z_inout[i]*x22;
x_inout[i]=x;
y_inout[i]=y;
z_inout[i]=z;
}
}
inline static void
apply(const Transformer_t& X, Array_t& pinout, int first, int last)
{
// NOTE(review): see '::apply' note above — global-scope call, verify intent
::apply(X,pinout,first,last);
}
};
#endif
/** General conversion function from AoS[nrows][ncols] to SoA[ncols][ldb]
* @param nrows the first dimension
* @param ncols the second dimension
* @param iptr input pointer
* @param lda stride of iptr
* @param out output pointer
* @param ldb stride of out
*
* Modeled after blas/lapack for lda/ldb
*/
template<typename T1, typename T2>
void PosAoS2SoA(int nrows, int ncols, const T1* restrict iptr, int lda, T2* restrict out, int ldb)
{
T2* restrict x = out;
T2* restrict y = out + ldb;
T2* restrict z = out + 2 * ldb;
#if !defined(__ibmxl__)
#pragma omp simd aligned(x, y, z)
#endif
for (int i = 0; i < nrows; ++i)
{
x[i] = iptr[i * ncols]; //x[i]=in[i][0];
y[i] = iptr[i * ncols + 1]; //y[i]=in[i][1];
z[i] = iptr[i * ncols + 2]; //z[i]=in[i][2];
}
}
/** General conversion function from SoA[ncols][ldb] to AoS[nrows][ncols]
* @param nrows the first dimension
* @param ncols the second dimension
* @param iptr input pointer
* @param lda stride of iptr
* @param out output pointer
* @param ldb stride of out
*
* Modeled after blas/lapack for lda/ldb
*/
template<typename T1, typename T2>
void PosSoA2AoS(int nrows, int ncols, const T1* restrict iptr, int lda, T2* restrict out, int ldb)
{
const T1* restrict x = iptr;
const T1* restrict y = iptr + lda;
const T1* restrict z = iptr + 2 * lda;
#if !defined(__ibmxl__)
#pragma omp simd aligned(x, y, z)
#endif
for (int i = 0; i < nrows; ++i)
{
out[i * ldb] = x[i]; //out[i][0]=x[i];
out[i * ldb + 1] = y[i]; //out[i][1]=y[i];
out[i * ldb + 2] = z[i]; //out[i][2]=z[i];
}
}
#if 0
//#if defined(HAVE_MKL)
///specialization for double AoS2SoA
// (disabled by the enclosing '#if 0'; would require HAVE_MKL)
template<>
void PosAoS2SoA(int nrows, int ncols, const double* restrict in, int lda, double* restrict out, int ldb)
{
const double zone={1.0}; // alpha scaling factor (identity)
mkl_domatcopy('R','T',nrows,ncols,zone,in,lda,out,ldb); // row-major out-of-place transpose
}
///specialization for float AoS2SoA
// (disabled by the enclosing '#if 0'; would require HAVE_MKL)
template<>
void PosAoS2SoA(int nrows, int ncols, const float* restrict in, int lda, float* restrict out, int ldb)
{
const float zone={1.0f}; // alpha scaling factor (identity)
mkl_somatcopy('R','T',nrows,ncols,zone,in,lda,out,ldb); // row-major out-of-place transpose
}
///specialization for double SoA2AoS
// (disabled by the enclosing '#if 0'; would require HAVE_MKL)
template<>
void PosSoA2AoS(int nrows, int ncols, const double* restrict in, int lda, double* restrict out, int ldb)
{
const double zone={1.0}; // alpha scaling factor (identity)
mkl_domatcopy('R','T',nrows,ncols,zone,in,lda,out,ldb); // row-major out-of-place transpose
}
///specialization for float SoA2AoS
// (disabled by the enclosing '#if 0'; would require HAVE_MKL)
template<>
void PosSoA2AoS(int nrows, int ncols, const float* restrict in, int lda, float* restrict out, int ldb)
{
const float zone={1.0f}; // alpha scaling factor (identity)
mkl_somatcopy('R','T',nrows,ncols,zone,in,lda,out,ldb); // row-major out-of-place transpose
}
#endif
} // namespace qmcplusplus
#endif
|
DifferentiableLutN.h | // --------------------------------------------------------------------------
// Binary Brain -- binary neural net framework
//
// Copyright (C) 2018-2019 by Ryuji Fuchikami
// https://github.com/ryuz
// ryuji.fuchikami@nifty.com
// --------------------------------------------------------------------------
#pragma once
#include <cstdint>
#include <random>
#include "bb/StochasticLutModel.h"
#include "bb/Tensor.h"
#include "bb/FixedSizeConnectionTable.h"
#include "bb/StochasticOperation.h"
namespace bb {
// Abstract interface for differentiable-LUT layers: extends
// StochasticLutModel with accessors for normalization statistics and
// affine parameters (presumably batch-norm running mean/variance and
// gamma/beta — confirm against the concrete implementations).
class DifferentiableLutModel : public StochasticLutModel
{
public:
    virtual Tensor GetMean(void) const = 0;   // per-node mean statistics
    virtual Tensor GetVar(void) const = 0;    // per-node variance statistics
    virtual double GetGamma(void) const = 0;  // affine scale parameter
    virtual double GetBeta(void) const = 0;   // affine shift parameter
};
template <int N = 6, typename BinType = Bit, typename RealType = float>
class DifferentiableLutN : public DifferentiableLutModel
{
using _super = StochasticLutModel;
static int const NN = (1 << N);
public:
static inline std::string ModelName(void) { return "DifferentiableLut" + std::to_string(N); }
static inline std::string ObjectName(void){ return ModelName() + "_" + DataType<BinType>::Name() + "_" + DataType<RealType>::Name(); }
std::string GetModelName(void) const { return ModelName(); }
std::string GetObjectName(void) const { return ObjectName(); }
protected:
bool m_host_only = false;
bool m_lut_binarize = false;
bool m_binary_mode = true;
bool m_batch_norm = true;
bool m_backward_break = false;
bool m_flagClamp = false;
indices_t m_input_shape;
indices_t m_output_shape;
FixedSizeConnectionTable<N> m_connection_table;
RealType m_unbinarize_bias = (RealType)0.25;
index_t m_max_tmp_mem_size = 256 * 1024 * 1024;
std::string m_connection;
std::shared_ptr<Tensor> m_W;
std::shared_ptr<Tensor> m_dW;
RealType m_momentum;
RealType m_gamma;
RealType m_beta;
Tensor_<RealType> m_mean; // 平均値
Tensor_<RealType> m_rstd; // 標準偏差の逆数
Tensor_<RealType> m_running_mean;
Tensor_<RealType> m_running_var;
std::mt19937_64 m_mt;
public:
struct create_t
{
indices_t output_shape; //< 出力形状
bool batch_norm = true;
bool binary = true;
std::string connection; //< 結線ルール
RealType momentum = (RealType)0.9;
RealType gamma = (RealType)0.3;
RealType beta = (RealType)0.5;
std::uint64_t seed = 1; //< 乱数シード
};
protected:
DifferentiableLutN(create_t const &create)
{
// BB_ASSERT(!create.output_shape.empty());
m_output_shape = create.output_shape;
m_connection = create.connection;
m_batch_norm = create.batch_norm;
m_binary_mode = create.binary;
m_momentum = create.momentum;
m_gamma = create.gamma;
m_beta = create.beta;
m_mt.seed(create.seed);
m_W = std::make_shared<Tensor>();
m_dW = std::make_shared<Tensor>();
if ( DataType<BinType>::type == BB_TYPE_BIT ) {
m_binary_mode = true;
}
}
void CommandProc(std::vector<std::string> args)
{
_super::CommandProc(args);
// バイナリモード設定
if ( DataType<BinType>::type != BB_TYPE_BIT ) {
if ( args.size() == 2 && args[0] == "binary" )
{
m_binary_mode = EvalBool(args[1]);
}
}
// LUTバイナライズ設定
if ( args.size() == 2 && args[0] == "lut_binarize" )
{
m_lut_binarize = EvalBool(args[1]);
}
// HostOnlyモード設定
if (args.size() == 2 && args[0] == "host_only")
{
m_host_only = EvalBool(args[1]);
}
// batch_norm設定
if (args.size() == 2 && args[0] == "batch_norm")
{
m_batch_norm = EvalBool(args[1]);
}
// momentum設定
if (args.size() == 2 && args[0] == "momentum")
{
m_momentum = (RealType)EvalReal(args[1]);
}
// backward_break
if (args.size() == 2 && args[0] == "backward_break")
{
m_backward_break = EvalBool(args[1]);
}
}
void PrintInfoText(std::ostream& os, std::string indent, int columns, int nest, int depth) const override
{
_super::PrintInfoText(os, indent, columns, nest, depth);
// os << indent << " input shape : " << GetInputShape();
// os << indent << " output shape : " << GetOutputShape();
os << indent << " binary : " << m_binary_mode;
os << indent << " batch_norm : " << m_batch_norm << std::endl;
}
public:
~DifferentiableLutN() {}
static std::shared_ptr<DifferentiableLutN> Create(create_t const &create)
{
return std::shared_ptr<DifferentiableLutN>(new DifferentiableLutN(create));
}
static std::shared_ptr<DifferentiableLutN> Create(indices_t const &output_shape, bool batch_norm = true, std::string connection = "") //, std::uint64_t seed = 1)
{
create_t create;
create.output_shape = output_shape;
create.connection = connection;
create.batch_norm = batch_norm;
create.seed = 1; //seed;
return Create(create);
}
static std::shared_ptr<DifferentiableLutN> Create(index_t output_node_size, bool batch_norm = true, std::string connection = "") //, std::uint64_t seed = 1)
{
create_t create;
create.output_shape.resize(1);
create.output_shape[0] = output_node_size;
create.connection = connection;
create.batch_norm = batch_norm;
create.seed = 1; // seed;
return Create(create);
}
static std::shared_ptr<DifferentiableLutN> Create(void)
{
return Create(create_t());
}
#ifdef BB_PYBIND11
static std::shared_ptr<DifferentiableLutN> CreatePy(
indices_t const &output_shape,
bool batch_norm = true,
bool binary = true,
std::string connection = "",
double momentum = 0.0,
double gamma = 0.3,
double beta = 0.5,
std::uint64_t seed = 1)
{
create_t create;
create.output_shape = output_shape;
create.batch_norm = batch_norm;
create.binary = binary;
create.connection = connection;
create.momentum = (RealType)momentum;
create.gamma = (RealType)gamma;
create.beta = (RealType)beta;
create.seed = seed;
return Create(create);
}
#endif
protected:
// Serialize
void DumpObjectData(std::ostream &os) const
{
// バージョン
std::int64_t ver = 1;
bb::SaveValue(os, ver);
// 親クラス
_super::DumpObjectData(os);
// メンバ
SaveValue(os, m_host_only);
SaveValue(os, m_lut_binarize);
SaveValue(os, m_binary_mode);
SaveValue(os, m_batch_norm);
SaveValue(os, m_flagClamp);
SaveValue(os, m_input_shape);
SaveValue(os, m_output_shape);
m_connection_table.DumpObject(os);
m_W->DumpObject(os);
SaveValue(os, m_unbinarize_bias);
SaveValue(os, m_momentum);
SaveValue(os, m_gamma);
SaveValue(os, m_beta);
m_running_mean.DumpObject(os);
m_running_var.DumpObject(os);
}
void LoadObjectData(std::istream &is)
{
// バージョン
std::int64_t ver;
bb::LoadValue(is, ver);
BB_ASSERT(ver == 1);
// 親クラス
_super::LoadObjectData(is);
// メンバ
LoadValue(is, m_host_only);
LoadValue(is, m_lut_binarize);
LoadValue(is, m_binary_mode);
LoadValue(is, m_batch_norm);
LoadValue(is, m_flagClamp);
LoadValue(is, m_input_shape);
LoadValue(is, m_output_shape);
m_connection_table.LoadObject(is);
m_W->LoadObject(is);
LoadValue(is, m_unbinarize_bias);
LoadValue(is, m_momentum);
LoadValue(is, m_gamma);
LoadValue(is, m_beta);
m_running_mean.LoadObject(is);
m_running_var.LoadObject(is);
// 再構築
m_dW->Resize(m_W->GetShape(), DataType<RealType>::type);
m_dW->FillZero();
m_mean.Resize(m_output_shape);
m_rstd.Resize(m_output_shape);
}
public:
// Serialize(旧)
void Save(std::ostream &os) const
{
_super::Save(os);
SaveIndices(os, m_input_shape);
SaveIndices(os, m_output_shape);
m_connection_table.Save(os);
m_W->Save(os);
bb::SaveValue(os, m_momentum);
bb::SaveValue(os, m_gamma);
bb::SaveValue(os, m_beta);
m_running_mean.Save(os);
m_running_var.Save(os);
}
void Load(std::istream &is)
{
_super::Load(is);
m_input_shape = LoadIndices(is);
m_output_shape = LoadIndices(is);
m_connection_table.Load(is);
m_W->Load(is);
bb::LoadValue(is, m_momentum);
bb::LoadValue(is, m_gamma);
bb::LoadValue(is, m_beta);
m_running_mean.Load(is);
m_running_var.Load(is);
}
#ifdef BB_WITH_CEREAL
template <class Archive>
void save(Archive& archive, std::uint32_t const version) const
{
_super::save(archive, version);
archive(cereal::make_nvp("input_shape", m_input_shape));
archive(cereal::make_nvp("output_shape", m_output_shape));
archive(cereal::make_nvp("connection_table", m_connection_table));
archive(cereal::make_nvp("W", *m_W));
archive(cereal::make_nvp("gamma", m_gamma));
archive(cereal::make_nvp("beta", m_beta));
archive(cereal::make_nvp("running_mean", m_running_mean));
archive(cereal::make_nvp("running_var", m_running_var));
}
template <class Archive>
void load(Archive& archive, std::uint32_t const version)
{
_super::load(archive, version);
archive(cereal::make_nvp("input_shape", m_input_shape));
archive(cereal::make_nvp("output_shape", m_output_shape));
archive(cereal::make_nvp("connection_table", m_connection_table));
archive(cereal::make_nvp("W", *m_W));
archive(cereal::make_nvp("gamma", m_gamma));
archive(cereal::make_nvp("beta", m_beta));
archive(cereal::make_nvp("running_mean", m_running_mean));
archive(cereal::make_nvp("running_var", m_running_var));
}
void Save(cereal::JSONOutputArchive& archive) const
{
archive(cereal::make_nvp("DifferentiableLutN", *this));
}
void Load(cereal::JSONInputArchive& archive)
{
archive(cereal::make_nvp("DifferentiableLutN", *this));
}
#endif
Tensor &W(void) override { return *m_W; }
Tensor const &W(void) const override { return *m_W; }
Tensor &dW(void) override { return *m_dW; }
Tensor const &dW(void) const override { return *m_dW; }
Tensor GetMean(void) const override { return (Tensor)m_running_mean; }
Tensor GetVar(void) const override { return (Tensor)m_running_var; }
double GetGamma(void) const override { return (double)m_gamma; }
double GetBeta(void) const override { return (double)m_beta; }
auto lock_W(void) { return m_W->Lock<RealType>(); }
auto lock_W_const(void) const { return m_W->LockConst<RealType>(); }
auto lock_dW(void) { return m_dW->Lock<RealType>(); }
auto lock_dW_const(void) const { return m_dW->LockConst<RealType>(); }
auto lock_mean(void) { return m_running_mean.Lock(); }
auto lock_mean_const(void) const { return m_running_mean.LockConst(); }
auto lock_var(void) { return m_running_var.Lock(); }
auto lock_var_const(void) const { return m_running_var.LockConst(); }
// debug
auto lock_tmp_mean_const(void) const { return m_mean.LockConst(); }
auto lock_tmp_rstd_const(void) const { return m_rstd.LockConst(); }
/**
* @brief 出力形状取得
* @detail 出力形状を取得する
* @return 出力形状を返す
*/
indices_t GetOutputShape(void) const
{
return m_output_shape;
}
/**
* @brief 入力形状取得
* @detail 入力形状を取得する
* @return 入力形状を返す
*/
indices_t GetInputShape(void) const
{
return m_input_shape;
}
// connection management
index_t GetNodeConnectionSize(index_t output_node) const
{
return m_connection_table.GetInputConnectionSize(output_node);
}
void SetNodeConnectionIndex(index_t output_node, index_t input_index, index_t input_node)
{
m_connection_table.SetInputConnection(output_node, input_index, input_node);
}
index_t GetNodeConnectionIndex(index_t output_node, index_t input_index) const
{
return m_connection_table.GetInputConnection(output_node, input_index);
}
/**
* @brief 入力のshape設定
* @detail 入力のshape設定
* @param shape 新しいshape
* @return なし
*/
indices_t SetInputShape(indices_t shape)
{
// 設定済みなら何もしない
if ( shape == this->GetInputShape() ) {
return this->GetOutputShape();
}
// 形状設定
m_input_shape = shape;
// 接続初期化
m_connection_table.SetShape(m_input_shape, m_output_shape);
m_connection_table.InitializeConnection(m_mt(), m_connection);
// パラメータ初期化(結局初期値は何が良いのかまだよくわからない)
m_W->Resize ({this->GetOutputNodeSize(), NN}, DataType<RealType>::type); m_W->InitNormalDistribution(0.5, 0.01, m_mt());
m_dW->Resize({this->GetOutputNodeSize(), NN}, DataType<RealType>::type); m_dW->FillZero();
m_mean.Resize(m_output_shape);
m_rstd.Resize(m_output_shape);
m_running_mean.Resize(m_output_shape); m_running_mean = (RealType)0.0;
m_running_var.Resize(m_output_shape); m_running_var = (RealType)1.0;
return m_output_shape;
}
Variables GetParameters(void)
{
Variables parameters;
if ( !this->m_parameter_lock ) {
parameters.PushBack(m_W);
}
return parameters;
}
Variables GetGradients(void)
{
Variables gradients;
if ( !this->m_parameter_lock ) {
gradients.PushBack(m_dW);
}
return gradients;
}
// ノード単位でのForward計算
std::vector<double> ForwardNode(index_t node, std::vector<double> input_value) const
{
BB_ASSERT(input_value.size() == N);
// パラメータクリップ
if ( m_flagClamp ) {
m_W->Clamp((RealType)0.0, (RealType)1.0);
(const_cast<DifferentiableLutN*>(this))->m_flagClamp = false;
}
auto W_ptr = lock_W_const();
auto running_mean_ptr = m_running_mean.LockConst();
auto running_var_ptr = m_running_var.LockConst();
RealType W[(1 << N)];
for ( int i = 0; i < (1 << N); ++i) {
W[i] = W_ptr(node, i);
if ( m_lut_binarize ) {
W[i] = ((W[i] > (RealType)0.5) ? (RealType)1.0 : (RealType)0.0);
}
}
RealType mean = running_mean_ptr[node];
RealType var = running_var_ptr[node];
RealType rstd = (RealType)1.0 / std::sqrt(var);
RealType x[N];
for ( int i = 0; i < N; ++i) {
x[i] = (RealType)input_value[i];
if ( m_binary_mode ) {
x[i] = (RealType)0.5 + ((x[i] > (RealType)0.5) ? +m_unbinarize_bias : -m_unbinarize_bias);
}
else {
x[i] = std::min((RealType)1.0, std::max((RealType)0.0, x[i]));
}
}
RealType y;
StochasticOperation_Lut_Forward<RealType>(x, &y, W, N);
if ( m_batch_norm ) {
y = (y - mean) * rstd;
y = y * m_gamma + m_beta;
}
if ( m_binary_mode ) {
// binarize
y = ((y > (RealType)0.5) ? (RealType)1.0 : (RealType)0.0);
}
else {
// hard-tanh
y = std::min(y, (RealType)1.0);
y = std::max(y, (RealType)0.0);
}
std::vector<double> result;
result.push_back((double)y);
return result;
}
FrameBuffer Forward(FrameBuffer x_buf, bool train = true)
{
BB_ASSERT(x_buf.GetType() == DataType<BinType>::type);
// SetInputShpaeされていなければ初回に設定
if (x_buf.GetShape() != this->GetInputShape()) {
SetInputShape(x_buf.GetShape());
}
// 出力を設定
FrameBuffer y_buf(x_buf.GetFrameSize(), this->GetOutputShape(), DataType<BinType>::type);
// backwardの為に保存
if ( train ) {
this->PushFrameBuffer(x_buf);
}
// パラメータクリップ
if ( m_flagClamp ) {
m_W->Clamp((RealType)0.0, (RealType)1.0);
m_flagClamp = false;
}
if ( m_batch_norm ) {
// with BatchNormalization
#ifdef BB_WITH_CUDA
// CUDA float
if ( N >= 2 && N <= 6 && DataType<BinType>::type == BB_TYPE_FP32 && DataType<RealType>::type == BB_TYPE_FP32 && !m_host_only
&& x_buf.IsDeviceAvailable() && y_buf.IsDeviceAvailable() && Manager::IsDeviceAvailable()) {
if ( train ) {
auto x_ptr = x_buf.LockDeviceMemoryConst();
auto y_ptr = y_buf.LockDeviceMemory(true);
auto input_table_ptr = m_connection_table.LockDeviceMemConst_InputTable();
auto W_ptr = m_W->LockDeviceMemoryConst();
auto mean_ptr = m_mean.LockDeviceMemory(true);
auto rstd_ptr = m_rstd.LockDeviceMemory(true);
auto running_mean_ptr = m_running_mean.LockDeviceMemory();
auto running_var_ptr = m_running_var.LockDeviceMemory();
bbcu_fp32_DifferentiableLutN_ForwardTraining<N>
(
(float const *)x_ptr.GetAddr(),
(float *)y_ptr.GetAddr(),
(int const *)input_table_ptr.GetAddr(),
(float const *)W_ptr.GetAddr(),
(float *)mean_ptr.GetAddr(),
(float *)rstd_ptr.GetAddr(),
(float *)running_mean_ptr.GetAddr(),
(float *)running_var_ptr.GetAddr(),
(float )m_gamma,
(float )m_beta,
(float )m_momentum,
(float )m_unbinarize_bias,
(int )y_buf.GetNodeSize(),
(int )y_buf.GetFrameSize(),
(int )(y_buf.GetFrameStride() / sizeof(float)),
(int )(m_lut_binarize ? 1 : 0),
(int )(m_binary_mode ? 1 : 0)
);
}
else {
auto x_ptr = x_buf.LockDeviceMemoryConst();
auto y_ptr = y_buf.LockDeviceMemory(true);
auto input_table_ptr = m_connection_table.LockConst_InputTable();
auto W_ptr = m_W->LockDeviceMemoryConst();
auto running_mean_ptr = m_running_mean.LockDeviceMemory();
auto running_var_ptr = m_running_var.LockDeviceMemory();
bbcu_fp32_DifferentiableLutN_ForwardInference<N>
(
(float const *)x_ptr.GetAddr(),
(float *)y_ptr.GetAddr(),
(int const *)input_table_ptr.GetAddr(),
(float const *)W_ptr.GetAddr(),
(float *)running_mean_ptr.GetAddr(),
(float *)running_var_ptr.GetAddr(),
(float )m_gamma,
(float )m_beta,
(float )m_unbinarize_bias,
(int )y_buf.GetNodeSize(),
(int )y_buf.GetFrameSize(),
(int )(y_buf.GetFrameStride() / sizeof(float)),
(int )(m_lut_binarize ? 1 : 0),
(int )(m_binary_mode ? 1 : 0)
);
}
return y_buf;
}
// CUDA Bit
if ( N >= 2 && N <= 6 && DataType<BinType>::type == BB_TYPE_BIT && DataType<RealType>::type == BB_TYPE_FP32 && !m_host_only
&& x_buf.IsDeviceAvailable() && y_buf.IsDeviceAvailable() && Manager::IsDeviceAvailable()) {
if ( train ) {
auto x_ptr = x_buf.LockDeviceMemoryConst();
auto y_ptr = y_buf.LockDeviceMemory(true);
auto input_table_ptr = m_connection_table.LockDeviceMemConst_InputTable();
auto W_ptr = m_W->LockDeviceMemoryConst();
auto mean_ptr = m_mean.LockDeviceMemory(true);
auto rstd_ptr = m_rstd.LockDeviceMemory(true);
auto running_mean_ptr = m_running_mean.LockDeviceMemory();
auto running_var_ptr = m_running_var.LockDeviceMemory();
bbcu_bit_fp32_DifferentiableLutN_ForwardTraining<N>
(
(int const *)x_ptr.GetAddr(),
(int *)y_ptr.GetAddr(),
(int const *)input_table_ptr.GetAddr(),
(float const *)W_ptr.GetAddr(),
(float *)mean_ptr.GetAddr(),
(float *)rstd_ptr.GetAddr(),
(float *)running_mean_ptr.GetAddr(),
(float *)running_var_ptr.GetAddr(),
(float )m_gamma,
(float )m_beta,
(float )m_momentum,
(float )m_unbinarize_bias,
(int )y_buf.GetNodeSize(),
(int )y_buf.GetFrameSize(),
(int )(y_buf.GetFrameStride() / sizeof(int)),
(int )(m_lut_binarize ? 1 : 0)
);
}
else {
auto x_ptr = x_buf.LockDeviceMemoryConst();
auto y_ptr = y_buf.LockDeviceMemory(true);
auto input_table_ptr = m_connection_table.LockDeviceMemConst_InputTable();
auto W_ptr = m_W->LockDeviceMemoryConst();
auto running_mean_ptr = m_running_mean.LockDeviceMemoryConst();
auto running_var_ptr = m_running_var.LockDeviceMemoryConst();
bbcu_bit_fp32_DifferentiableLutN_ForwardInference<N>
(
(int const *)x_ptr.GetAddr(),
(int *)y_ptr.GetAddr(),
(int const *)input_table_ptr.GetAddr(),
(float const *)W_ptr.GetAddr(),
(float const *)running_mean_ptr.GetAddr(),
(float const *)running_var_ptr.GetAddr(),
(float )m_gamma,
(float )m_beta,
(float )m_unbinarize_bias,
(int )y_buf.GetNodeSize(),
(int )y_buf.GetFrameSize(),
(int )(y_buf.GetFrameStride() / sizeof(int)),
(int )(m_lut_binarize ? 1 : 0)
);
}
return y_buf;
}
#endif
{
// Generic
auto node_size = y_buf.GetNodeSize();
auto frame_size = y_buf.GetFrameSize();
RealType reciprocal_frame_size = (RealType)1.0 / frame_size;
if ( train ) {
auto x_ptr = x_buf.LockConst<BinType>();
auto y_ptr = y_buf.Lock<BinType>();
auto input_table_ptr = m_connection_table.LockConst_InputTable();
auto W_ptr = lock_W_const();
auto mean_ptr = m_mean.Lock(true);
auto rstd_ptr = m_rstd.Lock(true);
auto running_mean_ptr = m_running_mean.Lock();
auto running_var_ptr = m_running_var.Lock();
#pragma omp parallel for
for ( index_t node = 0; node < node_size; ++node ) {
RealType W[(1 << N)];
for ( int i = 0; i < (1 << N); ++i) {
W[i] = W_ptr(node, i);
if ( m_lut_binarize ) {
W[i] = ((W[i] > (RealType)0.5) ? (RealType)1.0 : (RealType)0.0);
}
}
// 平均と分散計測
RealType s1 = 0, c1 = 0, y1, t1;
RealType s2 = 0, c2 = 0, y2, t2;
for ( index_t frame = 0; frame < frame_size; ++frame ) {
RealType x[N];
for ( int i = 0; i < N; ++i) {
x[i] = (RealType)x_ptr.Get(frame, input_table_ptr(node, i));
if ( m_binary_mode ) {
x[i] = (RealType)0.5 + ((x[i] > (RealType)0.5) ? +m_unbinarize_bias : -m_unbinarize_bias);
}
else {
x[i] = std::min((RealType)1.0, std::max((RealType)0.0, x[i]));
}
}
RealType y;
StochasticOperation_Lut_Forward<RealType>(x, &y, W, N);
// 集計
y1 = y - c1;
t1 = s1 + y1;
c1 = (t1 - s1) - y1;
s1 = t1;
y2 = (y * y) - c2;
t2 = s2 + y2;
c2 = (t2 - s2) - y2;
s2 = t2;
}
RealType mean = s1 * reciprocal_frame_size;
RealType var = std::max(1.0e-5f, (s2 * reciprocal_frame_size) - (mean * mean));
RealType rstd = (RealType)1.0 / std::sqrt(var);
// 書き込み
running_mean_ptr[node] = running_mean_ptr[node] * m_momentum + mean * ((RealType)1.0 - m_momentum);
running_var_ptr[node] = running_var_ptr[node] * m_momentum + var * ((RealType)1.0 - m_momentum);
mean_ptr[node] = mean;
rstd_ptr[node] = rstd;
// 正規化
for ( index_t frame = 0; frame < frame_size; ++frame ) {
// Forward計算
RealType x[N];
for ( int i = 0; i < N; ++i) {
x[i] = (RealType)x_ptr.Get(frame, input_table_ptr(node, i));
if ( m_binary_mode ) {
x[i] = (RealType)0.5 + ((x[i] > (RealType)0.5) ? +m_unbinarize_bias : -m_unbinarize_bias);
}
else {
x[i] = std::min((RealType)1.0, std::max((RealType)0.0, x[i]));
}
}
RealType y;
StochasticOperation_Lut_Forward<RealType>(x, &y, W, N);
y = (y - mean) * rstd;
y = y * m_gamma + m_beta;
if ( m_binary_mode ) {
// binarize
y = ((y > (RealType)0.5) ? (RealType)1.0 : (RealType)0.0);
}
else {
// hard-tanh
y = std::min(y, (RealType)1.0);
y = std::max(y, (RealType)0.0);
}
y_ptr.Set(frame, node, y);
}
}
}
else {
auto x_ptr = x_buf.LockConst<BinType>();
auto y_ptr = y_buf.Lock<BinType>();
auto input_table_ptr = m_connection_table.LockConst_InputTable();
auto W_ptr = lock_W_const();
auto running_mean_ptr = m_running_mean.LockConst();
auto running_var_ptr = m_running_var.LockConst();
#pragma omp parallel for
for ( index_t node = 0; node < node_size; ++node ) {
RealType W[(1 << N)];
for ( int i = 0; i < (1 << N); ++i) {
W[i] = W_ptr(node, i);
if ( m_lut_binarize ) {
W[i] = ((W[i] > (RealType)0.5) ? (RealType)1.0 : (RealType)0.0);
}
}
RealType mean = running_mean_ptr[node];
RealType var = running_var_ptr[node];
RealType rstd = (RealType)1.0 / std::sqrt(var);
// Forward計算
for ( index_t frame = 0; frame < frame_size; ++frame ) {
RealType x[N];
for ( int i = 0; i < N; ++i) {
x[i] = (RealType)x_ptr.Get(frame, input_table_ptr(node, i));
if ( m_binary_mode ) {
x[i] = (RealType)0.5 + ((x[i] > (RealType)0.5) ? +m_unbinarize_bias : -m_unbinarize_bias);
}
else {
x[i] = std::min((RealType)1.0, std::max((RealType)0.0, x[i]));
}
}
RealType y;
StochasticOperation_Lut_Forward<RealType>(x, &y, W, N);
y = (y - mean) * rstd;
y = y * m_gamma + m_beta;
if ( m_binary_mode ) {
// binarize
y = ((y > (RealType)0.5) ? (RealType)1.0 : (RealType)0.0);
}
else {
// hard-tanh
y = std::min(y, (RealType)1.0);
y = std::max(y, (RealType)0.0);
}
y_ptr.Set(frame, node, y);
}
}
}
return y_buf;
}
}
else {
// None BatchNormalization
#ifdef BB_WITH_CUDA
// CUDA float
if ( N >= 2 && N <= 6 && DataType<BinType>::type == BB_TYPE_FP32 && DataType<RealType>::type == BB_TYPE_FP32 && !m_host_only
&& x_buf.IsDeviceAvailable() && y_buf.IsDeviceAvailable() && Manager::IsDeviceAvailable()) {
auto x_ptr = x_buf.LockDeviceMemoryConst();
auto y_ptr = y_buf.LockDeviceMemory(true);
auto input_table_ptr = m_connection_table.LockDeviceMemConst_InputTable();
auto W_ptr = m_W->LockDeviceMemoryConst();
bbcu_fp32_StochasticLut_Forward<N>(
(float const *)x_ptr.GetAddr(),
(float *)y_ptr.GetAddr(),
(int const *)input_table_ptr.GetAddr(),
(float const *)W_ptr.GetAddr(),
(int )y_buf.GetNodeSize(),
(int )y_buf.GetFrameSize(),
(int )(y_buf.GetFrameStride() / sizeof(float)),
(int )(m_binary_mode ? 1 : 0),
(int )(m_lut_binarize ? 1 : 0),
(float )m_unbinarize_bias
);
return y_buf;
}
// CUDA Bit->bit
if ( N >= 2 && N <= 6 && DataType<BinType>::type == BB_TYPE_BIT && DataType<RealType>::type == BB_TYPE_FP32 && !m_host_only
&& x_buf.IsDeviceAvailable() && y_buf.IsDeviceAvailable() && Manager::IsDeviceAvailable()) {
auto x_ptr = x_buf.LockDeviceMemoryConst();
auto y_ptr = y_buf.LockDeviceMemory(true);
auto input_table_ptr = m_connection_table.LockDeviceMemConst_InputTable();
auto W_ptr = m_W->LockDeviceMemoryConst();
bbcu_bit_bit_fp32_StochasticLut_Forward<N>(
(int const *)x_ptr.GetAddr(),
(int *)y_ptr.GetAddr(),
(int const *)input_table_ptr.GetAddr(),
(float const *)W_ptr.GetAddr(),
(int )y_buf.GetNodeSize(),
(int )y_buf.GetFrameSize(),
(int )(y_buf.GetFrameStride() / sizeof(int)),
(int )(m_lut_binarize ? 1 : 0),
(float )m_unbinarize_bias
);
return y_buf;
}
#endif
{
// generic
auto node_size = y_buf.GetNodeSize();
auto frame_size = y_buf.GetFrameSize();
auto x_ptr = x_buf.LockConst<BinType>();
auto y_ptr = y_buf.Lock<BinType>();
auto input_table_ptr = m_connection_table.LockConst_InputTable();
auto W_ptr = lock_W_const();
#pragma omp parallel for
for ( index_t node = 0; node < node_size; ++node ) {
RealType W[(1 << N)];
for ( int i = 0; i < (1 << N); ++i) {
W[i] = W_ptr(node, i);
if ( m_lut_binarize ) {
W[i] = ((W[i] > (RealType)0.5) ? (RealType)1.0 : (RealType)0.0);
}
}
// calc Forward
for ( index_t frame = 0; frame < frame_size; ++frame ) {
RealType x[N];
for ( int i = 0; i < N; ++i) {
x[i] = (RealType)x_ptr.Get(frame, input_table_ptr(node, i));
if ( m_binary_mode ) {
x[i] = (RealType)0.5 + ((x[i] > (RealType)0.5) ? +m_unbinarize_bias : -m_unbinarize_bias);
}
else {
x[i] = std::min((RealType)1.0, std::max((RealType)0.0, x[i]));
}
}
RealType y;
StochasticOperation_Lut_Forward<RealType>(x, &y, W, N);
if ( m_binary_mode ) {
// binarize
y = ((y > (RealType)0.5) ? (RealType)1.0 : (RealType)0.0);
}
else {
// clip
y = std::min(y, (RealType)1.0);
y = std::max(y, (RealType)0.0);
}
y_ptr.Set(frame, node, y);
}
}
return y_buf;
}
}
}
// Backward pass of the differentiable-LUT layer.
// Consumes the upstream gradient dy_buf, recomputes the forward inputs from the
// x_buf saved by Forward(), and produces the input gradient dx_buf while
// accumulating the LUT-weight gradient into m_dW.
// Dispatches to CUDA kernels when available, otherwise runs a generic host path.
FrameBuffer Backward(FrameBuffer dy_buf)
{
    // Early out: backward disabled or nothing to propagate.
    if (m_backward_break || dy_buf.Empty()) {
        m_dW = 0;
        return FrameBuffer();
    }
    BB_ASSERT(dy_buf.GetType() == DataType<RealType>::type);
    m_flagClamp = true;
    // Retrieve the input frame saved during Forward().
    FrameBuffer x_buf = this->PopFrameBuffer();
    FrameBuffer dx_buf(dy_buf.GetFrameSize(), this->GetInputShape(), DataType<RealType>::type);
    auto input_shape = this->GetInputShape();
    auto output_shape = this->GetOutputShape();
    auto output_node_size = this->GetOutputNodeSize();
    // tmp buffer: size the scratch frame count so it fits in m_max_tmp_mem_size,
    // clamp to at least 32, round up to a multiple of 32, and never exceed the
    // actual frame count.
    index_t tmp_frame_size = m_max_tmp_mem_size / (sizeof(float) * output_node_size*N);
    tmp_frame_size = std::max(tmp_frame_size, (index_t)32);
    tmp_frame_size = ((tmp_frame_size + 31) & ~0x1f);
    tmp_frame_size = std::min(tmp_frame_size, dy_buf.GetFrameSize());
    FrameBuffer tmp_buf(tmp_frame_size, {output_node_size*N}, DataType<RealType>::type);
    if ( m_batch_norm ) {
        // When parameters are locked (inference-style), substitute the running
        // statistics for the per-batch mean/rstd.
        auto mean = m_mean;
        auto rstd = m_rstd;
        if ( this->m_parameter_lock ) {
            mean = m_running_mean + 0;
            rstd = (RealType)1.0 / (m_running_var + (RealType)1.0e-7).Sqrt();
        }
        // with BatchNormalization
#ifdef BB_WITH_CUDA
        // CUDA float path (2 <= N <= 6, fp32 in/out, device buffers available)
        if ( N >= 2 && N <= 6 && DataType<BinType>::type == BB_TYPE_FP32 && DataType<RealType>::type == BB_TYPE_FP32 && !m_host_only
                && x_buf.IsDeviceAvailable() && dy_buf.IsDeviceAvailable() && tmp_buf.IsDeviceAvailable() && dx_buf.IsDeviceAvailable() && Manager::IsDeviceAvailable()) {
            Tensor_<RealType> dmean(output_shape);
            Tensor_<RealType> dvar(output_shape);
            auto x_ptr = x_buf.LockDeviceMemoryConst();
            auto dy_ptr = dy_buf.LockDeviceMemoryConst();
            auto dx_ptr = dx_buf.LockDeviceMemory(true);
            auto tmp_ptr = tmp_buf.LockDeviceMemory(true);
            auto reverse_table_ptr = m_connection_table.LockDeviceMemConst_ReverseTable();
            auto input_table_ptr = m_connection_table.LockDeviceMemConst_InputTable();
            auto W_ptr = m_W->LockDeviceMemoryConst();
            auto dW_ptr = m_dW->LockDeviceMemory();
            // uses the lock-adjusted mean/rstd locals
            auto mean_ptr = mean.LockDeviceMemoryConst();
            auto rstd_ptr = rstd.LockDeviceMemoryConst();
            auto dmean_ptr = dmean.LockDeviceMemory(true);
            auto dvar_ptr = dvar.LockDeviceMemory(true);
            bbcu_fp32_DifferentiableLutN_Backward<N>
                (
                    (float const *)x_ptr.GetAddr(),
                    (float const *)dy_ptr.GetAddr(),
                    (float *)dx_ptr.GetAddr(),
                    (float *)tmp_ptr.GetAddr(),
                    (int const *)input_table_ptr.GetAddr(),
                    (int const *)reverse_table_ptr.GetAddr(),
                    (float const *)W_ptr.GetAddr(),
                    (float *)dW_ptr.GetAddr(),
                    (float const *)mean_ptr.GetAddr(),
                    (float const *)rstd_ptr.GetAddr(),
                    (float *)dmean_ptr.GetAddr(),
                    (float *)dvar_ptr.GetAddr(),
                    (float )m_gamma,
                    (float )m_beta,
                    (float )m_unbinarize_bias,
                    (int )m_connection_table.GetReverseTableStride(),
                    (int )dx_buf.GetNodeSize(),
                    (int )dy_buf.GetNodeSize(),
                    (int )dy_buf.GetFrameSize(),
                    (int )(dy_buf.GetFrameStride() / sizeof(float)),
                    (int )tmp_buf.GetFrameSize(),
                    (int )(tmp_buf.GetFrameStride() / sizeof(float)),
                    (int )(m_lut_binarize ? 1 : 0),
                    (int )(m_binary_mode ? 1 : 0)
                );
            return dx_buf;
        }
        // CUDA bit path (bit input, fp32 gradient)
        if ( N >= 2 && N <= 6 && DataType<BinType>::type == BB_TYPE_BIT && DataType<RealType>::type == BB_TYPE_FP32 && !m_host_only
                && x_buf.IsDeviceAvailable() && dy_buf.IsDeviceAvailable() && tmp_buf.IsDeviceAvailable() && dx_buf.IsDeviceAvailable() && Manager::IsDeviceAvailable()) {
            Tensor_<RealType> dmean(output_shape);
            Tensor_<RealType> dvar(output_shape);
            auto x_ptr = x_buf.LockDeviceMemoryConst();
            auto dy_ptr = dy_buf.LockDeviceMemoryConst();
            auto dx_ptr = dx_buf.LockDeviceMemory(true);
            auto tmp_ptr = tmp_buf.LockDeviceMemory(true);
            auto reverse_table_ptr = m_connection_table.LockDeviceMemConst_ReverseTable();
            auto input_table_ptr = m_connection_table.LockDeviceMemConst_InputTable();
            auto W_ptr = m_W->LockDeviceMemoryConst();
            auto dW_ptr = m_dW->LockDeviceMemory();
            // NOTE(review): this path locks m_mean/m_rstd directly, ignoring the
            // lock-adjusted `mean`/`rstd` locals used by the fp32 path above —
            // looks inconsistent under m_parameter_lock; confirm intent.
            auto mean_ptr = m_mean.LockDeviceMemoryConst();
            auto rstd_ptr = m_rstd.LockDeviceMemoryConst();
            auto dmean_ptr = dmean.LockDeviceMemory(true);
            auto dvar_ptr = dvar.LockDeviceMemory(true);
            bbcu_bit_fp32_DifferentiableLutN_Backward<N>
                (
                    (int const *)x_ptr.GetAddr(),
                    (float const *)dy_ptr.GetAddr(),
                    (float *)dx_ptr.GetAddr(),
                    (float *)tmp_ptr.GetAddr(),
                    (int const *)input_table_ptr.GetAddr(),
                    (int const *)reverse_table_ptr.GetAddr(),
                    (float const *)W_ptr.GetAddr(),
                    (float *)dW_ptr.GetAddr(),
                    (float const *)mean_ptr.GetAddr(),
                    (float const *)rstd_ptr.GetAddr(),
                    (float *)dmean_ptr.GetAddr(),
                    (float *)dvar_ptr.GetAddr(),
                    (float )m_gamma,
                    (float )m_beta,
                    (float )m_unbinarize_bias,
                    (int )m_connection_table.GetReverseTableStride(),
                    (int )dx_buf.GetNodeSize(),
                    (int )dy_buf.GetNodeSize(),
                    (int )dy_buf.GetFrameSize(),
                    (int )(dy_buf.GetFrameStride() / sizeof(float)),
                    (int )(x_buf.GetFrameStride() / sizeof(int)),
                    (int )tmp_buf.GetFrameSize(),
                    (int )(tmp_buf.GetFrameStride() / sizeof(int)),
                    (int )m_lut_binarize
                );
            return dx_buf;
        }
#endif
        {
            // generic host path (two passes per node: statistics gradients, then dx)
            dx_buf.FillZero();
            auto node_size = dy_buf.GetNodeSize();
            auto frame_size = dy_buf.GetFrameSize();
            auto reciprocal_frame_size = (RealType)1.0 / (RealType)frame_size;
            auto x_ptr = x_buf.LockConst<BinType>();
            auto dy_ptr = dy_buf.LockConst<RealType>();
            auto dx_ptr = dx_buf.Lock<RealType>(true);
            auto input_table_ptr = m_connection_table.LockConst_InputTable();
            auto W_ptr = lock_W_const();
            auto dW_ptr = lock_dW();
            // NOTE(review): locks m_mean/m_rstd, not the lock-adjusted locals —
            // same possible inconsistency as the CUDA bit path; confirm.
            auto mean_ptr = m_mean.LockConst();
            auto rstd_ptr = m_rstd.LockConst();
            for ( index_t node = 0; node < node_size; ++node ) {
                // Snapshot (optionally binarized) LUT weights for this node.
                RealType W[(1 << N)];
                for ( int i = 0; i < (1 << N); ++i) {
                    W[i] = W_ptr(node, i);
                    if ( m_lut_binarize ) {
                        W[i] = ((W[i] > (RealType)0.5) ? (RealType)1.0 : (RealType)0.0);
                    }
                }
                RealType dW[(1 << N)] = {0};
                // Pass 1: gradients w.r.t. the batch-norm mean and variance.
                RealType mean = mean_ptr[node];
                RealType rstd = rstd_ptr[node];
                RealType rstd2 = rstd * rstd;
                RealType dmeanx = 0;
                RealType dstd = 0;
                for ( index_t frame = 0; frame < frame_size; ++frame ) {
                    // recompute the forward input x (same clamping/unbinarize as Forward)
                    RealType x_vec[N];
                    for ( int i = 0; i < N; ++i) {
                        x_vec[i] = (RealType)x_ptr.Get(frame, input_table_ptr(node, i));
                        if ( m_binary_mode ) {
                            x_vec[i] = (RealType)0.5 + ((x_vec[i] > (RealType)0.5) ? +m_unbinarize_bias : -m_unbinarize_bias);
                        }
                        else {
                            x_vec[i] = std::min((RealType)1.0, std::max((RealType)0.0, x_vec[i]));
                        }
                    }
                    RealType x;
                    StochasticOperation_Lut_Forward<RealType>(x, &x, W, N);
                    // recompute the hard-tanh input
                    RealType tanh_x = ((x - mean) * rstd) * m_gamma + m_beta;
                    // hard-tanh: zero the gradient outside the linear region [0, 1]
                    RealType dy = dy_ptr.Get(frame, node);
                    if (tanh_x <= 0.0) { dy = 0.0; }
                    if (tanh_x >= 1.0) { dy = 0.0; }
                    // BatchNorm statistics accumulation
                    RealType xc = x - mean;
                    // RealType xn = xc * rstd;
                    RealType dxn = m_gamma * dy;
                    dstd += -(dxn * xc * rstd2);
                    dmeanx += -(dxn * rstd);
                }
                RealType dvar = dstd * rstd;
                RealType dmean = (dmeanx - (mean * dvar)) * reciprocal_frame_size;
                // Pass 2: per-frame input gradient dx (and dW via the LUT backward op)
                for ( index_t frame = 0; frame < frame_size; ++frame ) {
                    // recompute the forward input x
                    RealType x_vec[N];
                    for ( int i = 0; i < N; ++i) {
                        x_vec[i] = (RealType)x_ptr.Get(frame, input_table_ptr(node, i));
                        if ( m_binary_mode ) {
                            x_vec[i] = (RealType)0.5 + ((x_vec[i] > (RealType)0.5) ? +m_unbinarize_bias : -m_unbinarize_bias);
                        }
                        else {
                            x_vec[i] = std::min((RealType)1.0, std::max((RealType)0.0, x_vec[i]));
                        }
                    }
                    RealType x;
                    StochasticOperation_Lut_Forward<RealType>(x_vec, &x, W, N);
                    // recompute the hard-tanh input
                    RealType tanh_x = ((x - mean) * rstd) * m_gamma + m_beta;
                    // hard-tanh gradient gating
                    RealType dy = dy_ptr.Get(frame, node);
                    if (tanh_x <= 0.0) { dy = 0.0; }
                    if (tanh_x >= 1.0) { dy = 0.0; }
                    RealType dxn = dy * m_gamma;
                    RealType dxc = dxn * rstd;
                    // combine normalization gradient terms (project-specific
                    // batch-norm backward formulation; verify against the CUDA kernel)
                    RealType dx = dxc + dmean + (x * dvar * reciprocal_frame_size);
                    RealType dx_vec[N];
                    StochasticOperation_Lut_Backward<RealType>(x_vec, dx_vec, &dx, W, dW, N);
                    for ( int i = 0; i < N; ++i) {
                        dx_ptr.Add(frame, input_table_ptr(node, i), dx_vec[i]);
                    }
                }
                // accumulate per-node weight gradient into the shared dW tensor
                for ( int i = 0; i < (1 << N); ++i ) {
                    dW_ptr(node, i) += dW[i];
                }
            }
            return dx_buf;
        }
    }
    else {
        // without BatchNormalization
#ifdef BB_WITH_CUDA
        // CUDA float path
        if ( N >= 2 && N <= 6 && DataType<BinType>::type == BB_TYPE_FP32 && DataType<RealType>::type == BB_TYPE_FP32 && !m_host_only
                && dy_buf.IsDeviceAvailable() && x_buf.IsDeviceAvailable() && dx_buf.IsDeviceAvailable() && Manager::IsDeviceAvailable()) {
            auto x_ptr = x_buf.LockDeviceMemoryConst();
            auto dy_ptr = dy_buf.LockDeviceMemoryConst();
            auto dx_ptr = dx_buf.LockDeviceMemory(true);
            auto reverse_table_ptr = m_connection_table.LockDeviceMemConst_ReverseTable();
            auto input_table_ptr = m_connection_table.LockDeviceMemConst_InputTable();
            auto W_ptr = m_W->LockDeviceMemoryConst();
            auto dW_ptr = m_dW->LockDeviceMemory();
            auto tmp_ptr = tmp_buf.LockDeviceMemory();
            bbcu_fp32_StochasticLut_Backward<N>(
                (float const *)x_ptr.GetAddr(),
                (float const *)dy_ptr.GetAddr(),
                (float *)dx_ptr.GetAddr(),
                (float *)tmp_ptr.GetAddr(),
                (int const *)input_table_ptr.GetAddr(),
                (int const *)reverse_table_ptr.GetAddr(),
                (float const *)W_ptr.GetAddr(),
                (float *)dW_ptr.GetAddr(),
                (int )m_connection_table.GetReverseTableStride(),
                (int )dx_buf.GetNodeSize(),
                (int )dy_buf.GetNodeSize(),
                (int )dx_buf.GetFrameSize(),
                (int )(dx_buf.GetFrameStride() / sizeof(float)),
                (int )tmp_buf.GetFrameSize(),
                (int )(tmp_buf.GetFrameStride() / sizeof(float)),
                (int )(m_binary_mode ? 1 : 0),
                (int )(m_lut_binarize ? 1 : 0),
                (float )m_unbinarize_bias
            );
            return dx_buf;
        }
        // LUT6 Bit CUDA
        // NOTE(review): `N == 6 && N >= 2 && N <= 6` — the range test is
        // redundant given N == 6; likely a copy-paste remnant.
        if ( N == 6 && N >= 2 && N <= 6 && DataType<BinType>::type == BB_TYPE_BIT && DataType<RealType>::type == BB_TYPE_FP32 && !m_host_only
                && dy_buf.IsDeviceAvailable() && x_buf.IsDeviceAvailable() && dx_buf.IsDeviceAvailable() && Manager::IsDeviceAvailable()) {
            auto x_ptr = x_buf.LockDeviceMemoryConst();
            auto dy_ptr = dy_buf.LockDeviceMemoryConst();
            auto dx_ptr = dx_buf.LockDeviceMemory(true);
            auto reverse_table_ptr = m_connection_table.LockDeviceMemConst_ReverseTable();
            auto input_table_ptr = m_connection_table.LockDeviceMemConst_InputTable();
            auto W_ptr = m_W->LockDeviceMemoryConst();
            auto dW_ptr = m_dW->LockDeviceMemory();
            auto tmp_ptr = tmp_buf.LockDeviceMemory();
            bbcu_bit_fp32_StochasticLut_Backward<N>(
                (int const *)x_ptr.GetAddr(),
                (float const *)dy_ptr.GetAddr(),
                (float *)dx_ptr.GetAddr(),
                (float *)tmp_ptr.GetAddr(),
                (int const *)input_table_ptr.GetAddr(),
                (int const *)reverse_table_ptr.GetAddr(),
                (float const *)W_ptr.GetAddr(),
                (float *)dW_ptr.GetAddr(),
                (int )m_connection_table.GetReverseTableStride(),
                (int )dx_buf.GetNodeSize(),
                (int )dy_buf.GetNodeSize(),
                (int )dx_buf.GetFrameSize(),
                (int )(dx_buf.GetFrameStride() / sizeof(float)),
                (int )(x_buf.GetFrameStride() / sizeof(int)),
                (int )tmp_buf.GetFrameSize(),
                (int )(tmp_buf.GetFrameStride() / sizeof(float)),
                (int )(m_lut_binarize ? 1 : 0),
                (float )m_unbinarize_bias
            );
            return dx_buf;
        }
#endif
        {
            // generic host path (no batch-norm: single pass per node)
            dx_buf.FillZero();
            auto node_size = dy_buf.GetNodeSize();
            auto frame_size = dy_buf.GetFrameSize();
            // auto reciprocal_frame_size = (RealType)1.0 / (RealType)frame_size;
            auto x_ptr = x_buf.LockConst<BinType>();
            auto dy_ptr = dy_buf.LockConst<RealType>();
            auto dx_ptr = dx_buf.Lock<RealType>(true);
            auto input_table_ptr = m_connection_table.LockConst_InputTable();
            auto W_ptr = lock_W_const();
            auto dW_ptr = lock_dW();
            for ( index_t node = 0; node < node_size; ++node ) {
                // Snapshot (optionally binarized) LUT weights for this node.
                RealType W[(1 << N)];
                for ( int i = 0; i < (1 << N); ++i) {
                    W[i] = W_ptr(node, i);
                    if ( m_lut_binarize ) {
                        W[i] = ((W[i] > (RealType)0.5) ? (RealType)1.0 : (RealType)0.0);
                    }
                }
                RealType dW[(1 << N)] = {0};
                for ( index_t frame = 0; frame < frame_size; ++frame ) {
                    // recompute the forward input x
                    RealType x_vec[N];
                    for ( int i = 0; i < N; ++i) {
                        x_vec[i] = (RealType)x_ptr.Get(frame, input_table_ptr(node, i));
                        if ( m_binary_mode ) {
                            x_vec[i] = (RealType)0.5 + ((x_vec[i] > (RealType)0.5) ? +m_unbinarize_bias : -m_unbinarize_bias);
                        }
                        else {
                            x_vec[i] = std::min((RealType)1.0, std::max((RealType)0.0, x_vec[i]));
                        }
                    }
                    // propagate dy straight through the LUT backward op
                    RealType dy = dy_ptr.Get(frame, node);
                    RealType dx_vec[N];
                    StochasticOperation_Lut_Backward<RealType>(x_vec, dx_vec, &dy, W, dW, N);
                    for ( int i = 0; i < N; ++i) {
                        dx_ptr.Add(frame, input_table_ptr(node, i), dx_vec[i]);
                    }
                }
                // accumulate per-node weight gradient into the shared dW tensor
                for ( int i = 0; i < (1 << N); ++i ) {
                    dW_ptr(node, i) += dW[i];
                }
            }
            return dx_buf;
        }
    }
}
};
}
// end of file |
ex6-matrix-max-openmp-wo-cr.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#define VALIDATE 0
#if VALIDATE
#include "validate.h"
#endif
int max(const size_t n, const int * restrict);
void usage(char**);
/*
 * Fill an n*n matrix with pseudo-random values, time the parallel
 * max reduction, and report the result.
 *
 * Fixes over the original:
 *  - the sscanf result is checked, so n is never used uninitialized
 *    when argv[1] does not parse as a size_t;
 *  - the malloc result is checked before use;
 *  - A is freed on the VALIDATE failure path (was leaked).
 */
int main(int argc, char **argv)
{
    int *A, max_val;
    size_t i, j, n;
    double t0, t1;

    /* Exactly one argument (the matrix dimension) is required. */
    if (argc == 2) {
        if (sscanf(argv[1], "%zu", &n) != 1) {
            usage(argv);
            return 1;
        }
    }
    else {
        usage(argv);
        return 1;
    }

    srand(42); // The Answer

    A = malloc(n * n * sizeof *A);
    if (A == NULL) {
        printf("Allocation of %zu x %zu matrix failed.\n", n, n);
        return 1;
    }
    for (i = 0; i < n; ++i)
        for (j = 0; j < n; ++j)
            A[i*n+j] = rand();

    t0 = omp_get_wtime();
    max_val = max(n, A);
    t1 = omp_get_wtime();

#if VALIDATE
    if (!validate_max(n, A, max_val)) {
        printf("Validation failed.\n");
        free(A);
        return 1;
    }
#endif
    printf("max(A) = %d\n", max_val);
    printf("Total time taken: %f.\n", t1 - t0);
    free(A);
    return 0;
}
/*
 * Return the maximum element of the n*n matrix A.
 *
 * reduction(max:max_val) gives every thread a private copy that is
 * combined at the end, so no critical section is required.
 *
 * Fix: with default(none) every variable referenced inside the
 * parallel region must appear in a data-sharing clause; the original
 * pragma omitted 'n' (used in both loop bounds and the index
 * expression), which fails to compile under default(none).
 */
int max(const size_t n, const int * restrict A)
{
    int max_val = A[0];
    size_t i, j;
    #pragma omp parallel for default(none) shared(A,n) private(i,j) reduction(max:max_val)
    for (i = 0; i < n; ++i)
        for (j = 0; j < n; ++j)
            if (A[i*n+j] > max_val)
                max_val = A[i*n+j];
    return max_val;
}
/* Print a one-line usage synopsis using the invoked program name. */
void usage(char **argv)
{
    const char *prog = argv[0];
    printf("Usage: %s <length>\n", prog);
}
|
hello_openMP_v2.c | #include<stdio.h>
#include<omp.h>
/* Spawn an eight-thread parallel region; each thread prints one
 * greeting, so eight lines appear in nondeterministic order
 * (or one line when compiled without OpenMP support). */
int main()
{
    #pragma omp parallel num_threads(8)
    {
        printf("Hello World!\n");
    }
    return 0;
}
|
BFS.c | // -----------------------------------------------------------------------------
//
// "00_AccelGraph"
//
// -----------------------------------------------------------------------------
// Copyright (c) 2014-2019 All rights reserved
// -----------------------------------------------------------------------------
// Author : Abdullah Mughrabi
// Email : atmughra@ncsu.edu||atmughrabi@gmail.com
// File : BFS.c
// Create : 2019-09-28 15:20:58
// Revise : 2019-09-28 15:34:05
// Editor : Abdullah Mughrabi
// -----------------------------------------------------------------------------
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <omp.h>
#include "timer.h"
#include "myMalloc.h"
#include "boolean.h"
#include "arrayQueue.h"
#include "bitmap.h"
#include "graphConfig.h"
#include "reorder.h"
#include "graphCSR.h"
#include "graphGrid.h"
#include "graphAdjArrayList.h"
#include "graphAdjLinkedList.h"
#include "libcxl.h"
#include "capienv.h"
#include "BFS.h"
// ********************************************************************************************
// *************** Stats DataStructure **************
// ********************************************************************************************
/*
 * Allocate and initialize a BFSStats record for a CSR graph,
 * including the dual-order shadow arrays.
 *
 * parents[] is seeded with -(out_degree) rather than a plain -1 so
 * the direction-optimizing (hybrid) BFS can use the magnitude as a
 * remaining-edge counter; isolated vertices get -1.
 */
struct BFSStats *newBFSStatsGraphCSR(struct GraphCSR *graph)
{
    uint32_t vertex_id;
    struct BFSStats *stats = (struct BFSStats *) my_malloc(sizeof(struct BFSStats));

    stats->distances           = (uint32_t *) my_malloc(graph->num_vertices * sizeof(uint32_t));
    stats->distances_DualOrder = (uint32_t *) my_malloc(graph->num_vertices * sizeof(uint32_t));
    stats->parents             = (int *) my_malloc(graph->num_vertices * sizeof(int));
    stats->parents_DualOrder   = (int *) my_malloc(graph->num_vertices * sizeof(int));

    stats->processed_nodes = 0;
    stats->iteration       = 0;
    stats->num_vertices    = graph->num_vertices;
    stats->time_total      = 0.0f;

    #pragma omp parallel for default(none) private(vertex_id) shared(stats,graph)
    for (vertex_id = 0; vertex_id < graph->num_vertices; vertex_id++)
    {
        stats->distances[vertex_id] = 0;
        /* seed both orderings with the same hybrid counter */
        int seed = graph->vertices->out_degree[vertex_id]
                   ? graph->vertices->out_degree[vertex_id] * (-1)
                   : -1;
        stats->parents[vertex_id]           = seed;
        stats->parents_DualOrder[vertex_id] = seed;
    }
    return stats;
}
/*
 * Allocate and initialize a BFSStats record for a Grid graph.
 * The dual-order arrays are unused for this representation and stay NULL.
 */
struct BFSStats *newBFSStatsGraphGrid(struct GraphGrid *graph)
{
    uint32_t vertex_id;
    struct BFSStats *stats = (struct BFSStats *) my_malloc(sizeof(struct BFSStats));

    stats->distances_DualOrder = NULL;
    stats->parents_DualOrder   = NULL;
    stats->distances = (uint32_t *) my_malloc(graph->num_vertices * sizeof(uint32_t));
    stats->parents   = (int *) my_malloc(graph->num_vertices * sizeof(int));

    stats->processed_nodes = 0;
    stats->iteration       = 0;
    stats->num_vertices    = graph->num_vertices;
    stats->time_total      = 0.0f;

    #pragma omp parallel for default(none) private(vertex_id) shared(stats,graph)
    for (vertex_id = 0; vertex_id < graph->num_vertices; vertex_id++)
    {
        stats->distances[vertex_id] = 0;
        stats->parents[vertex_id]   = -1;
    }
    return stats;
}
/*
 * Allocate and initialize a BFSStats record for an adjacency-array-list
 * graph.  parents[] is seeded with -(out_degree) (or -1 for isolated
 * vertices) so the hybrid BFS can use the magnitude as an edge counter;
 * dual-order arrays are not used here.
 */
struct BFSStats *newBFSStatsGraphAdjArrayList(struct GraphAdjArrayList *graph)
{
    uint32_t vertex_id;
    struct BFSStats *stats = (struct BFSStats *) my_malloc(sizeof(struct BFSStats));

    stats->distances_DualOrder = NULL;
    stats->parents_DualOrder   = NULL;
    stats->distances = (uint32_t *) my_malloc(graph->num_vertices * sizeof(uint32_t));
    stats->parents   = (int *) my_malloc(graph->num_vertices * sizeof(int));

    stats->processed_nodes = 0;
    stats->iteration       = 0;
    stats->num_vertices    = graph->num_vertices;
    stats->time_total      = 0.0f;

    #pragma omp parallel for default(none) private(vertex_id) shared(stats,graph)
    for (vertex_id = 0; vertex_id < graph->num_vertices; vertex_id++)
    {
        stats->distances[vertex_id] = 0;
        stats->parents[vertex_id] = graph->vertices[vertex_id].out_degree
                                    ? graph->vertices[vertex_id].out_degree * (-1)
                                    : -1;
    }
    return stats;
}
/*
 * Allocate and initialize a BFSStats record for an adjacency-linked-list
 * graph.  Same seeding convention as the array-list variant: parents[]
 * holds -(out_degree), or -1 for vertices without outgoing edges.
 */
struct BFSStats *newBFSStatsGraphAdjLinkedList(struct GraphAdjLinkedList *graph)
{
    uint32_t vertex_id;
    struct BFSStats *stats = (struct BFSStats *) my_malloc(sizeof(struct BFSStats));

    stats->distances_DualOrder = NULL;
    stats->parents_DualOrder   = NULL;
    stats->distances = (uint32_t *) my_malloc(graph->num_vertices * sizeof(uint32_t));
    stats->parents   = (int *) my_malloc(graph->num_vertices * sizeof(int));

    stats->processed_nodes = 0;
    stats->iteration       = 0;
    stats->num_vertices    = graph->num_vertices;
    stats->time_total      = 0.0f;

    #pragma omp parallel for default(none) private(vertex_id) shared(stats,graph)
    for (vertex_id = 0; vertex_id < graph->num_vertices; vertex_id++)
    {
        stats->distances[vertex_id] = 0;
        stats->parents[vertex_id] = graph->vertices[vertex_id].out_degree
                                    ? graph->vertices[vertex_id].out_degree * (-1)
                                    : -1;
    }
    return stats;
}
/*
 * Release a BFSStats record and all of its arrays.
 * Safe to call with NULL.
 *
 * Fix: the per-pointer `if (p) free(p)` guards were redundant —
 * free(NULL) is defined as a no-op by the C standard.
 */
void freeBFSStats(struct BFSStats *stats)
{
    if (!stats)
        return;
    free(stats->distances);
    free(stats->parents);
    free(stats->distances_DualOrder);
    free(stats->parents_DualOrder);
    free(stats);
}
/*
 * Remap parents[] into the dual vertex ordering described by labels[]
 * and swap the two arrays so that *parents ends up holding the
 * remapped data.
 *
 * For each old vertex id v: the remapped slot is labels[v]; a
 * non-negative parent entry is itself a vertex id and is translated
 * through labels[], while negative entries (the -out_degree /
 * -1 "unvisited" markers) are copied through unchanged.
 *
 * Takes int** on purpose: the final pointer swap must be visible to
 * the caller (contrast with syncDualOrderDistancesArrays, which takes
 * the arrays by value).
 */
void syncDualOrderParentArrays(int **parents, int **parents_DualOrder, uint32_t *labels, uint32_t num_vertices)
{
uint32_t vertex_id;
uint32_t vertex_v;
int *parents_temp;
uint32_t num_threads_max = omp_get_max_threads();
/* each iteration writes a distinct slot labels[vertex_id], so the loop is race-free */
#pragma omp parallel for default(none) private(vertex_id,vertex_v) shared(parents,parents_DualOrder,labels,num_vertices) num_threads(num_threads_max)
for(vertex_id = 0; vertex_id < num_vertices ; vertex_id++)
{
vertex_v = labels[vertex_id];
// vertex_u = inv_labels[vertex_id];
if((*parents)[vertex_id] >= 0)
{
/* parent is a vertex id: translate it into the new ordering too */
(*parents_DualOrder)[vertex_v] = labels[(*parents)[vertex_id]];
}
else
{
/* negative sentinel (unvisited / -out_degree counter): copy as-is */
(*parents_DualOrder)[vertex_v] = (*parents)[vertex_id];
}
}
/* swap buffers so the caller's *parents now holds the remapped array */
parents_temp = *parents;
*parents = *parents_DualOrder;
*parents_DualOrder = parents_temp;
}
/*
 * Remap distances[] into the dual vertex ordering described by
 * labels[]: distances_DualOrder[labels[v]] = distances[v].
 *
 * NOTE(review): unlike syncDualOrderParentArrays(), this function
 * receives the array pointers BY VALUE, so the trailing pointer swap
 * in the original only exchanged local copies and had no effect on
 * the caller.  That dead swap has been removed; callers must read the
 * remapped data from distances_DualOrder.  If caller-visible swapping
 * was intended, the signature would need uint32_t** parameters like
 * the parents variant.
 */
void syncDualOrderDistancesArrays(uint32_t *distances, uint32_t *distances_DualOrder, uint32_t *labels, uint32_t num_vertices)
{
    uint32_t vertex_id;
    uint32_t vertex_v;
    uint32_t num_threads_max = omp_get_max_threads();
    /* each iteration writes a distinct slot labels[vertex_id], so the loop is race-free */
    #pragma omp parallel for default(none) private(vertex_id,vertex_v) shared(distances,distances_DualOrder,labels,num_vertices) num_threads(num_threads_max)
    for(vertex_id = 0; vertex_id < num_vertices ; vertex_id++)
    {
        vertex_v = labels[vertex_id];
        distances_DualOrder[vertex_v] = distances[vertex_id];
    }
}
// ********************************************************************************************
// *************** CSR DataStructure **************
// ********************************************************************************************
/*
 * Entry point for BFS on a CSR graph: dispatch to the traversal
 * variant selected by arguments->pushpull.
 *
 *   0 -> pull (bottom-up)
 *   1 -> push (top-down)
 *   2 -> direction-optimized (pull/push hybrid)
 *   3 -> push with a bitmap frontier instead of an array queue
 *   4 -> direction-optimized with a bitmap frontier
 *   anything else -> direction-optimized (default)
 */
struct BFSStats *breadthFirstSearchGraphCSR(struct Arguments *arguments, struct GraphCSR *graph)
{
    if (arguments->pushpull == 0)
        return breadthFirstSearchPullGraphCSR(arguments, graph);
    if (arguments->pushpull == 1)
        return breadthFirstSearchPushGraphCSR(arguments, graph);
    if (arguments->pushpull == 3)
        return breadthFirstSearchPushBitmapGraphCSR(arguments, graph);
    if (arguments->pushpull == 4)
        return breadthFirstSearchPushDirectionOptimizedBitmapGraphCSR(arguments, graph);
    /* pushpull == 2 and every unrecognized value share this path */
    return breadthFirstSearchDirectionOptimizedGraphCSR(arguments, graph);
}
// breadth-first-search(graph, arguments->source)
// sharedFrontierQueue ← {arguments->source}
// next ← {}
// parents ← [-1,-1,. . . -1]
// while sharedFrontierQueue 6= {} do
// top-down-step(graph, sharedFrontierQueue, next, parents)
// sharedFrontierQueue ← next
// next ← {}
// end while
// return parents
struct BFSStats *breadthFirstSearchPullGraphCSR(struct Arguments *arguments, struct GraphCSR *graph)
{
struct BFSStats *stats = newBFSStatsGraphCSR(graph);
printf(" -----------------------------------------------------\n");
printf("| %-51s | \n", " ---->>> CAPI <<<----");
if(arguments->source > graph->num_vertices)
{
printf(" -----------------------------------------------------\n");
printf("| %-51s | \n", "ERROR!! CHECK SOURCE RANGE");
printf(" -----------------------------------------------------\n");
return stats;
}
arguments->source = graph->sorted_edges_array->label_array[arguments->source];
printf(" -----------------------------------------------------\n");
printf("| %-51s | \n", "Starting BFS PULL/BU (SOURCE NODE)");
printf(" -----------------------------------------------------\n");
printf("| %-51u | \n", arguments->source);
printf(" -----------------------------------------------------\n");
printf("| %-15s | %-15s | %-15s | \n", "Iteration", "Nodes", "Time (Seconds)");
printf(" -----------------------------------------------------\n");
struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer));
struct Timer *timer_inner = (struct Timer *) malloc(sizeof(struct Timer));
uint8_t *workListCurr = NULL;
uint8_t *workListNext = NULL;
workListCurr = (uint8_t *) my_malloc(graph->num_vertices * sizeof(uint8_t));
workListNext = (uint8_t *) my_malloc(graph->num_vertices * sizeof(uint8_t));
resetWorkList(workListNext, graph->num_vertices);
resetWorkList(workListCurr, graph->num_vertices);
// ********************************************************************************************
// ********************************************************************************************
// *************** Setup CAPI **************
// ********************************************************************************************
struct cxl_afu_h *afu;
struct WEDGraphCSR *wedGraphCSR;
wedGraphCSR = mapGraphCSRToWED((struct GraphCSR *)graph);
wedGraphCSR->auxiliary1 = stats->parents;
wedGraphCSR->auxiliary2 = stats->distances;
// ********************************************************************************************
// ********************************************************************************************
// *************** Setup AFU **************
// ********************************************************************************************
setupAFUGraphCSR(&afu, wedGraphCSR);
struct AFUStatus afu_status = {0};
afu_status.afu_config = arguments->afu_config;
afu_status.afu_config_2 = arguments->afu_config_2;
afu_status.cu_config = arguments->cu_config; // non zero CU triggers the AFU to work
afu_status.cu_config = ((arguments->cu_config << 32) | (arguments->ker_numThreads));
afu_status.cu_config_2 = arguments->cu_config_2; // non zero CU triggers the AFU to work
afu_status.cu_config_3 = (uint64_t)workListCurr; // non zero CU triggers the AFU to work
afu_status.cu_config_4 = (uint64_t)workListNext; // non zero CU triggers the AFU to work
afu_status.cu_stop = wedGraphCSR->num_vertices; // stop condition once all vertices processed
// ********************************************************************************************
startAFU(&afu, &afu_status);
// ********************************************************************************************
uint32_t nf = 0; // number of vertices in sharedFrontierQueue
Start(timer_inner);
workListNext[arguments->source] = 1;
nf = 1;
stats->parents[arguments->source] = arguments->source;
swapWorkLists(&workListCurr, &workListNext);
resetWorkList(workListNext, graph->num_vertices);
Stop(timer_inner);
stats->time_total += Seconds(timer_inner);
printf("| BU %-12u | %-15u | %-15f | \n", stats->iteration++, ++stats->processed_nodes, Seconds(timer_inner));
Start(timer);
while (nf)
{
Start(timer_inner);
afu_status.cu_config_3 = (uint64_t)workListCurr; // non zero CU triggers the AFU to work
afu_status.cu_config_4 = (uint64_t)workListNext; // non zero CU triggers the AFU to work
nf = bottomUpStepGraphCSRCAPI(graph, workListCurr, workListNext, stats, &afu_status, afu);
swapWorkLists(&workListCurr, &workListNext);
resetWorkList(workListNext, graph->num_vertices);
Stop(timer_inner);
//stats
stats->time_total += Seconds(timer_inner);
stats->processed_nodes += nf;
printf("| BU %-12u | %-15u | %-15f | \n", stats->iteration++, nf, Seconds(timer_inner));
} // end while
Stop(timer);
printf(" -----------------------------------------------------\n");
printf("| %-15s | %-15u | %-15f | \n", "No OverHead", stats->processed_nodes, stats->time_total);
printf(" -----------------------------------------------------\n");
stats->time_total = Seconds(timer);
printf(" -----------------------------------------------------\n");
printf("| %-15s | %-15u | %-15f | \n", "total", stats->processed_nodes, Seconds(timer));
printf(" -----------------------------------------------------\n");
// ********************************************************************************************
// *************** Releasing AFU **************
releaseAFU(&afu);
// ********************************************************************************************
free(workListCurr);
free(workListNext);
free(timer);
free(timer_inner);
free(wedGraphCSR);
return stats;
}
// breadth-first-search(graph, arguments->source)
// sharedFrontierQueue ← {arguments->source}
// next ← {}
// parents ← [-1,-1,. . . -1]
// while sharedFrontierQueue 6= {} do
// top-down-step(graph, sharedFrontierQueue, next, parents)
// sharedFrontierQueue ← next
// next ← {}
// end while
// return parents
// Push-based (top-down) BFS over a CSR graph.
// Seeds the shared frontier with the (relabeled) source vertex, then runs
// top-down steps until the frontier is empty. Returns a freshly allocated
// BFSStats the caller owns and must free; on an out-of-range source the
// stats object is returned in its initial state.
struct BFSStats *breadthFirstSearchPushGraphCSR(struct Arguments *arguments, struct GraphCSR *graph)
{
    struct BFSStats *stats = newBFSStatsGraphCSR(graph);

    // Valid vertex ids are 0 .. num_vertices-1. The previous '>' comparison
    // accepted source == num_vertices and then indexed label_array one past
    // its end; '>=' rejects that id.
    if(arguments->source >= graph->num_vertices)
    {
        printf(" -----------------------------------------------------\n");
        printf("| %-51s | \n", "ERROR!! CHECK SOURCE RANGE");
        printf(" -----------------------------------------------------\n");
        return stats;
    }

    // Map the user-supplied id into the internal (relabeled) id space.
    arguments->source = graph->sorted_edges_array->label_array[arguments->source];

    printf(" -----------------------------------------------------\n");
    printf("| %-51s | \n", "Starting BFS PUSH/TD (SOURCE NODE)");
    printf(" -----------------------------------------------------\n");
    printf("| %-51u | \n", arguments->source);
    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15s | %-15s | \n", "Iteration", "Nodes", "Time (Seconds)");
    printf(" -----------------------------------------------------\n");

    struct Timer *timer = malloc(sizeof *timer);
    struct Timer *timer_inner = malloc(sizeof *timer_inner);
    struct ArrayQueue *sharedFrontierQueue = newArrayQueue(graph->num_vertices);

    // One private frontier queue per thread; each is flushed into the shared
    // queue at the end of every top-down step.
    uint32_t P = arguments->algo_numThreads;
    struct ArrayQueue **localFrontierQueues = (struct ArrayQueue **) my_malloc( P * sizeof(struct ArrayQueue *));
    uint32_t i;
    for(i = 0 ; i < P ; i++)
    {
        localFrontierQueues[i] = newArrayQueue(graph->num_vertices);
    }

    // Seed: the source becomes its own parent, which marks it visited.
    Start(timer_inner);
    enArrayQueue(sharedFrontierQueue, arguments->source);
    stats->parents[arguments->source] = arguments->source;
    Stop(timer_inner);
    stats->time_total += Seconds(timer_inner);
    printf("| TD %-12u | %-15u | %-15f | \n", stats->iteration++, ++stats->processed_nodes, Seconds(timer_inner));

    Start(timer);
    while(!isEmptyArrayQueue(sharedFrontierQueue)) // one iteration per BFS level
    {
        Start(timer_inner);
        topDownStepGraphCSR(graph, sharedFrontierQueue, localFrontierQueues, stats);
        // Advance the queue window so [head, tail) is the frontier just produced.
        slideWindowArrayQueue(sharedFrontierQueue);
        Stop(timer_inner);

        // stats collection
        stats->time_total += Seconds(timer_inner);
        stats->processed_nodes += sharedFrontierQueue->tail - sharedFrontierQueue->head;
        printf("| TD %-12u | %-15u | %-15f | \n", stats->iteration++, sharedFrontierQueue->tail - sharedFrontierQueue->head, Seconds(timer_inner));
    } // end while
    Stop(timer);

    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15u | %-15f | \n", "No OverHead", stats->processed_nodes, stats->time_total);
    printf(" -----------------------------------------------------\n");
    stats->time_total = Seconds(timer); // wall-clock total replaces the summed inner times
    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15u | %-15f | \n", "total", stats->processed_nodes, Seconds(timer));
    printf(" -----------------------------------------------------\n");

    for(i = 0 ; i < P ; i++)
    {
        freeArrayQueue(localFrontierQueues[i]);
    }
    free(localFrontierQueues);
    freeArrayQueue(sharedFrontierQueue);
    free(timer);
    free(timer_inner);
    return stats;
}
// breadth-first-search(graph, arguments->source)
// sharedFrontierQueue ← {arguments->source}
// next ← {}
// parents ← [-1,-1,. . . -1]
// while sharedFrontierQueue != {} do
// top-down-step(graph, sharedFrontierQueue, next, parents)
// sharedFrontierQueue ← next
// next ← {}
// end while
// return parents
// Direction-optimized BFS (push/pull hybrid) over a CSR graph.
// Runs top-down (push) steps while the frontier is light, and switches to
// bottom-up (pull) steps when the frontier's outgoing-edge count mf exceeds
// mu/alpha; it switches back when the frontier stops growing and shrinks
// below n/beta. Returns a freshly allocated BFSStats owned by the caller.
struct BFSStats *breadthFirstSearchDirectionOptimizedGraphCSR(struct Arguments *arguments, struct GraphCSR *graph)
{
    struct BFSStats *stats = newBFSStatsGraphCSR(graph);

    // Valid ids are 0 .. num_vertices-1; use '>=' (not '>') so that
    // source == num_vertices cannot index one past the end of label_array.
    if(arguments->source >= graph->num_vertices)
    {
        printf(" -----------------------------------------------------\n");
        printf("| %-51s | \n", "ERROR!! CHECK SOURCE RANGE");
        printf(" -----------------------------------------------------\n");
        return stats;
    }

    // Map the user-supplied id into the internal (relabeled) id space.
    arguments->source = graph->sorted_edges_array->label_array[arguments->source];

    printf(" -----------------------------------------------------\n");
    printf("| %-51s | \n", "Starting BFS PUSH/PULL(SOURCE NODE)");
    printf(" -----------------------------------------------------\n");
    printf("| %-51u | \n", arguments->source);
    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15s | %-15s | \n", "Iteration", "Nodes", "Time (Seconds)");
    printf(" -----------------------------------------------------\n");

    struct Timer *timer = malloc(sizeof *timer);
    struct Timer *timer_inner = malloc(sizeof *timer_inner);
    struct ArrayQueue *sharedFrontierQueue = newArrayQueue(graph->num_vertices);
    struct Bitmap *bitmapCurr = newBitmap(graph->num_vertices);
    struct Bitmap *bitmapNext = newBitmap(graph->num_vertices);

    uint32_t P = arguments->algo_numThreads;
    // Direction-switch heuristic state:
    //   mu = edges not yet explored, mf = edges out of the current frontier,
    //   nf = frontier size, n  = vertex count.
    uint32_t mu = graph->num_edges; // number of edges to check from sharedFrontierQueue
    uint32_t mf = graph->vertices->out_degree[arguments->source]; // number of edges from unexplored verticies
    uint32_t nf = 0; // number of vertices in sharedFrontierQueue
    uint32_t nf_prev = 0; // number of vertices in sharedFrontierQueue
    uint32_t n = graph->num_vertices; // number of nodes
    uint32_t alpha = 15; // push -> pull switch threshold
    uint32_t beta = 18;  // pull -> push switch threshold

    // One private frontier queue per thread for the push phase.
    struct ArrayQueue **localFrontierQueues = (struct ArrayQueue **) my_malloc( P * sizeof(struct ArrayQueue *));
    uint32_t i;
    for(i = 0 ; i < P ; i++)
    {
        localFrontierQueues[i] = newArrayQueue(graph->num_vertices);
    }

    // Seed: the source becomes its own parent, which marks it visited.
    Start(timer_inner);
    enArrayQueue(sharedFrontierQueue, arguments->source);
    stats->parents[arguments->source] = arguments->source;
    Stop(timer_inner);
    stats->time_total += Seconds(timer_inner);
    printf("| TD %-12u | %-15u | %-15f | \n", stats->iteration++, ++stats->processed_nodes, Seconds(timer_inner));

    Start(timer);
    while(!isEmptyArrayQueue(sharedFrontierQueue)) // start while
    {
        if(mf > (mu / alpha))
        {
            // Frontier is "heavy": convert it to a bitmap and pull.
            Start(timer_inner);
            arrayQueueToBitmap(sharedFrontierQueue, bitmapCurr);
            nf = sizeArrayQueue(sharedFrontierQueue);
            Stop(timer_inner);
            printf("| E %-12s | %-15s | %-15f | \n", " ", " ", Seconds(timer_inner));
            do
            {
                Start(timer_inner);
                nf_prev = nf;
                nf = bottomUpStepGraphCSR(graph, bitmapCurr, bitmapNext, stats);
                swapBitmaps(&bitmapCurr, &bitmapNext);
                clearBitmap(bitmapNext);
                Stop(timer_inner);
                //stats collection
                stats->time_total += Seconds(timer_inner);
                stats->processed_nodes += nf;
                printf("| BU %-12u | %-15u | %-15f | \n", stats->iteration++, nf, Seconds(timer_inner));
            }
            while(( nf > nf_prev) || // frontier still growing,
                  ( nf > (n / beta))); // or still too large to push efficiently
            // Back to push mode: rebuild the shared queue from the bitmap.
            Start(timer_inner);
            bitmapToArrayQueue(bitmapCurr, sharedFrontierQueue, localFrontierQueues);
            Stop(timer_inner);
            printf("| C %-12s | %-15s | %-15f | \n", " ", " ", Seconds(timer_inner));
            mf = 1; // force at least one top-down step before re-evaluating
        }
        else
        {
            Start(timer_inner);
            mu -= mf; // edges just explored no longer count as unexplored
            mf = topDownStepGraphCSR(graph, sharedFrontierQueue, localFrontierQueues, stats);
            slideWindowArrayQueue(sharedFrontierQueue);
            Stop(timer_inner);
            //stats collection
            stats->time_total += Seconds(timer_inner);
            stats->processed_nodes += sharedFrontierQueue->tail - sharedFrontierQueue->head;
            printf("| TD %-12u | %-15u | %-15f | \n", stats->iteration++, sharedFrontierQueue->tail - sharedFrontierQueue->head, Seconds(timer_inner));
        }
    } // end while
    Stop(timer);

    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15u | %-15f | \n", "No OverHead", stats->processed_nodes, stats->time_total);
    printf(" -----------------------------------------------------\n");
    stats->time_total = Seconds(timer); // wall-clock total replaces the summed inner times
    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15u | %-15f | \n", "total", stats->processed_nodes, Seconds(timer));
    printf(" -----------------------------------------------------\n");

    for(i = 0 ; i < P ; i++)
    {
        freeArrayQueue(localFrontierQueues[i]);
    }
    free(localFrontierQueues);
    freeArrayQueue(sharedFrontierQueue);
    freeBitmap(bitmapNext);
    freeBitmap(bitmapCurr);
    free(timer);
    free(timer_inner);
    return stats;
}
// top-down-step(graph, sharedFrontierQueue, next, parents)
// for v ∈ sharedFrontierQueue do
// for u ∈ neighbors[v] do
// if parents[u] = -1 then
// parents[u] ← v
// next ← next ∪ {u}
// end if
// end for
// end for
// Parallel top-down (push) BFS step.
// Each thread scans its share of the current frontier window [head, tail) of
// sharedFrontierQueue, claims unvisited neighbors via compare-and-swap on
// stats->parents, and collects newly claimed vertices into its private
// localFrontierQueue, which is flushed into the shared queue at the end.
// Returns mf, accumulated from -(u_parent) of every claimed vertex (used by
// the direction-optimizing heuristic in the callers).
uint32_t topDownStepGraphCSR(struct GraphCSR *graph, struct ArrayQueue *sharedFrontierQueue, struct ArrayQueue **localFrontierQueues, struct BFSStats *stats)
{
uint32_t v;
uint32_t u;
uint32_t i;
uint32_t j;
uint32_t edge_idx;
uint32_t mf = 0; // heuristic weight of vertices claimed during this step
#pragma omp parallel default (none) private(u,v,j,i,edge_idx) shared(stats,localFrontierQueues,graph,sharedFrontierQueue,mf)
{
uint32_t t_id = omp_get_thread_num();
// One private queue per thread, so enqueueing needs no synchronization.
struct ArrayQueue *localFrontierQueue = localFrontierQueues[t_id];
#pragma omp for reduction(+:mf) schedule(auto)
for(i = sharedFrontierQueue->head ; i < sharedFrontierQueue->tail; i++)
{
v = sharedFrontierQueue->queue[i];
// Start of v's adjacency list in the CSR edge array.
edge_idx = graph->vertices->edges_idx[v];
for(j = edge_idx ; j < (edge_idx + graph->vertices->out_degree[v]) ; j++)
{
u = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[j]);
int u_parent = stats->parents[u];
// Negative parent means "unvisited". The magnitude is presumably an
// encoding of u's out-degree set at initialization, so -(u_parent)
// below contributes u's edge count to mf -- TODO confirm against the
// parents-array initialization elsewhere in the project.
if(u_parent < 0 )
{
// CAS so exactly one thread wins u; losers simply skip it.
if(__sync_bool_compare_and_swap(&stats->parents[u], u_parent, v))
{
enArrayQueue(localFrontierQueue, u);
mf += -(u_parent);
stats->distances[u] = stats->distances[v] + 1;
}
}
}
}
// Append this thread's discoveries to the shared next frontier.
flushArrayQueueToShared(localFrontierQueue, sharedFrontierQueue);
}
return mf;
}
// bottom-up-step(graph, sharedFrontierQueue, next, parents) //pull
// for v ∈ vertices do
// if parents[v] = -1 then
// for u ∈ neighbors[v] do
// if u ∈ sharedFrontierQueue then
// parents[v] ← u
// next ← next ∪ {v}
// break
// end if
// end for
// end if
// end for
// Parallel bottom-up (pull) BFS step.
// Every still-unvisited vertex v scans its (incoming) neighbors; the first
// neighbor found in the current frontier bitmap becomes v's parent, v is
// added to the next frontier bitmap, and the scan breaks early.
// Returns nf, the number of vertices added to the next frontier.
uint32_t bottomUpStepGraphCSR(struct GraphCSR *graph, struct Bitmap *bitmapCurr, struct Bitmap *bitmapNext, struct BFSStats *stats)
{
uint32_t v;
uint32_t u;
uint32_t j;
uint32_t edge_idx;
uint32_t out_degree;
struct Vertex *vertices = NULL;
uint32_t *sorted_edges_array = NULL;
// uint32_t processed_nodes = bitmapCurr->numSetBits;
uint32_t nf = 0; // number of vertices in sharedFrontierQueue
// stats->processed_nodes += processed_nodes;
// Directed graphs pull along inverse (incoming) edges; undirected graphs
// use the single symmetric edge array for both directions.
#if DIRECTED
vertices = graph->inverse_vertices;
sorted_edges_array = graph->inverse_sorted_edges_array->edges_array_dest;
#else
vertices = graph->vertices;
sorted_edges_array = graph->sorted_edges_array->edges_array_dest;
#endif
#pragma omp parallel for default(none) private(j,u,v,out_degree,edge_idx) shared(stats,bitmapCurr,bitmapNext,graph,vertices,sorted_edges_array) reduction(+:nf) schedule(dynamic, 1024)
for(v = 0 ; v < graph->num_vertices ; v++)
{
out_degree = vertices->out_degree[v];
// Negative parent means "unvisited"; only one loop iteration owns each v,
// so writing parents[v]/distances[v] below needs no atomics.
if(stats->parents[v] < 0) // optmization
{
edge_idx = vertices->edges_idx[v];
for(j = edge_idx ; j < (edge_idx + out_degree) ; j++)
{
u = EXTRACT_VALUE(sorted_edges_array[j]);
if(getBit(bitmapCurr, u))
{
stats->parents[v] = u;
//we are not considering distance array as it is not implemented in AccelGraph
stats->distances[v] = stats->distances[u] + 1;
// Bitmap words are shared between iterations, hence the atomic set.
setBitAtomic(bitmapNext, v);
nf++;
break; // first frontier neighbor wins; stop scanning v's edges
}
}
}
}
return nf;
}
// Offload one bottom-up BFS step to the CAPI AFU.
// Starts the compute unit, blocks until the AFU signals completion, and
// returns the frontier size the hardware reports. The graph/worklist/stats
// parameters are part of the call contract but unused on this path.
uint32_t bottomUpStepGraphCSRCAPI(struct GraphCSR *graph, uint8_t *workListCurr, uint8_t *workListNext, struct BFSStats *stats, struct AFUStatus *afu_status, struct cxl_afu_h *afu)
{
    // Kick off the compute unit, then wait for it to report done.
    startCU(&afu, afu_status);
    waitAFU(&afu, afu_status);

    // Number of vertices the AFU placed in the next frontier.
    return afu_status->cu_return_done_2;
}
// ********************************************************************************************
// *************** CSR DataStructure/Bitmap Frontiers **************
// ********************************************************************************************
// / breadth-first-search(graph, arguments->source)
// sharedFrontierQueue ← {arguments->source}
// next ← {}
// parents ← [-1,-1,. . . -1]
// while sharedFrontierQueue != {} do
// top-down-step(graph, sharedFrontierQueue, next, parents)
// sharedFrontierQueue ← next
// next ← {}
// end while
// return parents
// Push-based (top-down) BFS using the queue's bitmap pair as frontiers.
// The current/next frontiers live in sharedFrontierQueue->q_bitmap and
// ->q_bitmap_next, swapped after every step. Returns a freshly allocated
// BFSStats owned by the caller.
struct BFSStats *breadthFirstSearchPushBitmapGraphCSR(struct Arguments *arguments, struct GraphCSR *graph)
{
    struct BFSStats *stats = newBFSStatsGraphCSR(graph);

    // Valid vertex ids are 0 .. num_vertices-1. The previous '>' comparison
    // accepted source == num_vertices and then indexed label_array one past
    // its end; '>=' rejects that id.
    if(arguments->source >= graph->num_vertices)
    {
        printf(" -----------------------------------------------------\n");
        printf("| %-51s | \n", "ERROR!! CHECK SOURCE RANGE");
        printf(" -----------------------------------------------------\n");
        return stats;
    }

    // Map the user-supplied id into the internal (relabeled) id space.
    arguments->source = graph->sorted_edges_array->label_array[arguments->source];

    printf(" -----------------------------------------------------\n");
    printf("| %-51s | \n", "Starting BFS PUSH/Bitmap (SOURCE NODE)");
    printf(" -----------------------------------------------------\n");
    printf("| %-51u | \n", arguments->source);
    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15s | %-15s | \n", "Iteration", "Nodes", "Time (Seconds)");
    printf(" -----------------------------------------------------\n");

    struct Timer *timer = malloc(sizeof *timer);
    struct Timer *timer_inner = malloc(sizeof *timer_inner);
    struct ArrayQueue *sharedFrontierQueue = newArrayQueue(graph->num_vertices);

    // Seed: set the source in the "next" bitmap, mark it its own parent,
    // then swap so it becomes the current frontier.
    Start(timer_inner);
    setBit(sharedFrontierQueue->q_bitmap_next, arguments->source);
    sharedFrontierQueue->q_bitmap_next->numSetBits = 1;
    stats->parents[arguments->source] = arguments->source;
    swapBitmaps(&sharedFrontierQueue->q_bitmap, &sharedFrontierQueue->q_bitmap_next);
    clearBitmap(sharedFrontierQueue->q_bitmap_next);
    Stop(timer_inner);
    stats->time_total += Seconds(timer_inner);
    printf("| TD %-12u | %-15u | %-15f | \n", stats->iteration++, ++stats->processed_nodes, Seconds(timer_inner));

    Start(timer);
    while (sharedFrontierQueue->q_bitmap->numSetBits) // one iteration per BFS level
    {
        Start(timer_inner);
        topDownStepUsingBitmapsGraphCSR(graph, sharedFrontierQueue, stats);
        // Count the freshly produced frontier, then promote it to current.
        sharedFrontierQueue->q_bitmap_next->numSetBits = getNumOfSetBits(sharedFrontierQueue->q_bitmap_next);
        swapBitmaps(&sharedFrontierQueue->q_bitmap, &sharedFrontierQueue->q_bitmap_next);
        clearBitmap(sharedFrontierQueue->q_bitmap_next);
        Stop(timer_inner);
        stats->time_total += Seconds(timer_inner);
        stats->processed_nodes += sharedFrontierQueue->q_bitmap->numSetBits;
        printf("| TD %-12u | %-15u | %-15f | \n", stats->iteration++, sharedFrontierQueue->q_bitmap->numSetBits, Seconds(timer_inner));
    } // end while
    Stop(timer);

    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15u | %-15f | \n", "No OverHead", stats->processed_nodes, stats->time_total);
    printf(" -----------------------------------------------------\n");
    stats->time_total = Seconds(timer); // wall-clock total replaces the summed inner times
    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15u | %-15f | \n", "total", stats->processed_nodes, Seconds(timer));
    printf(" -----------------------------------------------------\n");

    freeArrayQueue(sharedFrontierQueue);
    free(timer);
    free(timer_inner);
    return stats;
}
// breadth-first-search(graph, arguments->source)
// sharedFrontierQueue ← {arguments->source}
// next ← {}
// parents ← [-1,-1,. . . -1]
// while sharedFrontierQueue != {} do
// top-down-step(graph, sharedFrontierQueue, next, parents)
// sharedFrontierQueue ← next
// next ← {}
// end while
// return parents
// Direction-optimized BFS using the queue's bitmap pair for both phases.
// Same push/pull switching heuristic as the queue-based variant (switch to
// pull when mf > mu/alpha; back to push when the frontier stops growing and
// drops below n/beta), but frontiers stay in q_bitmap/q_bitmap_next so no
// queue<->bitmap conversion is needed. Returns caller-owned BFSStats.
struct BFSStats *breadthFirstSearchPushDirectionOptimizedBitmapGraphCSR(struct Arguments *arguments, struct GraphCSR *graph)
{
    struct BFSStats *stats = newBFSStatsGraphCSR(graph);

    // Valid ids are 0 .. num_vertices-1; use '>=' (not '>') so that
    // source == num_vertices cannot index one past the end of label_array.
    if(arguments->source >= graph->num_vertices)
    {
        printf(" -----------------------------------------------------\n");
        printf("| %-51s | \n", "ERROR!! CHECK SOURCE RANGE");
        printf(" -----------------------------------------------------\n");
        return stats;
    }

    // Map the user-supplied id into the internal (relabeled) id space.
    arguments->source = graph->sorted_edges_array->label_array[arguments->source];

    printf(" -----------------------------------------------------\n");
    printf("| %-51s | \n", "Starting BFS PUSH/PULL Bitmap (SOURCE NODE)");
    printf(" -----------------------------------------------------\n");
    printf("| %-51u | \n", arguments->source);
    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15s | %-15s | \n", "Iteration", "Nodes", "Time (Seconds)");
    printf(" -----------------------------------------------------\n");

    struct Timer *timer = malloc(sizeof *timer);
    struct Timer *timer_inner = malloc(sizeof *timer_inner);
    struct ArrayQueue *sharedFrontierQueue = newArrayQueue(graph->num_vertices);

    // Direction-switch heuristic state:
    //   mu = edges not yet explored, mf = edges out of the current frontier,
    //   nf = frontier size, n  = vertex count.
    uint32_t mu = graph->num_edges; // number of edges to check from sharedFrontierQueue
    uint32_t mf = graph->vertices->out_degree[arguments->source]; // number of edges from unexplored verticies
    uint32_t nf = 0; // number of vertices in sharedFrontierQueue
    uint32_t nf_prev = 0; // number of vertices in sharedFrontierQueue
    uint32_t n = graph->num_vertices; // number of nodes
    uint32_t alpha = 15; // push -> pull switch threshold
    uint32_t beta = 18;  // pull -> push switch threshold

    // Seed: set the source in the "next" bitmap, mark it its own parent,
    // then swap so it becomes the current frontier.
    Start(timer_inner);
    setBit(sharedFrontierQueue->q_bitmap_next, arguments->source);
    sharedFrontierQueue->q_bitmap_next->numSetBits = 1;
    stats->parents[arguments->source] = arguments->source;
    swapBitmaps(&sharedFrontierQueue->q_bitmap, &sharedFrontierQueue->q_bitmap_next);
    clearBitmap(sharedFrontierQueue->q_bitmap_next);
    Stop(timer_inner);
    stats->time_total += Seconds(timer_inner);
    printf("| TD %-12u | %-15u | %-15f | \n", stats->iteration++, ++stats->processed_nodes, Seconds(timer_inner));

    Start(timer);
    while (sharedFrontierQueue->q_bitmap->numSetBits)
    {
        if(mf > (mu / alpha))
        {
            // Pull phase; frontier is already a bitmap, no conversion needed.
            nf = sharedFrontierQueue->q_bitmap->numSetBits;
            // NOTE(review): this "E" row prints the elapsed time of the
            // previous Start/Stop pair (timer_inner is not restarted here)
            // -- looks stale; confirm the intended measurement.
            printf("| E %-12s | %-15s | %-15f | \n", " ", " ", Seconds(timer_inner));
            do
            {
                Start(timer_inner);
                nf_prev = nf;
                nf = bottomUpStepGraphCSR(graph, sharedFrontierQueue->q_bitmap, sharedFrontierQueue->q_bitmap_next, stats);
                sharedFrontierQueue->q_bitmap_next->numSetBits = nf;
                swapBitmaps(&sharedFrontierQueue->q_bitmap, &sharedFrontierQueue->q_bitmap_next);
                clearBitmap(sharedFrontierQueue->q_bitmap_next);
                Stop(timer_inner);
                //stats
                stats->time_total += Seconds(timer_inner);
                stats->processed_nodes += nf;
                printf("| BU %-12u | %-15u | %-15f | \n", stats->iteration++, nf, Seconds(timer_inner));
            }
            while(( nf > nf_prev) || // frontier still growing,
                  ( nf > (n / beta))); // or still too large to push efficiently
            printf("| C %-12s | %-15s | %-15f | \n", " ", " ", Seconds(timer_inner));
            mf = 1; // force at least one top-down step before re-evaluating
        }
        else
        {
            mu -= mf; // edges just explored no longer count as unexplored
            Start(timer_inner);
            mf = topDownStepUsingBitmapsGraphCSR(graph, sharedFrontierQueue, stats);
            sharedFrontierQueue->q_bitmap_next->numSetBits = getNumOfSetBits(sharedFrontierQueue->q_bitmap_next);
            swapBitmaps(&sharedFrontierQueue->q_bitmap, &sharedFrontierQueue->q_bitmap_next);
            clearBitmap(sharedFrontierQueue->q_bitmap_next);
            Stop(timer_inner);
            stats->time_total += Seconds(timer_inner);
            stats->processed_nodes += sharedFrontierQueue->q_bitmap->numSetBits;
            printf("| TD %-12u | %-15u | %-15f | \n", stats->iteration++, sharedFrontierQueue->q_bitmap->numSetBits, Seconds(timer_inner));
        }
    } // end while
    Stop(timer);

    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15u | %-15f | \n", "No OverHead", stats->processed_nodes, stats->time_total);
    printf(" -----------------------------------------------------\n");
    stats->time_total = Seconds(timer); // wall-clock total replaces the summed inner times
    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15u | %-15f | \n", "total", stats->processed_nodes, Seconds(timer));
    printf(" -----------------------------------------------------\n");

    freeArrayQueue(sharedFrontierQueue);
    free(timer);
    free(timer_inner);
    return stats;
}
// Parallel top-down (push) BFS step over bitmap frontiers.
// Each thread scans its share of the current frontier bitmap
// (sharedFrontierQueue->q_bitmap), claims unvisited neighbors via
// compare-and-swap on stats->parents, and marks newly claimed vertices in
// the next frontier bitmap (q_bitmap_next). Returns mf, accumulated from
// -(u_parent) of every claimed vertex (used by the direction heuristic).
uint32_t topDownStepUsingBitmapsGraphCSR(struct GraphCSR *graph, struct ArrayQueue *sharedFrontierQueue, struct BFSStats *stats)
{
uint32_t v;
uint32_t u;
uint32_t i;
uint32_t j;
uint32_t edge_idx;
uint32_t mf = 0; // heuristic weight of vertices claimed during this step
#pragma omp parallel default (none) private(u,v,j,i,edge_idx) shared(stats,graph,sharedFrontierQueue,mf)
{
#pragma omp for reduction(+:mf)
for(i = 0 ; i < (sharedFrontierQueue->q_bitmap->size); i++)
{
// Only bits set in the current frontier are expanded.
if(getBit(sharedFrontierQueue->q_bitmap, i))
{
// processed_nodes++;
v = i;
// Start of v's adjacency list in the CSR edge array.
edge_idx = graph->vertices->edges_idx[v];
for(j = edge_idx ; j < (edge_idx + graph->vertices->out_degree[v]) ; j++)
{
u = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[j]);
int u_parent = stats->parents[u];
// Negative parent means "unvisited"; the magnitude is presumably an
// encoding of u's out-degree so -(u_parent) counts u's edges -- TODO
// confirm against the parents-array initialization in the project.
if(u_parent < 0 )
{
// CAS so exactly one thread wins u; losers simply skip it.
if(__sync_bool_compare_and_swap(&stats->parents[u], u_parent, v))
{
mf += -(u_parent);
stats->distances[u] = stats->distances[v] + 1;
// Bitmap words are shared between threads, hence the atomic set.
setBitAtomic(sharedFrontierQueue->q_bitmap_next, u);
}
}
}
}
}
}
return mf;
}
// ********************************************************************************************
// *************** CSR DataStructure DualOrder **************
// ********************************************************************************************
// Dispatch a dual-order BFS variant based on arguments->pushpull:
// 0 = pull (bottom-up), 1 = push (top-down), anything else = the
// direction-optimized push/pull hybrid. Returns the variant's BFSStats.
struct BFSStats *breadthFirstSearchGraphCSRDualOrder(struct Arguments *arguments, struct GraphCSR *graph)
{
    if(arguments->pushpull == 0) // pull
    {
        return breadthFirstSearchPullGraphCSRDualOrder(arguments, graph);
    }

    if(arguments->pushpull == 1) // push
    {
        return breadthFirstSearchPushGraphCSRDualOrder(arguments, graph);
    }

    // 2 and every other value: direction-optimized push/pull.
    return breadthFirstSearchDirectionOptimizedGraphCSRDualOrder(arguments, graph);
}
// breadth-first-search(graph, arguments->source)
// sharedFrontierQueue ← {arguments->source}
// next ← {}
// parents ← [-1,-1,. . . -1]
// while sharedFrontierQueue != {} do
// top-down-step(graph, sharedFrontierQueue, next, parents)
// sharedFrontierQueue ← next
// next ← {}
// end while
// return parents
// Pull-based (bottom-up) dual-order BFS over a CSR graph.
// Frontiers live in the queue's bitmap pair; every level runs one
// bottom-up step until no new vertices are discovered. Returns a freshly
// allocated BFSStats owned by the caller.
struct BFSStats *breadthFirstSearchPullGraphCSRDualOrder(struct Arguments *arguments, struct GraphCSR *graph)
{
    struct BFSStats *stats = newBFSStatsGraphCSR(graph);

    // Valid vertex ids are 0 .. num_vertices-1. The previous '>' comparison
    // accepted source == num_vertices and then indexed label_array one past
    // its end; '>=' rejects that id.
    if(arguments->source >= graph->num_vertices)
    {
        printf(" -----------------------------------------------------\n");
        printf("| %-51s | \n", "ERROR!! CHECK SOURCE RANGE");
        printf(" -----------------------------------------------------\n");
        return stats;
    }

    // Pull traverses incoming edges, so on directed graphs the source id is
    // mapped through the inverse edge array's label space.
#if DIRECTED
    arguments->source = graph->inverse_sorted_edges_array->label_array[arguments->source];
#else
    arguments->source = graph->sorted_edges_array->label_array[arguments->source];
#endif

    printf(" -----------------------------------------------------\n");
    printf("| %-51s | \n", "Starting BFS DualOrder PULL/BU (SOURCE NODE)");
    printf(" -----------------------------------------------------\n");
    printf("| %-51u | \n", arguments->source);
    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15s | %-15s | \n", "Iteration", "Nodes", "Time (Seconds)");
    printf(" -----------------------------------------------------\n");

    struct Timer *timer = malloc(sizeof *timer);
    struct Timer *timer_inner = malloc(sizeof *timer_inner);
    struct ArrayQueue *sharedFrontierQueue = newArrayQueue(graph->num_vertices);
    uint32_t nf = 0; // vertices discovered in the current level

    // Seed: set the source in the "next" bitmap, mark it its own parent,
    // then swap so it becomes the current frontier.
    Start(timer_inner);
    setBit(sharedFrontierQueue->q_bitmap_next, arguments->source);
    sharedFrontierQueue->q_bitmap_next->numSetBits = 1;
    stats->parents[arguments->source] = arguments->source;
    swapBitmaps(&sharedFrontierQueue->q_bitmap, &sharedFrontierQueue->q_bitmap_next);
    clearBitmap(sharedFrontierQueue->q_bitmap_next);
    Stop(timer_inner);
    stats->time_total += Seconds(timer_inner);
    printf("| BU %-12u | %-15u | %-15f | \n", stats->iteration++, ++stats->processed_nodes, Seconds(timer_inner));

    Start(timer);
    while (sharedFrontierQueue->q_bitmap->numSetBits) // one iteration per BFS level
    {
        Start(timer_inner);
        nf = bottomUpStepGraphCSRDualOrder(graph, sharedFrontierQueue->q_bitmap, sharedFrontierQueue->q_bitmap_next, stats);
        sharedFrontierQueue->q_bitmap_next->numSetBits = nf;
        swapBitmaps(&sharedFrontierQueue->q_bitmap, &sharedFrontierQueue->q_bitmap_next);
        clearBitmap(sharedFrontierQueue->q_bitmap_next);
        Stop(timer_inner);
        //stats
        stats->time_total += Seconds(timer_inner);
        stats->processed_nodes += nf;
        printf("| BU %-12u | %-15u | %-15f | \n", stats->iteration++, nf, Seconds(timer_inner));
    } // end while
    Stop(timer);

    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15u | %-15f | \n", "No OverHead", stats->processed_nodes, stats->time_total);
    printf(" -----------------------------------------------------\n");
    stats->time_total = Seconds(timer); // wall-clock total replaces the summed inner times
    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15u | %-15f | \n", "total", stats->processed_nodes, Seconds(timer));
    printf(" -----------------------------------------------------\n");

    freeArrayQueue(sharedFrontierQueue);
    free(timer);
    free(timer_inner);
    return stats;
}
// breadth-first-search(graph, arguments->source)
// sharedFrontierQueue ← {arguments->source}
// next ← {}
// parents ← [-1,-1,. . . -1]
// while sharedFrontierQueue != {} do
// top-down-step(graph, sharedFrontierQueue, next, parents)
// sharedFrontierQueue ← next
// next ← {}
// end while
// return parents
// Push-based (top-down) dual-order BFS over a CSR graph.
// Same structure as the plain push variant, but each level calls the
// dual-order top-down step. Returns a freshly allocated BFSStats owned by
// the caller.
struct BFSStats *breadthFirstSearchPushGraphCSRDualOrder(struct Arguments *arguments, struct GraphCSR *graph)
{
    struct BFSStats *stats = newBFSStatsGraphCSR(graph);

    // Valid vertex ids are 0 .. num_vertices-1. The previous '>' comparison
    // accepted source == num_vertices and then indexed label_array one past
    // its end; '>=' rejects that id.
    if(arguments->source >= graph->num_vertices)
    {
        printf(" -----------------------------------------------------\n");
        printf("| %-51s | \n", "ERROR!! CHECK SOURCE RANGE");
        printf(" -----------------------------------------------------\n");
        return stats;
    }

    // Map the user-supplied id into the internal (relabeled) id space.
    arguments->source = graph->sorted_edges_array->label_array[arguments->source];

    printf(" -----------------------------------------------------\n");
    printf("| %-51s | \n", "Starting BFS DualOrder PUSH/TD (SOURCE NODE)");
    printf(" -----------------------------------------------------\n");
    printf("| %-51u | \n", arguments->source);
    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15s | %-15s | \n", "Iteration", "Nodes", "Time (Seconds)");
    printf(" -----------------------------------------------------\n");

    struct Timer *timer = malloc(sizeof *timer);
    struct Timer *timer_inner = malloc(sizeof *timer_inner);
    struct ArrayQueue *sharedFrontierQueue = newArrayQueue(graph->num_vertices);

    // One private frontier queue per thread for the push step.
    uint32_t P = arguments->algo_numThreads;
    struct ArrayQueue **localFrontierQueues = (struct ArrayQueue **) my_malloc( P * sizeof(struct ArrayQueue *));
    uint32_t i;
    for(i = 0 ; i < P ; i++)
    {
        localFrontierQueues[i] = newArrayQueue(graph->num_vertices);
    }

    // Seed: the source becomes its own parent, which marks it visited.
    Start(timer_inner);
    enArrayQueue(sharedFrontierQueue, arguments->source);
    stats->parents[arguments->source] = arguments->source;
    Stop(timer_inner);
    stats->time_total += Seconds(timer_inner);
    printf("| TD %-12u | %-15u | %-15f | \n", stats->iteration++, ++stats->processed_nodes, Seconds(timer_inner));

    Start(timer);
    while(!isEmptyArrayQueue(sharedFrontierQueue)) // one iteration per BFS level
    {
        Start(timer_inner);
        topDownStepGraphCSRDualOrder(graph, sharedFrontierQueue, localFrontierQueues, stats);
        // Advance the queue window so [head, tail) is the frontier just produced.
        slideWindowArrayQueue(sharedFrontierQueue);
        Stop(timer_inner);
        //stats collection
        stats->time_total += Seconds(timer_inner);
        stats->processed_nodes += sharedFrontierQueue->tail - sharedFrontierQueue->head;
        printf("| TD %-12u | %-15u | %-15f | \n", stats->iteration++, sharedFrontierQueue->tail - sharedFrontierQueue->head, Seconds(timer_inner));
    } // end while
    Stop(timer);

    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15u | %-15f | \n", "No OverHead", stats->processed_nodes, stats->time_total);
    printf(" -----------------------------------------------------\n");
    stats->time_total = Seconds(timer); // wall-clock total replaces the summed inner times
    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15u | %-15f | \n", "total", stats->processed_nodes, Seconds(timer));
    printf(" -----------------------------------------------------\n");

    for(i = 0 ; i < P ; i++)
    {
        freeArrayQueue(localFrontierQueues[i]);
    }
    free(localFrontierQueues);
    freeArrayQueue(sharedFrontierQueue);
    free(timer);
    free(timer_inner);
    return stats;
}
// breadth-first-search(graph, arguments->source)
// sharedFrontierQueue ← {arguments->source}
// next ← {}
// parents ← [-1,-1,. . . -1]
// while sharedFrontierQueue != {} do
// top-down-step(graph, sharedFrontierQueue, next, parents)
// sharedFrontierQueue ← next
// next ← {}
// end while
// return parents
// Direction-optimized dual-order BFS (push/pull hybrid) over a CSR graph.
// Like the plain hybrid, but push and pull operate in different vertex-id
// orderings, so the parent arrays are synchronized through the
// label/inverse-label maps whenever the traversal changes direction.
// Returns a freshly allocated BFSStats owned by the caller.
struct BFSStats *breadthFirstSearchDirectionOptimizedGraphCSRDualOrder(struct Arguments *arguments, struct GraphCSR *graph)
{
    struct BFSStats *stats = newBFSStatsGraphCSR(graph);

    // Valid ids are 0 .. num_vertices-1; use '>=' (not '>') so that
    // source == num_vertices cannot index one past the end of label_array.
    if(arguments->source >= graph->num_vertices)
    {
        printf(" -----------------------------------------------------\n");
        printf("| %-51s | \n", "ERROR!! CHECK SOURCE RANGE");
        printf(" -----------------------------------------------------\n");
        return stats;
    }

    // Map the user-supplied id into the internal (relabeled) id space.
    arguments->source = graph->sorted_edges_array->label_array[arguments->source];

    printf(" -----------------------------------------------------\n");
    printf("| %-51s | \n", "Starting BFS DualOrder PUSH/PULL(SOURCE NODE)");
    printf(" -----------------------------------------------------\n");
    printf("| %-51u | \n", arguments->source);
    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15s | %-15s | \n", "Iteration", "Nodes", "Time (Seconds)");
    printf(" -----------------------------------------------------\n");

    struct Timer *timer = malloc(sizeof *timer);
    struct Timer *timer_inner = malloc(sizeof *timer_inner);
    struct ArrayQueue *sharedFrontierQueue = newArrayQueue(graph->num_vertices);
    struct Bitmap *bitmapCurr = newBitmap(graph->num_vertices);
    struct Bitmap *bitmapNext = newBitmap(graph->num_vertices);

    uint32_t P = arguments->algo_numThreads;
    // Direction-switch heuristic state:
    //   mu = edges not yet explored, mf = edges out of the current frontier,
    //   nf = frontier size, n  = vertex count.
    uint32_t mu = graph->num_edges; // number of edges to check from sharedFrontierQueue
    uint32_t mf = graph->vertices->out_degree[arguments->source]; // number of edges from unexplored verticies
    uint32_t nf = 0; // number of vertices in sharedFrontierQueue
    uint32_t nf_prev = 0; // number of vertices in sharedFrontierQueue
    uint32_t n = graph->num_vertices; // number of nodes
    uint32_t alpha = 15; // push -> pull switch threshold
    uint32_t beta = 18;  // pull -> push switch threshold

    // One private frontier queue per thread for the push phase.
    struct ArrayQueue **localFrontierQueues = (struct ArrayQueue **) my_malloc( P * sizeof(struct ArrayQueue *));
    uint32_t i;
    for(i = 0 ; i < P ; i++)
    {
        localFrontierQueues[i] = newArrayQueue(graph->num_vertices);
    }

    // Seed: the source becomes its own parent, which marks it visited.
    Start(timer_inner);
    enArrayQueue(sharedFrontierQueue, arguments->source);
    stats->parents[arguments->source] = arguments->source;
    Stop(timer_inner);
    stats->time_total += Seconds(timer_inner);
    printf("| TD %-12u | %-15u | %-15f | \n", stats->iteration++, ++stats->processed_nodes, Seconds(timer_inner));

    Start(timer);
    while(!isEmptyArrayQueue(sharedFrontierQueue)) // start while
    {
        if(mf > (mu / alpha))
        {
            // Switch to pull: translate the frontier into the inverse-order
            // bitmap and bring the pull-order parent array up to date.
            Start(timer_inner);
            arrayQueueToBitmapDualOrder(sharedFrontierQueue, bitmapCurr, graph->sorted_edges_array->inverse_label_array);
            syncDualOrderParentArrays(&(stats->parents), &(stats->parents_DualOrder), graph->sorted_edges_array->inverse_label_array, graph->num_vertices);
            nf = sizeArrayQueue(sharedFrontierQueue);
            Stop(timer_inner);
            printf("| E %-12s | %-15s | %-15f | \n", " ", " ", Seconds(timer_inner));
            do
            {
                Start(timer_inner);
                nf_prev = nf;
                nf = bottomUpStepGraphCSRDualOrder(graph, bitmapCurr, bitmapNext, stats);
                swapBitmaps(&bitmapCurr, &bitmapNext);
                clearBitmap(bitmapNext);
                Stop(timer_inner);
                //stats collection
                stats->time_total += Seconds(timer_inner);
                stats->processed_nodes += nf;
                printf("| BU %-12u | %-15u | %-15f | \n", stats->iteration++, nf, Seconds(timer_inner));
            }
            while(( nf > nf_prev) || // frontier still growing,
                  ( nf > (n / beta))); // or still too large to push efficiently
            // Switch back to push: sync parents back to push order and
            // rebuild the shared queue from the bitmap.
            Start(timer_inner);
            syncDualOrderParentArrays(&(stats->parents), &(stats->parents_DualOrder), graph->inverse_sorted_edges_array->inverse_label_array, graph->num_vertices);
            bitmapToArrayQueueDualOrder(bitmapCurr, sharedFrontierQueue, localFrontierQueues, graph->inverse_sorted_edges_array->inverse_label_array);
            Stop(timer_inner);
            printf("| C %-12s | %-15s | %-15f | \n", " ", " ", Seconds(timer_inner));
            mf = 1; // force at least one top-down step before re-evaluating
        }
        else
        {
            Start(timer_inner);
            mu -= mf; // edges just explored no longer count as unexplored
            mf = topDownStepGraphCSRDualOrder(graph, sharedFrontierQueue, localFrontierQueues, stats);
            slideWindowArrayQueue(sharedFrontierQueue);
            Stop(timer_inner);
            //stats collection
            stats->time_total += Seconds(timer_inner);
            stats->processed_nodes += sharedFrontierQueue->tail - sharedFrontierQueue->head;
            printf("| TD %-12u | %-15u | %-15f | \n", stats->iteration++, sharedFrontierQueue->tail - sharedFrontierQueue->head, Seconds(timer_inner));
        }
    } // end while
    Stop(timer);

    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15u | %-15f | \n", "No OverHead", stats->processed_nodes, stats->time_total);
    printf(" -----------------------------------------------------\n");
    stats->time_total = Seconds(timer); // wall-clock total replaces the summed inner times
    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15u | %-15f | \n", "total", stats->processed_nodes, Seconds(timer));
    printf(" -----------------------------------------------------\n");

    for(i = 0 ; i < P ; i++)
    {
        freeArrayQueue(localFrontierQueues[i]);
    }
    free(localFrontierQueues);
    freeArrayQueue(sharedFrontierQueue);
    freeBitmap(bitmapNext);
    freeBitmap(bitmapCurr);
    free(timer);
    free(timer_inner);
    return stats;
}
// top-down-step(graph, sharedFrontierQueue, next, parents)
// for v ∈ sharedFrontierQueue do
// for u ∈ neighbors[v] do
// if parents[u] = -1 then
// parents[u] ← v
// next ← next ∪ {u}
// end if
// end for
// end for
// Top-down (push) BFS step over the dual-order CSR graph.
// Scans every vertex v on the shared frontier; each still-unvisited
// out-neighbor u (stats->parents[u] < 0) is claimed with a CAS, pushed onto
// the calling thread's local queue, and assigned distance distances[v] + 1.
// Each thread flushes its local queue into the shared frontier before the
// parallel region ends.
// Returns mf: the reduction of -(old parents[u]) over all claimed vertices.
// Presumably parents[] encodes negated degree information for unvisited
// vertices so that mf estimates the edge work of the next frontier, used by
// the caller's direction-switching heuristic -- TODO confirm against the
// parents[] initialization code (not visible here).
uint32_t topDownStepGraphCSRDualOrder(struct GraphCSR *graph, struct ArrayQueue *sharedFrontierQueue, struct ArrayQueue **localFrontierQueues, struct BFSStats *stats)
{
uint32_t v;
uint32_t u;
uint32_t i;
uint32_t j;
uint32_t edge_idx;
uint32_t mf = 0; // accumulated across threads via reduction(+:mf)
#pragma omp parallel default (none) private(u,v,j,i,edge_idx) shared(stats,localFrontierQueues,graph,sharedFrontierQueue,mf)
{
uint32_t t_id = omp_get_thread_num();
// each thread owns one pre-allocated local queue, indexed by thread id
struct ArrayQueue *localFrontierQueue = localFrontierQueues[t_id];
#pragma omp for reduction(+:mf) schedule(auto)
for(i = sharedFrontierQueue->head ; i < sharedFrontierQueue->tail; i++)
{
v = sharedFrontierQueue->queue[i];
edge_idx = graph->vertices->edges_idx[v]; // start of v's out-edge range
for(j = edge_idx ; j < (edge_idx + graph->vertices->out_degree[v]) ; j++)
{
u = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[j]);
int u_parent = stats->parents[u];
if(u_parent < 0 ) // u not yet visited
{
// CAS guarantees exactly one thread claims u as its child
if(__sync_bool_compare_and_swap(&stats->parents[u], u_parent, v))
{
enArrayQueue(localFrontierQueue, u);
mf += -(u_parent);
// only the CAS winner reaches this store
stats->distances[u] = stats->distances[v] + 1;
}
}
}
}
// publish this thread's discoveries to the shared frontier
flushArrayQueueToShared(localFrontierQueue, sharedFrontierQueue);
}
return mf;
}
// bottom-up-step(graph, sharedFrontierQueue, next, parents) //pull
// for v ∈ vertices do
// if parents[v] = -1 then
// for u ∈ neighbors[v] do
// if u ∈ sharedFrontierQueue then
// parents[v] ← u
// next ← next ∪ {v}
// break
// end if
// end for
// end if
// end for
// Bottom-up (pull) BFS step over the dual-order CSR graph.
// Every unvisited vertex v (parents[v] < 0) scans its neighborhood; the
// first neighbor u found on the current frontier bitmap becomes v's parent,
// v's distance is set to distances[u] + 1, and v is marked in bitmapNext.
// Returns nf, the number of vertices discovered by this step.
// parents[v]/distances[v] are plain stores: each v belongs to exactly one
// loop iteration, so only the shared bitmapNext words need setBitAtomic.
uint32_t bottomUpStepGraphCSRDualOrder(struct GraphCSR *graph, struct Bitmap *bitmapCurr, struct Bitmap *bitmapNext, struct BFSStats *stats)
{
uint32_t v;
uint32_t u;
uint32_t j;
uint32_t edge_idx;
uint32_t out_degree;
struct Vertex *vertices = NULL;
uint32_t *sorted_edges_array = NULL;
// uint32_t processed_nodes = bitmapCurr->numSetBits;
uint32_t nf = 0; // number of vertices in sharedFrontierQueue
// stats->processed_nodes += processed_nodes;
// Directed graphs pull along in-edges (inverse structure); undirected
// graphs use the forward structure directly.
#if DIRECTED
vertices = graph->inverse_vertices;
sorted_edges_array = graph->inverse_sorted_edges_array->edges_array_dest;
#else
vertices = graph->vertices;
sorted_edges_array = graph->sorted_edges_array->edges_array_dest;
#endif
#pragma omp parallel for default(none) private(j,u,v,out_degree,edge_idx) shared(stats,bitmapCurr,bitmapNext,graph,vertices,sorted_edges_array) reduction(+:nf) schedule(dynamic, 1024)
for(v = 0 ; v < graph->num_vertices ; v++)
{
out_degree = vertices->out_degree[v];
if(stats->parents[v] < 0) // optmization
{
edge_idx = vertices->edges_idx[v];
for(j = edge_idx ; j < (edge_idx + out_degree) ; j++)
{
u = EXTRACT_VALUE(sorted_edges_array[j]);
if(getBit(bitmapCurr, u)) // u is on the current frontier: adopt it
{
stats->parents[v] = u;
//we are not considering distance array as it is not implemented in AccelGraph
stats->distances[v] = stats->distances[u] + 1;
setBitAtomic(bitmapNext, v);
nf++;
break; // first frontier neighbor wins; stop scanning
}
}
}
}
return nf;
}
// ********************************************************************************************
// *************** GRID DataStructure **************
// ********************************************************************************************
// Dispatch to the grid BFS variant selected by arguments->pushpull:
// 0 = row (queue frontier), 1 = row bitmap, 2 = column (queue frontier),
// 3 = column bitmap; any other value falls back to the row variant.
// Returns the BFSStats produced by the chosen variant (caller frees).
struct BFSStats *breadthFirstSearchGraphGrid(struct Arguments *arguments, struct GraphGrid *graph)
{
    if(arguments->pushpull == 1)
    {
        return breadthFirstSearchRowGraphGridBitmap(arguments, graph);
    }
    if(arguments->pushpull == 2)
    {
        return breadthFirstSearchColumnGraphGrid(arguments, graph);
    }
    if(arguments->pushpull == 3)
    {
        return breadthFirstSearchColumnGraphGridBitmap(arguments, graph);
    }
    // pushpull == 0 and unknown values both select the row variant
    return breadthFirstSearchRowGraphGrid(arguments, graph);
}
// function STREAMVERTICES(Fv,F)
// Sum = 0
// for each vertex do
// if F(vertex) then
// Sum += Fv(edge)
// end if
// end for
// return Sum
// end function
// function STREAMEDGES(Fe,F)
// Sum = 0
// for each active block do >> block with active edges
// for each edge ∈ block do
// if F(edge.source) then
// Sum += Fe(edge)
// end if
// end for
// end for
// return Sum
// end function
//we assume that the edges are not sorted in each partition
// BFS over the Grid representation, streaming edges row by row (push).
// Seeds the shared frontier queue with arguments->source, then repeatedly
// streams all active partitions, slides the queue window, regenerates the
// frontier bitmap, and rebuilds the active-partition map until the frontier
// empties. Prints per-iteration stats; returns the populated BFSStats
// (caller owns and frees it).
struct BFSStats *breadthFirstSearchRowGraphGrid(struct Arguments *arguments, struct GraphGrid *graph)
{
    struct BFSStats *stats = newBFSStatsGraphGrid(graph);
    printf(" -----------------------------------------------------\n");
    printf("| %-51s | \n", "Starting BFS-Row (SOURCE NODE)");
    printf(" -----------------------------------------------------\n");
    printf("| %-51u | \n", arguments->source);
    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15s | %-15s | \n", "Iteration", "Nodes", "Time (Seconds)");
    printf(" -----------------------------------------------------\n");
    // FIX: valid vertex ids are 0 .. num_vertices-1. The original '>' check
    // accepted source == num_vertices and later wrote out of bounds at
    // stats->parents[source].
    if(arguments->source >= graph->num_vertices)
    {
        printf(" -----------------------------------------------------\n");
        printf("| %-51s | \n", "ERROR!! CHECK SOURCE RANGE");
        printf(" -----------------------------------------------------\n");
        return stats;
    }
    struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer));
    struct Timer *timer_iteration = (struct Timer *) malloc(sizeof(struct Timer));
    struct ArrayQueue *sharedFrontierQueue = newArrayQueue(graph->num_vertices);
    uint32_t P = arguments->algo_numThreads; // one local queue per thread
    struct ArrayQueue **localFrontierQueues = (struct ArrayQueue **) my_malloc( P * sizeof(struct ArrayQueue *));
    uint32_t i;
    #pragma omp parallel for
    for(i = 0 ; i < P ; i++)
    {
        localFrontierQueues[i] = newArrayQueue(graph->num_vertices);
    }
    graphGridReset(graph);
    uint32_t processed_nodes = 0;
    // Iteration 0: seed the frontier with the source vertex.
    Start(timer_iteration);
    enArrayQueue(sharedFrontierQueue, arguments->source);
    arrayQueueGenerateBitmap(sharedFrontierQueue);
    stats->parents[arguments->source] = arguments->source; // root parents itself
    graphGridSetActivePartitionsMap(graph->grid, arguments->source);
    Stop(timer_iteration);
    printf("| %-15u | %-15u | %-15f | \n", stats->iteration++, ++processed_nodes, Seconds(timer_iteration));
    stats->time_total += Seconds(timer_iteration);
    Start(timer);
    while(!isEmptyArrayQueue(sharedFrontierQueue)) // start while
    {
        Start(timer_iteration);
        breadthFirstSearchStreamEdgesRowGraphGrid(graph, sharedFrontierQueue, localFrontierQueues, stats);
        Stop(timer_iteration);
        // vertices discovered this iteration sit between tail and tail_next
        processed_nodes = sharedFrontierQueue->tail_next - sharedFrontierQueue->tail;
        slideWindowArrayQueue(sharedFrontierQueue);
        arrayQueueGenerateBitmap(sharedFrontierQueue);
        breadthFirstSearchSetActivePartitions(graph, sharedFrontierQueue);
        stats->time_total += Seconds(timer_iteration);
        printf("| %-15u | %-15u | %-15f | \n", stats->iteration++, processed_nodes, Seconds(timer_iteration));
    } // end while
    Stop(timer);
    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15u | %-15f | \n", "No OverHead", sharedFrontierQueue->tail_next, stats->time_total);
    printf(" -----------------------------------------------------\n");
    stats->time_total = Seconds(timer);
    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15u | %-15f | \n", "**", sharedFrontierQueue->tail_next, Seconds(timer));
    printf(" -----------------------------------------------------\n");
    freeArrayQueue(sharedFrontierQueue);
    for(i = 0 ; i < P ; i++)
    {
        freeArrayQueue(localFrontierQueues[i]);
    }
    free(localFrontierQueues);
    free(timer_iteration);
    free(timer);
    return stats;
}
// BFS over the Grid representation, streaming edges column by column (push).
// Identical driver loop to the row variant, but edges are streamed with
// breadthFirstSearchStreamEdgesColumnGraphGrid. Returns the populated
// BFSStats (caller owns and frees it).
struct BFSStats *breadthFirstSearchColumnGraphGrid(struct Arguments *arguments, struct GraphGrid *graph)
{
    struct BFSStats *stats = newBFSStatsGraphGrid(graph);
    printf(" -----------------------------------------------------\n");
    printf("| %-51s | \n", "Starting BFS-Column (SOURCE NODE)");
    printf(" -----------------------------------------------------\n");
    printf("| %-51u | \n", arguments->source);
    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15s | %-15s | \n", "Iteration", "Nodes", "Time (Seconds)");
    printf(" -----------------------------------------------------\n");
    // FIX: valid vertex ids are 0 .. num_vertices-1. The original '>' check
    // accepted source == num_vertices and later wrote out of bounds at
    // stats->parents[source].
    if(arguments->source >= graph->num_vertices)
    {
        printf(" -----------------------------------------------------\n");
        printf("| %-51s | \n", "ERROR!! CHECK SOURCE RANGE");
        printf(" -----------------------------------------------------\n");
        return stats;
    }
    struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer));
    struct Timer *timer_iteration = (struct Timer *) malloc(sizeof(struct Timer));
    struct ArrayQueue *sharedFrontierQueue = newArrayQueue(graph->num_vertices);
    uint32_t P = arguments->algo_numThreads; // one local queue per thread
    struct ArrayQueue **localFrontierQueues = (struct ArrayQueue **) my_malloc( P * sizeof(struct ArrayQueue *));
    uint32_t i;
    #pragma omp parallel for
    for(i = 0 ; i < P ; i++)
    {
        localFrontierQueues[i] = newArrayQueue(graph->num_vertices);
    }
    graphGridReset(graph);
    uint32_t processed_nodes = 0;
    // Iteration 0: seed the frontier with the source vertex.
    Start(timer_iteration);
    enArrayQueue(sharedFrontierQueue, arguments->source);
    arrayQueueGenerateBitmap(sharedFrontierQueue);
    stats->parents[arguments->source] = arguments->source; // root parents itself
    graphGridSetActivePartitionsMap(graph->grid, arguments->source);
    Stop(timer_iteration);
    printf("| %-15u | %-15u | %-15f | \n", stats->iteration++, ++processed_nodes, Seconds(timer_iteration));
    stats->time_total += Seconds(timer_iteration);
    Start(timer);
    while(!isEmptyArrayQueue(sharedFrontierQueue)) // start while
    {
        Start(timer_iteration);
        breadthFirstSearchStreamEdgesColumnGraphGrid(graph, sharedFrontierQueue, localFrontierQueues, stats);
        Stop(timer_iteration);
        // vertices discovered this iteration sit between tail and tail_next
        processed_nodes = sharedFrontierQueue->tail_next - sharedFrontierQueue->tail;
        slideWindowArrayQueue(sharedFrontierQueue);
        arrayQueueGenerateBitmap(sharedFrontierQueue);
        breadthFirstSearchSetActivePartitions(graph, sharedFrontierQueue);
        stats->time_total += Seconds(timer_iteration);
        printf("| %-15u | %-15u | %-15f | \n", stats->iteration++, processed_nodes, Seconds(timer_iteration));
    } // end while
    Stop(timer);
    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15u | %-15f | \n", "No OverHead", sharedFrontierQueue->tail_next, stats->time_total);
    printf(" -----------------------------------------------------\n");
    stats->time_total = Seconds(timer);
    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15u | %-15f | \n", "**", sharedFrontierQueue->tail_next, Seconds(timer));
    printf(" -----------------------------------------------------\n");
    freeArrayQueue(sharedFrontierQueue);
    for(i = 0 ; i < P ; i++)
    {
        freeArrayQueue(localFrontierQueues[i]);
    }
    free(localFrontierQueues);
    free(timer_iteration);
    free(timer);
    return stats;
}
// function STREAMEDGES(Fe,F)
// Sum = 0
// for each active block do >> block with active edges
// for each edge ∈ block do
// if F(edge.source) then
// Sum += Fe(edge)
// end if
// end for
// end for
// return Sum
// end function
//we assume that the edges are not sorted in each partition
// Stream the edges of all active partitions, row major (push direction).
// Rows of the PxP grid are processed sequentially; partitions within a row
// are distributed across threads with an OpenMP parallel for. Each thread
// relaxes a partition's edges into its own local queue, then flushes it to
// the shared frontier.
void breadthFirstSearchStreamEdgesRowGraphGrid(struct GraphGrid *graph, struct ArrayQueue *sharedFrontierQueue, struct ArrayQueue **localFrontierQueues, struct BFSStats *stats)
{
// struct Timer* timer = (struct Timer*) malloc(sizeof(struct Timer));
uint32_t totalPartitions = 0;
totalPartitions = graph->grid->num_partitions; // PxP
uint32_t i;
for (i = 0; i < totalPartitions; ++i)
{
uint32_t j;
#pragma omp parallel for default(none) shared(i,stats,totalPartitions,localFrontierQueues ,sharedFrontierQueue, graph)
for (j = 0; j < totalPartitions; ++j)
{
uint32_t t_id = omp_get_thread_num();
// uint32_t A = 0;
// each thread reuses its pre-allocated local queue, indexed by thread id
struct ArrayQueue *localFrontierQueue = localFrontierQueues[t_id];
// only partitions flagged in the active-partition map are scanned
if(getBit(graph->grid->activePartitionsMap, (i * totalPartitions) + j))
{
// #pragma omp task untied
// {
breadthFirstSearchPartitionGraphGrid(graph, &(graph->grid->partitions[(i * totalPartitions) + j]), sharedFrontierQueue, localFrontierQueue, stats);
flushArrayQueueToShared(localFrontierQueue, sharedFrontierQueue);
// }
}
}
}
// flushArrayQueueToShared(localFrontierQueue,sharedFrontierQueue);
// }
}
// Stream the edges of all active partitions, column major (push direction).
// Columns of the PxP grid are distributed across threads (omp for); each
// thread walks its column's partitions top to bottom, relaxing edges into
// its own local queue and flushing to the shared frontier per partition.
void breadthFirstSearchStreamEdgesColumnGraphGrid(struct GraphGrid *graph, struct ArrayQueue *sharedFrontierQueue, struct ArrayQueue **localFrontierQueues, struct BFSStats *stats)
{
// struct Timer* timer = (struct Timer*) malloc(sizeof(struct Timer));
uint32_t totalPartitions = 0;
totalPartitions = graph->grid->num_partitions; // PxP
#pragma omp parallel default(none) shared(stats,totalPartitions,localFrontierQueues ,sharedFrontierQueue, graph)
// #pragma omp single nowait
{
uint32_t t_id = omp_get_thread_num();
// uint32_t A = 0;
// each thread reuses its pre-allocated local queue, indexed by thread id
struct ArrayQueue *localFrontierQueue = localFrontierQueues[t_id];
uint32_t j;
#pragma omp for
for (j = 0; j < totalPartitions; ++j)
{
uint32_t i;
for (i = 0; i < totalPartitions; ++i)
{
// only partitions flagged in the active-partition map are scanned
if(getBit(graph->grid->activePartitionsMap, (i * totalPartitions) + j))
{
// #pragma omp task untied
// {
breadthFirstSearchPartitionGraphGrid(graph, &(graph->grid->partitions[(i * totalPartitions) + j]), sharedFrontierQueue, localFrontierQueue, stats);
flushArrayQueueToShared(localFrontierQueue, sharedFrontierQueue);
// }
}
}
}
}
// flushArrayQueueToShared(localFrontierQueue,sharedFrontierQueue);
// }
}
// Relax every edge (src -> dest) of one grid partition (push direction):
// when src is on the shared frontier and dest is unvisited
// (parents[dest] < 0), dest adopts src as parent, gets distance
// distances[src] + 1, and is appended to the caller's local queue.
// NOTE(review): the parent/distance stores are not atomic even though
// callers invoke this from inside OpenMP parallel loops; the CAS variant
// was deliberately commented out upstream.
void breadthFirstSearchPartitionGraphGrid(struct GraphGrid *graph, struct Partition *partition, struct ArrayQueue *sharedFrontierQueue, struct ArrayQueue *localFrontierQueue, struct BFSStats *stats)
{
    uint32_t e;
    for (e = 0; e < partition->num_edges; ++e)
    {
        uint32_t src_vertex = partition->edgeList->edges_array_src[e];
        uint32_t dest_vertex = partition->edgeList->edges_array_dest[e];
        int dest_parent = stats->parents[dest_vertex];
        if (isEnArrayQueued(sharedFrontierQueue, src_vertex) && (dest_parent < 0))
        {
            stats->parents[dest_vertex] = src_vertex;
            stats->distances[dest_vertex] = stats->distances[src_vertex] + 1;
            enArrayQueue(localFrontierQueue, dest_vertex);
        }
    }
}
// Rebuild the grid's active-partition map from the vertices currently on
// the shared frontier queue: clear the map, then mark the partition map
// entry for every frontier vertex, in parallel.
void breadthFirstSearchSetActivePartitions(struct GraphGrid *graph, struct ArrayQueue *sharedFrontierQueue)
{
    uint32_t idx;
    uint32_t vertex;
    graphGridResetActivePartitionsMap(graph->grid);
    #pragma omp parallel for default(none) shared(graph,sharedFrontierQueue) private(idx,vertex) schedule(dynamic,1024)
    for(idx = sharedFrontierQueue->head ; idx < sharedFrontierQueue->tail; idx++)
    {
        vertex = sharedFrontierQueue->queue[idx];
        graphGridSetActivePartitionsMap(graph->grid, vertex);
    }
}
// ********************************************************************************************
// *************** GRID DataStructure/Bitmap Frontiers **************
// ********************************************************************************************
// function STREAMVERTICES(Fv,F)
// Sum = 0
// for each vertex do
// if F(vertex) then
// Sum += Fv(edge)
// end if
// end for
// return Sum
// end function
// function STREAMEDGES(Fe,F)
// Sum = 0
// for each active block do >> block with active edges
// for each edge ∈ block do
// if F(edge.source) then
// Sum += Fe(edge)
// end if
// end for
// end for
// return Sum
// end function
//we assume that the edges are not sorted in each partition
// BFS over the Grid representation, streaming edges row by row with bitmap
// frontiers (no queues). Seeds FrontierBitmapNext with the source, swaps it
// into FrontierBitmapCurr, and iterates until a step discovers no new
// vertices. Returns the populated BFSStats (caller owns and frees it).
struct BFSStats *breadthFirstSearchRowGraphGridBitmap(struct Arguments *arguments, struct GraphGrid *graph)
{
    struct BFSStats *stats = newBFSStatsGraphGrid(graph);
    printf(" -----------------------------------------------------\n");
    printf("| %-51s | \n", "Starting BFS-Row Bitmap (SOURCE NODE)");
    printf(" -----------------------------------------------------\n");
    printf("| %-51u | \n", arguments->source);
    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15s | %-15s | \n", "Iteration", "Nodes", "Time (Seconds)");
    printf(" -----------------------------------------------------\n");
    // FIX: valid vertex ids are 0 .. num_vertices-1; the original '>' check
    // accepted source == num_vertices and caused out-of-bounds accesses.
    if(arguments->source >= graph->num_vertices)
    {
        printf(" -----------------------------------------------------\n");
        printf("| %-51s | \n", "ERROR!! CHECK SOURCE RANGE");
        printf(" -----------------------------------------------------\n");
        return stats;
    }
    struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer));
    struct Timer *timer_iteration = (struct Timer *) malloc(sizeof(struct Timer));
    struct Bitmap *FrontierBitmapCurr = newBitmap(graph->num_vertices);
    struct Bitmap *FrontierBitmapNext = newBitmap(graph->num_vertices);
    graphGridReset(graph);
    uint32_t processed_nodes = 0;
    uint32_t total_processed_nodes = 0;
    // Iteration 0: seed the next-frontier bitmap with the source vertex.
    Start(timer_iteration);
    setBit(FrontierBitmapNext, arguments->source);
    stats->parents[arguments->source] = arguments->source; // root parents itself
    processed_nodes = getNumOfSetBits(FrontierBitmapNext);
    swapBitmaps (&FrontierBitmapCurr, &FrontierBitmapNext);
    clearBitmap(FrontierBitmapNext);
    breadthFirstSearchSetActivePartitionsBitmap(graph, FrontierBitmapCurr);
    Stop(timer_iteration);
    total_processed_nodes += processed_nodes;
    printf("| %-15u | %-15u | %-15f | \n", stats->iteration++, processed_nodes, Seconds(timer_iteration));
    stats->time_total += Seconds(timer_iteration);
    Start(timer);
    while(processed_nodes) // loop until an iteration finds no new vertices
    {
        Start(timer_iteration);
        breadthFirstSearchStreamEdgesRowGraphGridBitmap(graph, FrontierBitmapCurr, FrontierBitmapNext, stats);
        Stop(timer_iteration);
        processed_nodes = getNumOfSetBits(FrontierBitmapNext);
        swapBitmaps (&FrontierBitmapCurr, &FrontierBitmapNext);
        clearBitmap(FrontierBitmapNext);
        breadthFirstSearchSetActivePartitionsBitmap(graph, FrontierBitmapCurr);
        total_processed_nodes += processed_nodes;
        stats->time_total += Seconds(timer_iteration);
        printf("| %-15u | %-15u | %-15f | \n", stats->iteration++, processed_nodes, Seconds(timer_iteration));
    } // end while
    Stop(timer);
    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15u | %-15f | \n", "No OverHead", total_processed_nodes, stats->time_total);
    printf(" -----------------------------------------------------\n");
    stats->time_total = Seconds(timer);
    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15u | %-15f | \n", "**", total_processed_nodes, Seconds(timer));
    printf(" -----------------------------------------------------\n");
    freeBitmap(FrontierBitmapCurr);
    freeBitmap(FrontierBitmapNext);
    free(timer_iteration);
    free(timer);
    return stats;
}
// BFS over the Grid representation, streaming edges column by column with
// bitmap frontiers. Identical driver loop to the row-bitmap variant but
// streams with breadthFirstSearchStreamEdgesColumnGraphGridBitmap.
// Returns the populated BFSStats (caller owns and frees it).
struct BFSStats *breadthFirstSearchColumnGraphGridBitmap(struct Arguments *arguments, struct GraphGrid *graph)
{
    struct BFSStats *stats = newBFSStatsGraphGrid(graph);
    printf(" -----------------------------------------------------\n");
    printf("| %-51s | \n", "Starting BFS-Column Bitmap (SOURCE NODE)");
    printf(" -----------------------------------------------------\n");
    printf("| %-51u | \n", arguments->source);
    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15s | %-15s | \n", "Iteration", "Nodes", "Time (Seconds)");
    printf(" -----------------------------------------------------\n");
    // FIX: valid vertex ids are 0 .. num_vertices-1; the original '>' check
    // accepted source == num_vertices and caused out-of-bounds accesses.
    if(arguments->source >= graph->num_vertices)
    {
        printf(" -----------------------------------------------------\n");
        printf("| %-51s | \n", "ERROR!! CHECK SOURCE RANGE");
        printf(" -----------------------------------------------------\n");
        return stats;
    }
    struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer));
    struct Timer *timer_iteration = (struct Timer *) malloc(sizeof(struct Timer));
    struct Bitmap *FrontierBitmapCurr = newBitmap(graph->num_vertices);
    struct Bitmap *FrontierBitmapNext = newBitmap(graph->num_vertices);
    graphGridReset(graph);
    uint32_t processed_nodes = 0;
    uint32_t total_processed_nodes = 0;
    // Iteration 0: seed the next-frontier bitmap with the source vertex.
    Start(timer_iteration);
    setBit(FrontierBitmapNext, arguments->source);
    stats->parents[arguments->source] = arguments->source; // root parents itself
    processed_nodes = getNumOfSetBits(FrontierBitmapNext);
    swapBitmaps (&FrontierBitmapCurr, &FrontierBitmapNext);
    clearBitmap(FrontierBitmapNext);
    breadthFirstSearchSetActivePartitionsBitmap(graph, FrontierBitmapCurr);
    Stop(timer_iteration);
    total_processed_nodes += processed_nodes;
    printf("| %-15u | %-15u | %-15f | \n", stats->iteration++, processed_nodes, Seconds(timer_iteration));
    stats->time_total += Seconds(timer_iteration);
    Start(timer);
    while(processed_nodes) // loop until an iteration finds no new vertices
    {
        Start(timer_iteration);
        breadthFirstSearchStreamEdgesColumnGraphGridBitmap(graph, FrontierBitmapCurr, FrontierBitmapNext, stats);
        Stop(timer_iteration);
        processed_nodes = getNumOfSetBits(FrontierBitmapNext);
        swapBitmaps (&FrontierBitmapCurr, &FrontierBitmapNext);
        clearBitmap(FrontierBitmapNext);
        breadthFirstSearchSetActivePartitionsBitmap(graph, FrontierBitmapCurr);
        total_processed_nodes += processed_nodes;
        stats->time_total += Seconds(timer_iteration);
        printf("| %-15u | %-15u | %-15f | \n", stats->iteration++, processed_nodes, Seconds(timer_iteration));
    } // end while
    Stop(timer);
    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15u | %-15f | \n", "No OverHead", total_processed_nodes, stats->time_total);
    printf(" -----------------------------------------------------\n");
    stats->time_total = Seconds(timer);
    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15u | %-15f | \n", "**", total_processed_nodes, Seconds(timer));
    printf(" -----------------------------------------------------\n");
    freeBitmap(FrontierBitmapCurr);
    freeBitmap(FrontierBitmapNext);
    free(timer_iteration);
    free(timer);
    return stats;
}
// function STREAMEDGES(Fe,F)
// Sum = 0
// for each active block do >> block with active edges
// for each edge ∈ block do
// if F(edge.source) then
// Sum += Fe(edge)
// end if
// end for
// end for
// return Sum
// end function
//we assume that the edges are not sorted in each partition
// Stream active partitions row by row (bitmap frontiers): rows of the PxP
// grid are processed sequentially, the partitions within a row in parallel.
// Inactive or empty partitions are skipped.
void breadthFirstSearchStreamEdgesRowGraphGridBitmap(struct GraphGrid *graph, struct Bitmap *FrontierBitmapCurr, struct Bitmap *FrontierBitmapNext, struct BFSStats *stats)
{
    uint32_t totalPartitions = graph->grid->num_partitions; // grid is PxP
    uint32_t row;
    for (row = 0; row < totalPartitions; ++row)
    {
        uint32_t col;
        #pragma omp parallel for default(none) shared(row,stats,totalPartitions,FrontierBitmapCurr,FrontierBitmapNext,graph)
        for (col = 0; col < totalPartitions; ++col)
        {
            uint32_t p = (row * totalPartitions) + col;
            if (getBit(graph->grid->activePartitionsMap, p) && graph->grid->partitions[p].num_edges)
            {
                breadthFirstSearchPartitionGraphGridBitmap(graph, &(graph->grid->partitions[p]), FrontierBitmapCurr, FrontierBitmapNext, stats);
            }
        }
    }
}
// Stream active partitions column major (bitmap frontiers): columns of the
// PxP grid are distributed across threads (omp for); each thread walks its
// column's partitions top to bottom sequentially. Inactive or empty
// partitions are skipped.
void breadthFirstSearchStreamEdgesColumnGraphGridBitmap(struct GraphGrid *graph, struct Bitmap *FrontierBitmapCurr, struct Bitmap *FrontierBitmapNext, struct BFSStats *stats)
{
// struct Timer* timer = (struct Timer*) malloc(sizeof(struct Timer));
uint32_t totalPartitions = 0;
totalPartitions = graph->grid->num_partitions; // PxP
#pragma omp parallel default(none) shared(stats,totalPartitions,FrontierBitmapCurr ,FrontierBitmapNext, graph)
// #pragma omp single nowait
{
uint32_t j;
// #pragma omp for schedule(dynamic, 256)
#pragma omp for
for (j = 0; j < totalPartitions; ++j)
{
uint32_t i;
for (i = 0; i < totalPartitions; ++i)
{
// skip inactive partitions and partitions with no edges
if(getBit(graph->grid->activePartitionsMap, (i * totalPartitions) + j) && graph->grid->partitions[(i * totalPartitions) + j].num_edges)
{
breadthFirstSearchPartitionGraphGridBitmap(graph, &(graph->grid->partitions[(i * totalPartitions) + j]), FrontierBitmapCurr, FrontierBitmapNext, stats);
}
}
}
}
}
// Relax every edge (src -> dest) of one grid partition, bitmap flavor:
// an unvisited dest (parents[dest] < 0) whose src lies on the current
// frontier bitmap adopts src as parent, gets distance distances[src] + 1,
// and is set in the next-frontier bitmap.
// NOTE(review): parents/distances are plain stores although callers run
// this from inside OpenMP parallel loops; the CAS variant was deliberately
// commented out upstream.
void breadthFirstSearchPartitionGraphGridBitmap(struct GraphGrid *graph, struct Partition *partition, struct Bitmap *FrontierBitmapCurr, struct Bitmap *FrontierBitmapNext, struct BFSStats *stats)
{
    uint32_t e;
    for (e = 0; e < partition->num_edges; ++e)
    {
        uint32_t src_vertex = partition->edgeList->edges_array_src[e];
        uint32_t dest_vertex = partition->edgeList->edges_array_dest[e];
        int dest_parent = stats->parents[dest_vertex];
        if ((dest_parent < 0) && getBit(FrontierBitmapCurr, src_vertex))
        {
            stats->parents[dest_vertex] = src_vertex;
            stats->distances[dest_vertex] = stats->distances[src_vertex] + 1;
            setBitAtomic(FrontierBitmapNext, dest_vertex);
        }
    }
}
// Rebuild the grid's active-partition map from a bitmap frontier: clear the
// map, then mark the map entry for every vertex whose frontier bit is set,
// in parallel.
void breadthFirstSearchSetActivePartitionsBitmap(struct GraphGrid *graph, struct Bitmap *FrontierBitmap)
{
    uint32_t vertex;
    graphGridResetActivePartitionsMap(graph->grid);
    #pragma omp parallel for default(none) shared(graph,FrontierBitmap) private(vertex) schedule(dynamic,1024)
    for(vertex = 0 ; vertex < FrontierBitmap->size; vertex++)
    {
        if(getBit(FrontierBitmap, vertex))
        {
            graphGridSetActivePartitionsMap(graph->grid, vertex);
        }
    }
}
// ********************************************************************************************
// *************** ArrayList DataStructure **************
// ********************************************************************************************
// Dispatch to the adjacency-array-list BFS variant selected by
// arguments->pushpull: 0 = pull, 1 = push, anything else (including 2) =
// direction-optimized push/pull hybrid.
// Returns the BFSStats produced by the chosen variant (caller frees).
struct BFSStats *breadthFirstSearchGraphAdjArrayList(struct Arguments *arguments, struct GraphAdjArrayList *graph)
{
    if(arguments->pushpull == 0)
    {
        return breadthFirstSearchPullGraphAdjArrayList(arguments, graph);
    }
    if(arguments->pushpull == 1)
    {
        return breadthFirstSearchPushGraphAdjArrayList(arguments, graph);
    }
    // pushpull == 2 and unknown values both select direction-optimized
    return breadthFirstSearchDirectionOptimizedGraphAdjArrayList(arguments, graph);
}
// breadth-first-search(graph, arguments->source)
// sharedFrontierQueue ← {arguments->source}
// next ← {}
// parents ← [-1,-1,. . . -1]
// while sharedFrontierQueue != {} do
// top-down-step(graph, sharedFrontierQueue, next, parents)
// sharedFrontierQueue ← next
// next ← {}
// end while
// return parents
// Pull/bottom-up BFS over the adjacency-array-list graph. The shared
// queue's two bitmaps serve as current/next frontiers; each iteration runs
// one bottom-up step and swaps them until a step discovers no new vertices.
// Returns the populated BFSStats (caller owns and frees it).
struct BFSStats *breadthFirstSearchPullGraphAdjArrayList(struct Arguments *arguments, struct GraphAdjArrayList *graph)
{
    struct BFSStats *stats = newBFSStatsGraphAdjArrayList(graph);
    printf(" -----------------------------------------------------\n");
    printf("| %-51s | \n", "Starting BFS PULL/BU (SOURCE NODE)");
    printf(" -----------------------------------------------------\n");
    printf("| %-51u | \n", arguments->source);
    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15s | %-15s | \n", "Iteration", "Nodes", "Time (Seconds)");
    printf(" -----------------------------------------------------\n");
    // FIX: valid vertex ids are 0 .. num_vertices-1; the original '>' check
    // accepted source == num_vertices and wrote out of bounds below.
    if(arguments->source >= graph->num_vertices)
    {
        printf(" -----------------------------------------------------\n");
        printf("| %-51s | \n", "ERROR!! CHECK SOURCE RANGE");
        printf(" -----------------------------------------------------\n");
        return stats;
    }
    struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer));
    struct Timer *timer_inner = (struct Timer *) malloc(sizeof(struct Timer));
    struct ArrayQueue *sharedFrontierQueue = newArrayQueue(graph->num_vertices);
    uint32_t nf = 0; // vertices discovered by the current bottom-up step
    // Iteration 0: seed the next-frontier bitmap with the source vertex.
    Start(timer_inner);
    setBit(sharedFrontierQueue->q_bitmap_next, arguments->source);
    sharedFrontierQueue->q_bitmap_next->numSetBits = 1;
    stats->parents[arguments->source] = arguments->source; // root parents itself
    swapBitmaps(&sharedFrontierQueue->q_bitmap, &sharedFrontierQueue->q_bitmap_next);
    clearBitmap(sharedFrontierQueue->q_bitmap_next);
    Stop(timer_inner);
    stats->time_total += Seconds(timer_inner);
    printf("| BU %-12u | %-15u | %-15f | \n", stats->iteration++, ++stats->processed_nodes, Seconds(timer_inner));
    Start(timer);
    while (sharedFrontierQueue->q_bitmap->numSetBits)
    {
        Start(timer_inner);
        nf = bottomUpStepGraphAdjArrayList(graph, sharedFrontierQueue->q_bitmap, sharedFrontierQueue->q_bitmap_next, stats);
        sharedFrontierQueue->q_bitmap_next->numSetBits = nf;
        swapBitmaps(&sharedFrontierQueue->q_bitmap, &sharedFrontierQueue->q_bitmap_next);
        clearBitmap(sharedFrontierQueue->q_bitmap_next);
        Stop(timer_inner);
        // stats collection
        stats->time_total += Seconds(timer_inner);
        stats->processed_nodes += nf;
        printf("| BU %-12u | %-15u | %-15f | \n", stats->iteration++, nf, Seconds(timer_inner));
    } // end while
    Stop(timer);
    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15u | %-15f | \n", "No OverHead", stats->processed_nodes, stats->time_total);
    printf(" -----------------------------------------------------\n");
    stats->time_total = Seconds(timer);
    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15u | %-15f | \n", "total", stats->processed_nodes, Seconds(timer));
    printf(" -----------------------------------------------------\n");
    freeArrayQueue(sharedFrontierQueue);
    free(timer);
    free(timer_inner);
    return stats;
}
// breadth-first-search(graph, arguments->source)
// sharedFrontierQueue ← {arguments->source}
// next ← {}
// parents ← [-1,-1,. . . -1]
// while sharedFrontierQueue != {} do
// top-down-step(graph, sharedFrontierQueue, next, parents)
// sharedFrontierQueue ← next
// next ← {}
// end while
// return parents
// Push/top-down BFS over the adjacency-array-list graph using a shared
// frontier queue plus per-thread local queues. Iterates top-down steps,
// sliding the shared queue window after each, until the frontier empties.
// Returns the populated BFSStats (caller owns and frees it).
struct BFSStats *breadthFirstSearchPushGraphAdjArrayList(struct Arguments *arguments, struct GraphAdjArrayList *graph)
{
    struct BFSStats *stats = newBFSStatsGraphAdjArrayList(graph);
    printf(" -----------------------------------------------------\n");
    printf("| %-51s | \n", "Starting BFS PUSH/TD (SOURCE NODE)");
    printf(" -----------------------------------------------------\n");
    printf("| %-51u | \n", arguments->source);
    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15s | %-15s | \n", "Iteration", "Nodes", "Time (Seconds)");
    printf(" -----------------------------------------------------\n");
    // FIX: valid vertex ids are 0 .. num_vertices-1; the original '>' check
    // accepted source == num_vertices and wrote out of bounds below.
    if(arguments->source >= graph->num_vertices)
    {
        printf(" -----------------------------------------------------\n");
        printf("| %-51s | \n", "ERROR!! CHECK SOURCE RANGE");
        printf(" -----------------------------------------------------\n");
        return stats;
    }
    struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer));
    struct Timer *timer_inner = (struct Timer *) malloc(sizeof(struct Timer));
    struct ArrayQueue *sharedFrontierQueue = newArrayQueue(graph->num_vertices);
    uint32_t P = arguments->algo_numThreads; // one local queue per thread
    struct ArrayQueue **localFrontierQueues = (struct ArrayQueue **) my_malloc( P * sizeof(struct ArrayQueue *));
    uint32_t i;
    for(i = 0 ; i < P ; i++)
    {
        localFrontierQueues[i] = newArrayQueue(graph->num_vertices);
    }
    // Iteration 0: seed the shared frontier with the source vertex.
    Start(timer_inner);
    enArrayQueue(sharedFrontierQueue, arguments->source);
    stats->parents[arguments->source] = arguments->source; // root parents itself
    Stop(timer_inner);
    stats->time_total += Seconds(timer_inner);
    printf("| TD %-12u | %-15u | %-15f | \n", stats->iteration++, ++stats->processed_nodes, Seconds(timer_inner));
    Start(timer);
    while(!isEmptyArrayQueue(sharedFrontierQueue)) // start while
    {
        Start(timer_inner);
        topDownStepGraphAdjArrayList(graph, sharedFrontierQueue, localFrontierQueues, stats);
        slideWindowArrayQueue(sharedFrontierQueue);
        Stop(timer_inner);
        // stats collection: vertices discovered this iteration
        stats->time_total += Seconds(timer_inner);
        stats->processed_nodes += sharedFrontierQueue->tail - sharedFrontierQueue->head;
        printf("| TD %-12u | %-15u | %-15f | \n", stats->iteration++, sharedFrontierQueue->tail - sharedFrontierQueue->head, Seconds(timer_inner));
    } // end while
    Stop(timer);
    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15u | %-15f | \n", "No OverHead", stats->processed_nodes, stats->time_total);
    printf(" -----------------------------------------------------\n");
    stats->time_total = Seconds(timer);
    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15u | %-15f | \n", "total", stats->processed_nodes, Seconds(timer));
    printf(" -----------------------------------------------------\n");
    for(i = 0 ; i < P ; i++)
    {
        freeArrayQueue(localFrontierQueues[i]);
    }
    free(localFrontierQueues);
    freeArrayQueue(sharedFrontierQueue);
    free(timer);
    free(timer_inner);
    return stats;
}
// breadth-first-search(graph, arguments->source)
// sharedFrontierQueue ← {arguments->source}
// next ← {}
// parents ← [-1,-1,. . . -1]
// while sharedFrontierQueue ≠ {} do
// top-down-step(graph, sharedFrontierQueue, next, parents)
// sharedFrontierQueue ← next
// next ← {}
// end while
// return parents
struct BFSStats *breadthFirstSearchDirectionOptimizedGraphAdjArrayList(struct Arguments *arguments, struct GraphAdjArrayList *graph)
{
    /*
     * Direction-optimized (hybrid push/pull) BFS after Beamer et al.:
     * switch from top-down to bottom-up when the frontier's outgoing edge
     * count mf exceeds mu/alpha, and back to top-down once the frontier
     * stops growing and shrinks below n/beta. Returns the stats object
     * (caller owns and frees it).
     */
    struct BFSStats *stats = newBFSStatsGraphAdjArrayList(graph);
    printf(" -----------------------------------------------------\n");
    printf("| %-51s | \n", "Starting BFS (SOURCE NODE)");
    printf(" -----------------------------------------------------\n");
    printf("| %-51u | \n", arguments->source);
    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15s | %-15s | \n", "Iteration", "Nodes", "Time (Seconds)");
    printf(" -----------------------------------------------------\n");
    // Valid vertex ids are 0 .. num_vertices-1, so `>=` (not `>`) is needed:
    // source == num_vertices would index out of bounds.
    if(arguments->source >= graph->num_vertices)
    {
        printf(" -----------------------------------------------------\n");
        printf("| %-51s | \n", "ERROR!! CHECK SOURCE RANGE");
        printf(" -----------------------------------------------------\n");
        return stats;
    }
    struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer));
    struct Timer *timer_inner = (struct Timer *) malloc(sizeof(struct Timer));
    struct ArrayQueue *sharedFrontierQueue = newArrayQueue(graph->num_vertices);
    struct Bitmap *bitmapCurr = newBitmap(graph->num_vertices);
    struct Bitmap *bitmapNext = newBitmap(graph->num_vertices);
    uint32_t P = arguments->algo_numThreads;
    uint32_t mu = graph->num_edges; // edges still unexamined
    uint32_t mf = graph->vertices[arguments->source].out_degree; // edges out of the current frontier
    uint32_t nf = 0;       // vertices in the current frontier
    uint32_t nf_prev = 0;  // frontier size of the previous bottom-up step
    uint32_t n = graph->num_vertices;
    uint32_t alpha = 15;   // push -> pull switch threshold (Beamer's alpha)
    uint32_t beta = 18;    // pull -> push switch threshold (Beamer's beta)
    struct ArrayQueue **localFrontierQueues = (struct ArrayQueue **) my_malloc( P * sizeof(struct ArrayQueue *));
    uint32_t i;
    for(i = 0 ; i < P ; i++)
    {
        localFrontierQueues[i] = newArrayQueue(graph->num_vertices);
    }
    // Seed the frontier with the source vertex (its own parent by convention).
    Start(timer_inner);
    enArrayQueue(sharedFrontierQueue, arguments->source);
    stats->parents[arguments->source] = arguments->source;
    Stop(timer_inner);
    stats->time_total += Seconds(timer_inner);
    printf("| TD %-12u | %-15u | %-15f | \n", stats->iteration++, ++stats->processed_nodes, Seconds(timer_inner));
    Start(timer);
    while(!isEmptyArrayQueue(sharedFrontierQueue)) // start while
    {
        if(mf > (mu / alpha)) // frontier is edge-heavy: run bottom-up (pull)
        {
            Start(timer_inner);
            arrayQueueToBitmap(sharedFrontierQueue, bitmapCurr); // E: queue -> bitmap conversion
            nf = sizeArrayQueue(sharedFrontierQueue);
            Stop(timer_inner);
            printf("| E %-12s | %-15s | %-15f | \n", " ", " ", Seconds(timer_inner));
            do
            {
                Start(timer_inner);
                nf_prev = nf;
                nf = bottomUpStepGraphAdjArrayList(graph, bitmapCurr, bitmapNext, stats);
                swapBitmaps(&bitmapCurr, &bitmapNext);
                clearBitmap(bitmapNext);
                Stop(timer_inner);
                // stats collection
                stats->time_total += Seconds(timer_inner);
                stats->processed_nodes += nf;
                printf("| BU %-12u | %-15u | %-15f | \n", stats->iteration++, nf, Seconds(timer_inner));
            }
            while(( nf > nf_prev) || // keep pulling while the frontier grows
                  ( nf > (n / beta))); // or is still a large fraction of the graph
            Start(timer_inner);
            bitmapToArrayQueue(bitmapCurr, sharedFrontierQueue, localFrontierQueues); // C: bitmap -> queue conversion
            Stop(timer_inner);
            printf("| C %-12s | %-15s | %-15f | \n", " ", " ", Seconds(timer_inner));
            mf = 1; // force at least one top-down step before reconsidering
        }
        else // frontier is small: run top-down (push)
        {
            Start(timer_inner);
            mu -= mf;
            mf = topDownStepGraphAdjArrayList(graph, sharedFrontierQueue, localFrontierQueues, stats);
            slideWindowArrayQueue(sharedFrontierQueue);
            Stop(timer_inner);
            // stats collection
            stats->time_total += Seconds(timer_inner);
            stats->processed_nodes += sharedFrontierQueue->tail - sharedFrontierQueue->head;
            printf("| TD %-12u | %-15u | %-15f | \n", stats->iteration++, sharedFrontierQueue->tail - sharedFrontierQueue->head, Seconds(timer_inner));
        }
    } // end while
    Stop(timer);
    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15u | %-15f | \n", "No OverHead", stats->processed_nodes, stats->time_total);
    printf(" -----------------------------------------------------\n");
    stats->time_total = Seconds(timer);
    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15u | %-15f | \n", "total", stats->processed_nodes, Seconds(timer));
    printf(" -----------------------------------------------------\n");
    for(i = 0 ; i < P ; i++)
    {
        freeArrayQueue(localFrontierQueues[i]);
    }
    free(localFrontierQueues);
    freeArrayQueue(sharedFrontierQueue);
    freeBitmap(bitmapNext);
    freeBitmap(bitmapCurr);
    free(timer);
    free(timer_inner);
    return stats;
}
// top-down-step(graph, sharedFrontierQueue, next, parents)
// for v ∈ sharedFrontierQueue do
// for u ∈ neighbors[v] do
// if parents[u] = -1 then
// parents[u] ← v
// next ← next ∪ {u}
// end if
// end for
// end for
// Parallel top-down (push) BFS step over the adjacency-array representation.
// Each thread scans a slice of the shared frontier [head, tail), claims
// unvisited neighbours with a compare-and-swap on stats->parents, appends
// winners to its thread-local queue, and finally flushes that queue into
// sharedFrontierQueue.
// Returns mf, an edge-count estimate for the new frontier: this assumes
// unvisited entries of stats->parents hold the negated out-degree, so
// -(u_parent) adds u's out-degree -- TODO confirm against the BFSStats
// initializer (not visible in this file chunk).
uint32_t topDownStepGraphAdjArrayList(struct GraphAdjArrayList *graph, struct ArrayQueue *sharedFrontierQueue, struct ArrayQueue **localFrontierQueues, struct BFSStats *stats)
{
uint32_t v;
uint32_t u;
uint32_t i;
uint32_t j;
uint32_t mf = 0;
uint32_t out_degree;
struct EdgeList *outNodes;
#pragma omp parallel default (none) private(out_degree,outNodes,u,v,j,i) shared(stats,localFrontierQueues,graph,sharedFrontierQueue,mf)
{
uint32_t t_id = omp_get_thread_num();
// One queue per thread, allocated by the caller; avoids contention on the shared queue.
struct ArrayQueue *localFrontierQueue = localFrontierQueues[t_id];
#pragma omp for reduction(+:mf) schedule(auto)
for(i = sharedFrontierQueue->head ; i < sharedFrontierQueue->tail; i++)
{
v = sharedFrontierQueue->queue[i];
// v = deArrayQueue(sharedFrontierQueue);
outNodes = graph->vertices[v].outNodes;
out_degree = graph->vertices[v].out_degree;
for(j = 0 ; j < out_degree ; j++)
{
u = outNodes->edges_array_dest[j];
int u_parent = stats->parents[u];
// Negative parent marks an unvisited vertex; the CAS guarantees exactly one
// thread claims u, so the queue push and the distance write below are safe.
if(u_parent < 0 )
{
if(__sync_bool_compare_and_swap(&stats->parents[u], u_parent, v))
{
enArrayQueue(localFrontierQueue, u);
stats->distances[u] = stats->distances[v] + 1;
mf += -(u_parent);
}
}
}
}
// Merge this thread's discoveries into the shared next frontier.
flushArrayQueueToShared(localFrontierQueue, sharedFrontierQueue);
}
return mf;
}
// bottom-up-step(graph, sharedFrontierQueue, next, parents)
// for v ∈ vertices do
// if parents[v] = -1 then
// for u ∈ neighbors[v] do
// if u ∈ sharedFrontierQueue then
// parents[v] ← u
// next ← next ∪ {v}
// break
// end if
// end for
// end if
// end for
// Parallel bottom-up (pull) BFS step: every still-unvisited vertex v scans
// its neighbours and adopts the first one found in the current frontier
// bitmap as its parent. stats->parents[v] needs no atomics because each v
// is owned by exactly one loop iteration; bitmapNext is shared across
// threads, hence setBitAtomic. Returns nf, the number of newly discovered
// vertices.
uint32_t bottomUpStepGraphAdjArrayList(struct GraphAdjArrayList *graph, struct Bitmap *bitmapCurr, struct Bitmap *bitmapNext, struct BFSStats *stats)
{
uint32_t v;
uint32_t u;
uint32_t j;
// uint32_t processed_nodes = bitmapCurr->numSetBits;
uint32_t nf = 0; // number of vertices added to the next frontier
// stats->processed_nodes += processed_nodes;
uint32_t degree;
struct EdgeList *Nodes;
#pragma omp parallel for default(none) private(Nodes,j,u,v,degree) shared(stats,bitmapCurr,bitmapNext,graph) reduction(+:nf) schedule(dynamic, 1024)
for(v = 0 ; v < graph->num_vertices ; v++)
{
// Negative parent marks an unvisited vertex.
if(stats->parents[v] < 0) // optmization
{
#if DIRECTED // directed graphs pull along incoming edges (inverse edge list)
Nodes = graph->vertices[v].inNodes;
degree = graph->vertices[v].in_degree;
#else
Nodes = graph->vertices[v].outNodes;
degree = graph->vertices[v].out_degree;
#endif
for(j = 0 ; j < (degree) ; j++)
{
u = Nodes->edges_array_dest[j];
if(getBit(bitmapCurr, u))
{
// First frontier neighbour wins; stop scanning (the key bottom-up saving).
stats->parents[v] = u;
setBitAtomic(bitmapNext, v);
stats->distances[v] = stats->distances[u] + 1;
nf++;
break;
}
}
}
}
return nf;
}
// ********************************************************************************************
// *************** LinkedList DataStructure **************
// ********************************************************************************************
struct BFSStats *breadthFirstSearchGraphAdjLinkedList(struct Arguments *arguments, struct GraphAdjLinkedList *graph)
{
    /*
     * Dispatch on the push/pull mode chosen by the caller:
     *   0 -> pull (bottom-up), 1 -> push (top-down),
     *   anything else (including 2) -> direction-optimized hybrid.
     */
    if(arguments->pushpull == 0)
    {
        return breadthFirstSearchPullGraphAdjLinkedList(arguments, graph);
    }
    if(arguments->pushpull == 1)
    {
        return breadthFirstSearchPushGraphAdjLinkedList(arguments, graph);
    }
    return breadthFirstSearchDirectionOptimizedGraphAdjLinkedList(arguments, graph);
}
// breadth-first-search(graph, arguments->source)
// sharedFrontierQueue ← {arguments->source}
// next ← {}
// parents ← [-1,-1,. . . -1]
// while sharedFrontierQueue ≠ {} do
// top-down-step(graph, sharedFrontierQueue, next, parents)
// sharedFrontierQueue ← next
// next ← {}
// end while
// return parents
struct BFSStats *breadthFirstSearchPullGraphAdjLinkedList(struct Arguments *arguments, struct GraphAdjLinkedList *graph)
{
    /*
     * Bottom-up (pull) BFS over the adjacency-linked-list graph. The
     * frontier lives in the queue's current/next bitmaps; each iteration
     * runs one bottom-up step until no new vertices are discovered.
     * Returns the stats object (caller owns and frees it).
     */
    struct BFSStats *stats = newBFSStatsGraphAdjLinkedList(graph);
    printf(" -----------------------------------------------------\n");
    printf("| %-51s | \n", "Starting BFS PULL/BU (SOURCE NODE)");
    printf(" -----------------------------------------------------\n");
    printf("| %-51u | \n", arguments->source);
    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15s | %-15s | \n", "Iteration", "Nodes", "Time (Seconds)");
    printf(" -----------------------------------------------------\n");
    // Valid vertex ids are 0 .. num_vertices-1, so `>=` (not `>`) is needed:
    // source == num_vertices would index out of bounds.
    if(arguments->source >= graph->num_vertices)
    {
        printf(" -----------------------------------------------------\n");
        printf("| %-51s | \n", "ERROR!! CHECK SOURCE RANGE");
        printf(" -----------------------------------------------------\n");
        return stats;
    }
    struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer));
    struct Timer *timer_inner = (struct Timer *) malloc(sizeof(struct Timer));
    struct ArrayQueue *sharedFrontierQueue = newArrayQueue(graph->num_vertices);
    uint32_t nf = 0; // vertices discovered in the current iteration
    // Seed the frontier bitmap with the source (its own parent by convention).
    Start(timer_inner);
    setBit(sharedFrontierQueue->q_bitmap_next, arguments->source);
    sharedFrontierQueue->q_bitmap_next->numSetBits = 1;
    stats->parents[arguments->source] = arguments->source;
    swapBitmaps(&sharedFrontierQueue->q_bitmap, &sharedFrontierQueue->q_bitmap_next);
    clearBitmap(sharedFrontierQueue->q_bitmap_next);
    Stop(timer_inner);
    stats->time_total += Seconds(timer_inner);
    printf("| BU %-12u | %-15u | %-15f | \n", stats->iteration++, ++stats->processed_nodes, Seconds(timer_inner));
    Start(timer);
    while (sharedFrontierQueue->q_bitmap->numSetBits) // loop until the frontier is empty
    {
        Start(timer_inner);
        nf = bottomUpStepGraphAdjLinkedList(graph, sharedFrontierQueue->q_bitmap, sharedFrontierQueue->q_bitmap_next, stats);
        sharedFrontierQueue->q_bitmap_next->numSetBits = nf;
        swapBitmaps(&sharedFrontierQueue->q_bitmap, &sharedFrontierQueue->q_bitmap_next);
        clearBitmap(sharedFrontierQueue->q_bitmap_next);
        Stop(timer_inner);
        // stats collection
        stats->time_total += Seconds(timer_inner);
        stats->processed_nodes += nf;
        printf("| BU %-12u | %-15u | %-15f | \n", stats->iteration++, nf, Seconds(timer_inner));
    } // end while
    Stop(timer);
    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15u | %-15f | \n", "No OverHead", stats->processed_nodes, stats->time_total);
    printf(" -----------------------------------------------------\n");
    stats->time_total = Seconds(timer);
    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15u | %-15f | \n", "total", stats->processed_nodes, Seconds(timer));
    printf(" -----------------------------------------------------\n");
    freeArrayQueue(sharedFrontierQueue);
    free(timer);
    free(timer_inner);
    return stats;
}
// breadth-first-search(graph, arguments->source)
// sharedFrontierQueue ← {arguments->source}
// next ← {}
// parents ← [-1,-1,. . . -1]
// while sharedFrontierQueue ≠ {} do
// top-down-step(graph, sharedFrontierQueue, next, parents)
// sharedFrontierQueue ← next
// next ← {}
// end while
// return parents
struct BFSStats *breadthFirstSearchPushGraphAdjLinkedList(struct Arguments *arguments, struct GraphAdjLinkedList *graph)
{
    /*
     * Top-down (push) BFS over the adjacency-linked-list graph.
     * Expands the shared frontier queue level by level until it is empty,
     * recording parents/distances in stats. Returns the stats object
     * (caller owns and frees it).
     */
    struct BFSStats *stats = newBFSStatsGraphAdjLinkedList(graph);
    printf(" -----------------------------------------------------\n");
    printf("| %-51s | \n", "Starting BFS PUSH/TD (SOURCE NODE)");
    printf(" -----------------------------------------------------\n");
    printf("| %-51u | \n", arguments->source);
    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15s | %-15s | \n", "Iteration", "Nodes", "Time (Seconds)");
    printf(" -----------------------------------------------------\n");
    // Valid vertex ids are 0 .. num_vertices-1, so `>=` (not `>`) is needed:
    // source == num_vertices would write out of bounds into stats->parents[].
    if(arguments->source >= graph->num_vertices)
    {
        printf(" -----------------------------------------------------\n");
        printf("| %-51s | \n", "ERROR!! CHECK SOURCE RANGE");
        printf(" -----------------------------------------------------\n");
        return stats;
    }
    struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer));
    struct Timer *timer_inner = (struct Timer *) malloc(sizeof(struct Timer));
    struct ArrayQueue *sharedFrontierQueue = newArrayQueue(graph->num_vertices);
    // One private frontier queue per thread; each top-down step flushes them
    // back into the shared queue.
    uint32_t P = arguments->algo_numThreads;
    struct ArrayQueue **localFrontierQueues = (struct ArrayQueue **) my_malloc( P * sizeof(struct ArrayQueue *));
    uint32_t i;
    for(i = 0 ; i < P ; i++)
    {
        localFrontierQueues[i] = newArrayQueue(graph->num_vertices);
    }
    // Seed the frontier with the source vertex (its own parent by convention).
    Start(timer_inner);
    enArrayQueue(sharedFrontierQueue, arguments->source);
    stats->parents[arguments->source] = arguments->source;
    Stop(timer_inner);
    stats->time_total += Seconds(timer_inner);
    printf("| TD %-12u | %-15u | %-15f | \n", stats->iteration++, ++stats->processed_nodes, Seconds(timer_inner));
    Start(timer);
    while(!isEmptyArrayQueue(sharedFrontierQueue)) // one BFS level per iteration
    {
        Start(timer_inner);
        topDownStepGraphAdjLinkedList(graph, sharedFrontierQueue, localFrontierQueues, stats);
        slideWindowArrayQueue(sharedFrontierQueue); // retire old frontier, expose the new one
        Stop(timer_inner);
        // stats collection
        stats->time_total += Seconds(timer_inner);
        stats->processed_nodes += sharedFrontierQueue->tail - sharedFrontierQueue->head;
        printf("| TD %-12u | %-15u | %-15f | \n", stats->iteration++, sharedFrontierQueue->tail - sharedFrontierQueue->head, Seconds(timer_inner));
    } // end while
    Stop(timer);
    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15u | %-15f | \n", "No OverHead", stats->processed_nodes, stats->time_total);
    printf(" -----------------------------------------------------\n");
    stats->time_total = Seconds(timer);
    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15u | %-15f | \n", "total", stats->processed_nodes, Seconds(timer));
    printf(" -----------------------------------------------------\n");
    for(i = 0 ; i < P ; i++)
    {
        freeArrayQueue(localFrontierQueues[i]);
    }
    free(localFrontierQueues);
    freeArrayQueue(sharedFrontierQueue);
    free(timer);
    free(timer_inner);
    return stats;
}
// breadth-first-search(graph, arguments->source)
// sharedFrontierQueue ← {arguments->source}
// next ← {}
// parents ← [-1,-1,. . . -1]
// while sharedFrontierQueue ≠ {} do
// top-down-step(graph, sharedFrontierQueue, next, parents)
// sharedFrontierQueue ← next
// next ← {}
// end while
// return parents
struct BFSStats *breadthFirstSearchDirectionOptimizedGraphAdjLinkedList(struct Arguments *arguments, struct GraphAdjLinkedList *graph)
{
    /*
     * Direction-optimized (hybrid push/pull) BFS after Beamer et al. over
     * the adjacency-linked-list graph: switch from top-down to bottom-up
     * when the frontier's outgoing edge count mf exceeds mu/alpha, and back
     * once the frontier stops growing and shrinks below n/beta. Returns the
     * stats object (caller owns and frees it).
     */
    struct BFSStats *stats = newBFSStatsGraphAdjLinkedList(graph);
    printf(" -----------------------------------------------------\n");
    printf("| %-51s | \n", "Starting BFS PULL/PUSH (SOURCE NODE)");
    printf(" -----------------------------------------------------\n");
    printf("| %-51u | \n", arguments->source);
    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15s | %-15s | \n", "Iteration", "Nodes", "Time (Seconds)");
    printf(" -----------------------------------------------------\n");
    // Valid vertex ids are 0 .. num_vertices-1, so `>=` (not `>`) is needed:
    // source == num_vertices would index out of bounds.
    if(arguments->source >= graph->num_vertices)
    {
        printf(" -----------------------------------------------------\n");
        printf("| %-51s | \n", "ERROR!! CHECK SOURCE RANGE");
        printf(" -----------------------------------------------------\n");
        return stats;
    }
    struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer));
    struct Timer *timer_inner = (struct Timer *) malloc(sizeof(struct Timer));
    struct ArrayQueue *sharedFrontierQueue = newArrayQueue(graph->num_vertices);
    struct Bitmap *bitmapCurr = newBitmap(graph->num_vertices);
    struct Bitmap *bitmapNext = newBitmap(graph->num_vertices);
    uint32_t P = arguments->algo_numThreads;
    uint32_t mu = graph->num_edges; // edges still unexamined
    uint32_t mf = graph->vertices[arguments->source].out_degree; // edges out of the current frontier
    uint32_t nf = 0;       // vertices in the current frontier
    uint32_t nf_prev = 0;  // frontier size of the previous bottom-up step
    uint32_t n = graph->num_vertices;
    uint32_t alpha = 15;   // push -> pull switch threshold (Beamer's alpha)
    uint32_t beta = 18;    // pull -> push switch threshold (Beamer's beta)
    struct ArrayQueue **localFrontierQueues = (struct ArrayQueue **) my_malloc( P * sizeof(struct ArrayQueue *));
    uint32_t i;
    for(i = 0 ; i < P ; i++)
    {
        localFrontierQueues[i] = newArrayQueue(graph->num_vertices);
    }
    // Seed the frontier with the source vertex (its own parent by convention).
    Start(timer_inner);
    enArrayQueue(sharedFrontierQueue, arguments->source);
    stats->parents[arguments->source] = arguments->source;
    Stop(timer_inner);
    stats->time_total += Seconds(timer_inner);
    printf("| TD %-12u | %-15u | %-15f | \n", stats->iteration++, ++stats->processed_nodes, Seconds(timer_inner));
    Start(timer);
    while(!isEmptyArrayQueue(sharedFrontierQueue)) // start while
    {
        if(mf > (mu / alpha)) // frontier is edge-heavy: run bottom-up (pull)
        {
            Start(timer_inner);
            arrayQueueToBitmap(sharedFrontierQueue, bitmapCurr); // E: queue -> bitmap conversion
            nf = sizeArrayQueue(sharedFrontierQueue);
            Stop(timer_inner);
            printf("| E %-12s | %-15s | %-15f | \n", " ", " ", Seconds(timer_inner));
            do
            {
                Start(timer_inner);
                nf_prev = nf;
                nf = bottomUpStepGraphAdjLinkedList(graph, bitmapCurr, bitmapNext, stats);
                swapBitmaps(&bitmapCurr, &bitmapNext);
                clearBitmap(bitmapNext);
                Stop(timer_inner);
                // stats collection
                stats->time_total += Seconds(timer_inner);
                stats->processed_nodes += nf;
                printf("| BU %-12u | %-15u | %-15f | \n", stats->iteration++, nf, Seconds(timer_inner));
            }
            while(( nf > nf_prev) || // keep pulling while the frontier grows
                  ( nf > (n / beta))); // or is still a large fraction of the graph
            Start(timer_inner);
            bitmapToArrayQueue(bitmapCurr, sharedFrontierQueue, localFrontierQueues); // C: bitmap -> queue conversion
            Stop(timer_inner);
            printf("| C %-12s | %-15s | %-15f | \n", " ", " ", Seconds(timer_inner));
            mf = 1; // force at least one top-down step before reconsidering
        }
        else // frontier is small: run top-down (push)
        {
            Start(timer_inner);
            mu -= mf;
            mf = topDownStepGraphAdjLinkedList(graph, sharedFrontierQueue, localFrontierQueues, stats);
            slideWindowArrayQueue(sharedFrontierQueue);
            Stop(timer_inner);
            // stats collection
            stats->time_total += Seconds(timer_inner);
            stats->processed_nodes += sharedFrontierQueue->tail - sharedFrontierQueue->head;
            printf("| TD %-12u | %-15u | %-15f | \n", stats->iteration++, sharedFrontierQueue->tail - sharedFrontierQueue->head, Seconds(timer_inner));
        }
    } // end while
    Stop(timer);
    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15u | %-15f | \n", "No OverHead", stats->processed_nodes, stats->time_total);
    printf(" -----------------------------------------------------\n");
    stats->time_total = Seconds(timer);
    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15u | %-15f | \n", "total", stats->processed_nodes, Seconds(timer));
    printf(" -----------------------------------------------------\n");
    for(i = 0 ; i < P ; i++)
    {
        freeArrayQueue(localFrontierQueues[i]);
    }
    free(localFrontierQueues);
    freeArrayQueue(sharedFrontierQueue);
    freeBitmap(bitmapNext);
    freeBitmap(bitmapCurr);
    free(timer);
    free(timer_inner);
    return stats;
}
// top-down-step(graph, sharedFrontierQueue, next, parents)
// for v ∈ sharedFrontierQueue do
// for u ∈ neighbors[v] do
// if parents[u] = -1 then
// parents[u] ← v
// next ← next ∪ {u}
// end if
// end for
// end for
// Parallel top-down (push) BFS step over the linked-list representation.
// Same scheme as the adjacency-array variant, but neighbours are reached by
// walking the per-vertex singly linked list. Each thread scans a slice of
// the shared frontier [head, tail), claims unvisited neighbours with a
// compare-and-swap on stats->parents, and flushes its thread-local queue
// into sharedFrontierQueue at the end.
// Returns mf, an edge-count estimate for the new frontier: this assumes
// unvisited entries of stats->parents hold the negated out-degree, so
// -(u_parent) adds u's out-degree -- TODO confirm against the BFSStats
// initializer (not visible in this file chunk).
uint32_t topDownStepGraphAdjLinkedList(struct GraphAdjLinkedList *graph, struct ArrayQueue *sharedFrontierQueue, struct ArrayQueue **localFrontierQueues, struct BFSStats *stats)
{
uint32_t v;
uint32_t u;
uint32_t i;
uint32_t j;
uint32_t mf = 0;
uint32_t out_degree;
struct AdjLinkedListNode *outNodes;
#pragma omp parallel default (none) private(out_degree,outNodes,u,v,j,i) shared(stats,localFrontierQueues,graph,sharedFrontierQueue,mf)
{
uint32_t t_id = omp_get_thread_num();
// One queue per thread, allocated by the caller; avoids contention on the shared queue.
struct ArrayQueue *localFrontierQueue = localFrontierQueues[t_id];
#pragma omp for reduction(+:mf) schedule(auto)
for(i = sharedFrontierQueue->head ; i < sharedFrontierQueue->tail; i++)
{
v = sharedFrontierQueue->queue[i];
// v = deArrayQueue(sharedFrontierQueue);
outNodes = graph->vertices[v].outNodes;
out_degree = graph->vertices[v].out_degree;
for(j = 0 ; j < out_degree ; j++)
{
u = outNodes->dest;
outNodes = outNodes->next; // traverse pointer to the next list node
int u_parent = stats->parents[u];
// Negative parent marks an unvisited vertex; the CAS guarantees exactly one
// thread claims u, so the queue push and the distance write below are safe.
if(u_parent < 0 )
{
if(__sync_bool_compare_and_swap(&stats->parents[u], u_parent, v))
{
enArrayQueue(localFrontierQueue, u);
stats->distances[u] = stats->distances[v] + 1;
mf += -(u_parent);
}
}
}
}
// Merge this thread's discoveries into the shared next frontier.
flushArrayQueueToShared(localFrontierQueue, sharedFrontierQueue);
}
return mf;
}
// bottom-up-step(graph, sharedFrontierQueue, next, parents)
// for v ∈ vertices do
// if parents[v] = -1 then
// for u ∈ neighbors[v] do
// if u ∈ sharedFrontierQueue then
// parents[v] ← u
// next ← next ∪ {v}
// break
// end if
// end for
// end if
// end for
// Parallel bottom-up (pull) BFS step over the linked-list representation:
// every still-unvisited vertex v walks its neighbour list and adopts the
// first neighbour found in the current frontier bitmap as its parent.
// stats->parents[v] needs no atomics because each v is owned by exactly
// one loop iteration; bitmapNext is shared across threads, hence
// setBitAtomic. Returns nf, the number of newly discovered vertices.
uint32_t bottomUpStepGraphAdjLinkedList(struct GraphAdjLinkedList *graph, struct Bitmap *bitmapCurr, struct Bitmap *bitmapNext, struct BFSStats *stats)
{
uint32_t v;
uint32_t u;
uint32_t j;
// uint32_t processed_nodes = bitmapCurr->numSetBits;
uint32_t nf = 0; // number of vertices added to the next frontier
// stats->processed_nodes += processed_nodes;
uint32_t degree;
struct AdjLinkedListNode *Nodes;
#pragma omp parallel for default(none) private(Nodes,j,u,v,degree) shared(stats,bitmapCurr,bitmapNext,graph) reduction(+:nf) schedule(dynamic, 1024)
for(v = 0 ; v < graph->num_vertices ; v++)
{
// Negative parent marks an unvisited vertex.
if(stats->parents[v] < 0) // optmization
{
#if DIRECTED // directed graphs pull along incoming edges (inverse edge list)
Nodes = graph->vertices[v].inNodes;
degree = graph->vertices[v].in_degree;
#else
Nodes = graph->vertices[v].outNodes;
degree = graph->vertices[v].out_degree;
#endif
for(j = 0 ; j < (degree) ; j++)
{
u = Nodes->dest;
Nodes = Nodes->next; // traverse pointer to the next list node
if(getBit(bitmapCurr, u))
{
// First frontier neighbour wins; stop scanning (the key bottom-up saving).
stats->parents[v] = u;
setBitAtomic(bitmapNext, v);
stats->distances[v] = stats->distances[u] + 1;
nf++;
break;
}
}
}
}
return nf;
}
|
penalty.h | /*
The MIT License (MIT)
Copyright (c) 2015 Konstantinos Krestenitis
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#include "../contactpoint.h"
#include <vector>
#include <limits>
#include <float.h>
#include "../../core/algo.h"
#ifdef peanoCall
#include "peano/utils/Loop.h"
#include "tarch/multicore/Lock.h"
#include "tarch/multicore/BooleanSemaphore.h"
#endif
#define MaxNumberOfNewtonIterations 16
namespace delta {
namespace contact {
namespace detection {
void cleanPenaltyStatistics();
/**
* The penalty() operation that accepts whole batches of triangles, i.e.
* triangulated bodies, keeps some statistics about the number of
* comparisons. You can retrieve this information with this routine,
* and clean the statistics with cleanPenaltyStatistics(). The whole
* statistics stuff is not thread-safe.
*/
std::vector<int> getPenaltyIterationStatistics();
std::vector<std::vector<iREAL>> getPenaltyErrorStatistics();
/**
*
* @see hybrid() or bf() for a description of the remaining parameters.
*/
std::vector<contactpoint> penaltyStat(
const iREAL* xCoordinatesOfPointsOfGeometryA,
const iREAL* yCoordinatesOfPointsOfGeometryA,
const iREAL* zCoordinatesOfPointsOfGeometryA,
const int numberOfTrianglesOfGeometryA,
const iREAL epsilonA,
const bool frictionA,
const int particleA,
const iREAL* xCoordinatesOfPointsOfGeometryB,
const iREAL* yCoordinatesOfPointsOfGeometryB,
const iREAL* zCoordinatesOfPointsOfGeometryB,
const int numberOfTrianglesOfGeometryB,
const iREAL epsilonB,
const bool frictionB,
const int particleB);
#if defined(SharedTBB) && defined(peanoCall)
std::vector<contactpoint> penalty(
const iREAL* xCoordinatesOfPointsOfGeometryA,
const iREAL* yCoordinatesOfPointsOfGeometryA,
const iREAL* zCoordinatesOfPointsOfGeometryA,
const int numberOfTrianglesOfGeometryA,
const iREAL epsilonA,
const bool frictionA,
const int particleA,
const iREAL* xCoordinatesOfPointsOfGeometryB,
const iREAL* yCoordinatesOfPointsOfGeometryB,
const iREAL* zCoordinatesOfPointsOfGeometryB,
const int numberOfTrianglesOfGeometryB,
const iREAL epsilonB,
const bool frictionB,
const int particleB,
tarch::multicore::BooleanSemaphore &semaphore
);
#else
std::vector<contactpoint> penalty(
const iREAL* xCoordinatesOfPointsOfGeometryA,
const iREAL* yCoordinatesOfPointsOfGeometryA,
const iREAL* zCoordinatesOfPointsOfGeometryA,
const int numberOfTrianglesOfGeometryA,
const iREAL epsilonA,
const bool frictionA,
const int particleA,
const iREAL* xCoordinatesOfPointsOfGeometryB,
const iREAL* yCoordinatesOfPointsOfGeometryB,
const iREAL* zCoordinatesOfPointsOfGeometryB,
const int numberOfTrianglesOfGeometryB,
const iREAL epsilonB,
const bool frictionB,
const int particleB
);
#endif
#pragma omp declare simd
#pragma omp declare simd linear(xCoordinatesOfTriangleA:3) linear(yCoordinatesOfTriangleA:3) linear(zCoordinatesOfTriangleA:3) linear(xCoordinatesOfTriangleB:3) linear(yCoordinatesOfTriangleB:3) linear(zCoordinatesOfTriangleB:3) nomask notinbranch
extern void penaltySolver(
const iREAL *xCoordinatesOfTriangleA,
const iREAL *yCoordinatesOfTriangleA,
const iREAL *zCoordinatesOfTriangleA,
const iREAL *xCoordinatesOfTriangleB,
const iREAL *yCoordinatesOfTriangleB,
const iREAL *zCoordinatesOfTriangleB,
iREAL& xPA,
iREAL& yPA,
iREAL& zPA,
iREAL& xPB,
iREAL& yPB,
iREAL& zPB,
iREAL MaxErrorOfPenaltyMethod,
bool& failed);
/**
* This is a second variant of the penalty method. Unlike the one above,
* it terminates as soon as the error drops below maxError or the number
* of iterations exceeds maxNumberOfNewtonIterations.
*/
void penaltySolver(
const iREAL *xCoordinatesOfTriangleA,
const iREAL *yCoordinatesOfTriangleA,
const iREAL *zCoordinatesOfTriangleA,
const iREAL *xCoordinatesOfTriangleB,
const iREAL *yCoordinatesOfTriangleB,
const iREAL *zCoordinatesOfTriangleB,
iREAL& xPA,
iREAL& yPA,
iREAL& zPA,
iREAL& xPB,
iREAL& yPB,
iREAL& zPB,
iREAL maxError,
int& numberOfNewtonIterationsRequired);
}
}
}
|
problem.sine.c | //------------------------------------------------------------------------------------------------------------------------------
// Samuel Williams
// SWWilliams@lbl.gov
// Lawrence Berkeley National Lab
//------------------------------------------------------------------------------------------------------------------------------
#ifndef M_PI
#define M_PI 3.14159265358979323846 // in case math.h doesn't define it
#endif
void evaluateBeta(double x, double y, double z, double *B, double *Bx, double *By, double *Bz){
double Bmin = 1.0;
double Bmax = 10.0;
double c2 = (Bmax-Bmin)/2; // coefficients to affect this transition
double c1 = (Bmax+Bmin)/2;
double c3 = 10.0; // how sharply (B)eta transitions
double xcenter = 0.50;
double ycenter = 0.50;
double zcenter = 0.50;
// calculate distance from center of the domain (0.5,0.5,0.5)
double r2 = pow((x-xcenter),2) + pow((y-ycenter),2) + pow((z-zcenter),2);
double r2x = 2.0*(x-xcenter);
double r2y = 2.0*(y-ycenter);
double r2z = 2.0*(z-zcenter);
//double r2xx = 2.0;
//double r2yy = 2.0;
//double r2zz = 2.0;
double r = pow(r2,0.5);
double rx = 0.5*r2x*pow(r2,-0.5);
double ry = 0.5*r2y*pow(r2,-0.5);
double rz = 0.5*r2z*pow(r2,-0.5);
//double rxx = 0.5*r2xx*pow(r2,-0.5) - 0.25*r2x*r2x*pow(r2,-1.5);
//double ryy = 0.5*r2yy*pow(r2,-0.5) - 0.25*r2y*r2y*pow(r2,-1.5);
//double rzz = 0.5*r2zz*pow(r2,-0.5) - 0.25*r2z*r2z*pow(r2,-1.5);
//- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
*B = c1+c2*tanh( c3*(r-0.25) );
*Bx = c2*c3*rx*(1-pow(tanh( c3*(r-0.25) ),2));
*By = c2*c3*ry*(1-pow(tanh( c3*(r-0.25) ),2));
*Bz = c2*c3*rz*(1-pow(tanh( c3*(r-0.25) ),2));
}
//------------------------------------------------------------------------------------------------------------------------------
void evaluateU(double x, double y, double z, double *U, double *Ux, double *Uy, double *Uz, double *Uxx, double *Uyy, double *Uzz, int isPeriodic){
double c1 = 2.0*M_PI;
double c2 = 6.0*M_PI;
double p = 13; // must be odd(?) and allows up to p-2 order MG
*U = pow(sin(c1*x),p )*pow(sin(c1*y),p)*pow(sin(c1*z),p);
*Ux = c1*p*cos(c1*x)*pow(sin(c1*x),p-1)*pow(sin(c1*y),p)*pow(sin(c1*z),p);
*Uy = c1*p*cos(c1*y)*pow(sin(c1*y),p-1)*pow(sin(c1*x),p)*pow(sin(c1*z),p);
*Uz = c1*p*cos(c1*z)*pow(sin(c1*z),p-1)*pow(sin(c1*x),p)*pow(sin(c1*y),p);
*Uxx = c1*c1*p*( (p-1)*pow(sin(c1*x),p-2)*pow(cos(c1*x),2) - pow(sin(c1*x),p) )*pow(sin(c1*y),p)*pow(sin(c1*z),p);
*Uyy = c1*c1*p*( (p-1)*pow(sin(c1*y),p-2)*pow(cos(c1*y),2) - pow(sin(c1*y),p) )*pow(sin(c1*x),p)*pow(sin(c1*z),p);
*Uzz = c1*c1*p*( (p-1)*pow(sin(c1*z),p-2)*pow(cos(c1*z),2) - pow(sin(c1*z),p) )*pow(sin(c1*x),p)*pow(sin(c1*y),p);
*U += pow(sin(c2*x),p )*pow(sin(c2*y),p)*pow(sin(c2*z),p);
*Ux += c2*p*cos(c2*x)*pow(sin(c2*x),p-1)*pow(sin(c2*y),p)*pow(sin(c2*z),p);
*Uy += c2*p*cos(c2*y)*pow(sin(c2*y),p-1)*pow(sin(c2*x),p)*pow(sin(c2*z),p);
*Uz += c2*p*cos(c2*z)*pow(sin(c2*z),p-1)*pow(sin(c2*x),p)*pow(sin(c2*y),p);
*Uxx += c2*c2*p*( (p-1)*pow(sin(c2*x),p-2)*pow(cos(c2*x),2) - pow(sin(c2*x),p) )*pow(sin(c2*y),p)*pow(sin(c2*z),p);
*Uyy += c2*c2*p*( (p-1)*pow(sin(c2*y),p-2)*pow(cos(c2*y),2) - pow(sin(c2*y),p) )*pow(sin(c2*x),p)*pow(sin(c2*z),p);
*Uzz += c2*c2*p*( (p-1)*pow(sin(c2*z),p-2)*pow(cos(c2*z),2) - pow(sin(c2*z),p) )*pow(sin(c2*x),p)*pow(sin(c2*y),p);
}
//------------------------------------------------------------------------------------------------------------------------------
// Populate the right-hand side (VECTOR_F) and the face-centered coefficient
// vectors (VECTOR_BETA_I/J/K, and VECTOR_ALPHA if present) for every box owned
// by this process, sampling the analytic problem at cell centers.
void initialize_problem(level_type * level, double hLevel, double a, double b){
  level->h = hLevel;
  int box;
  for(box=0;box<level->num_my_boxes;box++){
    const int jStride = level->my_boxes[box].jStride;
    const int kStride = level->my_boxes[box].kStride;
    const int ghosts  = level->my_boxes[box].ghosts;
    const int dim_i   = level->my_boxes[box].dim;
    const int dim_j   = level->my_boxes[box].dim;
    const int dim_k   = level->my_boxes[box].dim;
    #ifdef _OPENMP
    #pragma omp parallel for collapse(3)
    #endif
    for(int k=0;k<=dim_k;k++){        // "<=" : include the high face
    for(int j=0;j<=dim_j;j++){        // "<=" : include the high face
    for(int i=0;i<=dim_i;i++){        // "<=" : include the high face
      const int ijk = (i+ghosts) + (j+ghosts)*jStride + (k+ghosts)*kStride;
      // cell-centered coordinates: +0.5 moves from the cell corner to its center
      const double x = hLevel*( (double)(i+level->my_boxes[box].low.i) + 0.5 );
      const double y = hLevel*( (double)(j+level->my_boxes[box].low.j) + 0.5 );
      const double z = hLevel*( (double)(k+level->my_boxes[box].low.k) + 0.5 );
      // defaults correspond to the constant-coefficient problem
      double A  = 1.0;
      double B  = 1.0;
      double Bx = 0.0, By = 0.0, Bz = 0.0;
      double Bi = 1.0, Bj = 1.0, Bk = 1.0;
      #ifdef STENCIL_VARIABLE_COEFFICIENT // variable coefficient problem...
      evaluateBeta(x-hLevel*0.5,y           ,z           ,&Bi,&Bx,&By,&Bz); // face-centered value of Beta for beta_i
      evaluateBeta(x           ,y-hLevel*0.5,z           ,&Bj,&Bx,&By,&Bz); // face-centered value of Beta for beta_j
      evaluateBeta(x           ,y           ,z-hLevel*0.5,&Bk,&Bx,&By,&Bz); // face-centered value of Beta for beta_k
      evaluateBeta(x           ,y           ,z           ,&B ,&Bx,&By,&Bz); // cell-centered value of Beta
      #endif
      double U,Ux,Uy,Uz,Uxx,Uyy,Uzz;
      evaluateU(x,y,z,&U,&Ux,&Uy,&Uz,&Uxx,&Uyy,&Uzz, (level->boundary_condition.type == BC_PERIODIC) );
      // f = a*alpha*u - b*( grad(beta).grad(u) + beta*laplacian(u) )
      const double F = a*A*U - b*( (Bx*Ux + By*Uy + Bz*Uz) + B*(Uxx + Uyy + Uzz) );
      level->my_boxes[box].vectors[VECTOR_BETA_I][ijk] = Bi;
      level->my_boxes[box].vectors[VECTOR_BETA_J][ijk] = Bj;
      level->my_boxes[box].vectors[VECTOR_BETA_K][ijk] = Bk;
      #ifdef VECTOR_ALPHA
      level->my_boxes[box].vectors[VECTOR_ALPHA ][ijk] = A;
      #endif
      //level->my_boxes[box].vectors[VECTOR_UTRUE ][ijk] = U; // obviated by Richardson analysis
      level->my_boxes[box].vectors[VECTOR_F     ][ijk] = F;
    }}}
  }
}
//------------------------------------------------------------------------------------------------------------------------------
|
cg.c | //-------------------------------------------------------------------------//
// //
// This benchmark is an OpenMP C version of the NPB CG code. This OpenMP //
// C version is developed by the Center for Manycore Programming at Seoul //
// National University and derived from the OpenMP Fortran versions in //
// "NPB3.3-OMP" developed by NAS. //
// //
// Permission to use, copy, distribute and modify this software for any //
// purpose with or without fee is hereby granted. This software is //
// provided "as is" without express or implied warranty. //
// //
// Information on NPB 3.3, including the technical report, the original //
// specifications, source code, results and information on how to submit //
// new results, is available at: //
// //
// http://www.nas.nasa.gov/Software/NPB/ //
// //
// Send comments or suggestions for this OpenMP C version to //
// cmp@aces.snu.ac.kr //
// //
// Center for Manycore Programming //
// School of Computer Science and Engineering //
// Seoul National University //
// Seoul 151-744, Korea //
// //
// E-mail: cmp@aces.snu.ac.kr //
// //
//-------------------------------------------------------------------------//
//-------------------------------------------------------------------------//
// Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo, //
// and Jaejin Lee //
//-------------------------------------------------------------------------//
//---------------------------------------------------------------------
// program cg
//---------------------------------------------------------------------
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "globals.h"
#include "randdp.h"
#include "timers.h"
#include "print_results.h"
//---------------------------------------------------------------------
/* common / main_int_mem / */
static int colidx[NZ];
static int rowstr[NA+1];
static int iv[NZ+1+NA];
static int arow[NA+1];
static int acol[NAZ];
/* common / main_flt_mem / */
static double v[NZ];
static double aelt[NAZ];
static double a[NZ];
static double x[NA+2];
static double z[NA+2];
static double p[NA+2];
static double q[NA+2];
static double r[NA+2];
/* common /tinof/ */
static int myid, num_threads, ilow, ihigh;
#pragma omp threadprivate(myid, num_threads, ilow, ihigh)
#define max_threads 1024
static int last_n[max_threads+1];
/* common / partit_size / */
static int naa;
static int nzz;
static int firstrow;
static int lastrow;
static int firstcol;
static int lastcol;
/* common /urando/ */
static double amult;
static double tran;
#pragma omp threadprivate (amult,tran)
/* common /timers/ */
static logical timeron;
//---------------------------------------------------------------------
//---------------------------------------------------------------------
static void conj_grad(int colidx[],
int rowstr[],
double x[],
double z[],
double a[],
double p[],
double q[],
double r[],
double *rnorm);
static void makea(int n,
int nz,
double a[],
int colidx[],
int rowstr[],
int firstrow,
int lastrow,
int firstcol,
int lastcol,
int arow[],
int acol[][NONZER+1],
double aelt[][NONZER+1],
double v[],
int iv[]);
static void sparse(double a[],
int colidx[],
int rowstr[],
int n,
int nz,
int nozer,
int arow[],
int acol[][NONZER+1],
double aelt[][NONZER+1],
int firstrow,
int lastrow,
int last_n[],
double v[],
int iv[],
int nzloc[],
double rcond,
double shift);
static void sprnvc(int n, int nz, int nn1, double v[], int iv[]);
static int icnvrt(double x, int ipwr2);
static void vecset(int n, double v[], int iv[], int *nzv, int i, double val);
//---------------------------------------------------------------------
// Driver for the CG benchmark: builds the sparse matrix, runs one untimed
// warm-up inverse-power-method iteration, then NITER timed iterations, and
// verifies the computed eigenvalue estimate (zeta) against the class's
// reference value.
int main(int argc, char *argv[])
{
  int i, j, k, it;
  double zeta;
  double rnorm;
  double norm_temp1, norm_temp2;
  double t, mflops, tmax;
  char Class;
  logical verified;
  double zeta_verify_value, epsilon, err;
  char *t_names[T_last];

  for (i = 0; i < T_last; i++) {
    timer_clear(i);
  }

  // Section timers are reported only when a "timer.flag" file is present.
  FILE *fp;
  if ((fp = fopen("timer.flag", "r")) != NULL) {
    timeron = true;
    t_names[T_init] = "init";
    t_names[T_bench] = "benchmk";
    t_names[T_conj_grad] = "conjgd";
    fclose(fp);
  } else {
    timeron = false;
  }

  timer_start(T_init);

  firstrow = 0;
  lastrow  = NA-1;
  firstcol = 0;
  lastcol  = NA-1;

  // Recognize the problem class from the compile-time parameters.
  if (NA == 1400 && NONZER == 7 && NITER == 15 && SHIFT == 10) {
    Class = 'S';
    zeta_verify_value = 8.5971775078648;
  } else if (NA == 7000 && NONZER == 8 && NITER == 15 && SHIFT == 12) {
    Class = 'W';
    zeta_verify_value = 10.362595087124;
  } else if (NA == 14000 && NONZER == 11 && NITER == 15 && SHIFT == 20) {
    Class = 'A';
    zeta_verify_value = 17.130235054029;
  } else if (NA == 75000 && NONZER == 13 && NITER == 75 && SHIFT == 60) {
    Class = 'B';
    zeta_verify_value = 22.712745482631;
  } else if (NA == 150000 && NONZER == 15 && NITER == 75 && SHIFT == 110) {
    Class = 'C';
    zeta_verify_value = 28.973605592845;
  } else if (NA == 1500000 && NONZER == 21 && NITER == 100 && SHIFT == 500) {
    Class = 'D';
    zeta_verify_value = 52.514532105794;
  } else if (NA == 9000000 && NONZER == 26 && NITER == 100 && SHIFT == 1500) {
    Class = 'E';
    zeta_verify_value = 77.522164599383;
  } else {
    Class = 'U';
  }

  printf("\n\n NAS Parallel Benchmarks (NPB3.3-OMP-C) - CG Benchmark\n\n");
  printf(" Size: %11d\n", NA);
  printf(" Iterations: %5d\n", NITER);
#ifdef _OPENMP
  // BUG FIX: omp_get_max_threads() was called unconditionally although
  // <omp.h> is only included under #ifdef _OPENMP; guard the call so the
  // benchmark still builds without OpenMP support.
  printf(" Number of available threads: %5d\n", omp_get_max_threads());
#else
  printf(" Number of available threads: %5d\n", 1);
#endif
  printf("\n");

  naa = NA;
  nzz = NZ;

  //---------------------------------------------------------------------
  // Initialize random number generator (tran/amult are threadprivate, so
  // every thread starts an identical stream) and build the sparse matrix.
  //---------------------------------------------------------------------
  #pragma omp parallel default(shared) private(i,j,k,zeta)
  {
    tran  = 314159265.0;
    amult = 1220703125.0;
    zeta  = randlc(&tran, amult);

    // makea is called from inside the parallel region: each thread fills
    // its own contiguous slice of rows (see makea/sparse).
    makea(naa, nzz, a, colidx, rowstr,
          firstrow, lastrow, firstcol, lastcol,
          arow,
          (int (*)[NONZER+1])(void*)acol,
          (double (*)[NONZER+1])(void*)aelt,
          v, iv);
    #pragma omp barrier

    //---------------------------------------------------------------------
    // Note: as a result of the above call to makea:
    // values of j used in indexing rowstr go from 0 --> lastrow-firstrow
    // values of colidx which are col indexes go from firstcol --> lastcol
    // So:
    // Shift the col index vals from actual (firstcol --> lastcol )
    // to local, i.e., (0 --> lastcol-firstcol)
    //---------------------------------------------------------------------
    #pragma omp for
    for (j = 0; j < lastrow - firstrow + 1; j++) {
      for (k = rowstr[j]; k < rowstr[j+1]; k++) {
        colidx[k] = colidx[k] - firstcol;
      }
    }

    // set starting vector to (1, 1, .... 1)
    #pragma omp for
    for (i = 0; i < NA+1; i++) {
      x[i] = 1.0;
    }
    #pragma omp for
    for (j = 0; j < lastcol - firstcol + 1; j++) {
      q[j] = 0.0;
      z[j] = 0.0;
      r[j] = 0.0;
      p[j] = 0.0;
    }
  }

  zeta = 0.0;

  //---------------------------------------------------------------------
  // Do one iteration untimed to init all code and data page tables
  // (then reinit, start timing, to niter its)
  //---------------------------------------------------------------------
  for (it = 1; it <= 1; it++) {
    conj_grad(colidx, rowstr, x, z, a, p, q, r, &rnorm);

    // zeta = shift + 1/(x.z); also need ||z|| for the normalization below
    norm_temp1 = 0.0;
    norm_temp2 = 0.0;
    #pragma omp parallel for default(shared) private(j) \
                             reduction(+:norm_temp1,norm_temp2)
    for (j = 0; j < lastcol - firstcol + 1; j++) {
      norm_temp1 = norm_temp1 + x[j] * z[j];
      norm_temp2 = norm_temp2 + z[j] * z[j];
    }
    norm_temp2 = 1.0 / sqrt(norm_temp2);

    // Normalize z to obtain x
    #pragma omp parallel for default(shared) private(j)
    for (j = 0; j < lastcol - firstcol + 1; j++) {
      x[j] = norm_temp2 * z[j];
    }
  } // end of do one iteration untimed

  // Reset the starting vector so the timed run is identical to a cold start.
  #pragma omp parallel for default(shared) private(i)
  for (i = 0; i < NA+1; i++) {
    x[i] = 1.0;
  }
  zeta = 0.0;

  timer_stop(T_init);
  printf(" Initialization time = %15.3f seconds\n", timer_read(T_init));

  timer_start(T_bench);

  //---------------------------------------------------------------------
  // Main Iteration for inverse power method
  //---------------------------------------------------------------------
  for (it = 1; it <= NITER; it++) {
    if (timeron) timer_start(T_conj_grad);
    conj_grad(colidx, rowstr, x, z, a, p, q, r, &rnorm);
    if (timeron) timer_stop(T_conj_grad);

    // zeta = shift + 1/(x.z); also accumulate z.z for the normalization
    norm_temp1 = 0.0;
    norm_temp2 = 0.0;
    #pragma omp parallel for default(shared) private(j) \
                             reduction(+:norm_temp1,norm_temp2)
    for (j = 0; j < lastcol - firstcol + 1; j++) {
      norm_temp1 = norm_temp1 + x[j]*z[j];
      norm_temp2 = norm_temp2 + z[j]*z[j];
    }
    norm_temp2 = 1.0 / sqrt(norm_temp2);
    zeta = SHIFT + 1.0 / norm_temp1;

    if (it == 1)
      printf("\n iteration ||r|| zeta\n");
    printf(" %5d %20.14E%20.13f\n", it, rnorm, zeta);

    // Normalize z to obtain x
    #pragma omp parallel for default(shared) private(j)
    for (j = 0; j < lastcol - firstcol + 1; j++) {
      x[j] = norm_temp2 * z[j];
    }
  } // end of main iter inv pow meth

  timer_stop(T_bench);

  //---------------------------------------------------------------------
  // End of timed section; verify zeta against the class reference value.
  //---------------------------------------------------------------------
  t = timer_read(T_bench);

  printf(" Benchmark completed\n");

  epsilon = 1.0e-10;
  if (Class != 'U') {
    err = fabs(zeta - zeta_verify_value) / zeta_verify_value;
    if (err <= epsilon) {
      verified = true;
      printf(" VERIFICATION SUCCESSFUL\n");
      printf(" Zeta is %20.13E\n", zeta);
      printf(" Error is %20.13E\n", err);
    } else {
      verified = false;
      printf(" VERIFICATION FAILED\n");
      printf(" Zeta %20.13E\n", zeta);
      printf(" The correct zeta is %20.13E\n", zeta_verify_value);
    }
  } else {
    verified = false;
    printf(" Problem size unknown\n");
    printf(" NO VERIFICATION PERFORMED\n");
  }

  // Operation count model for CG (per the NPB specification).
  if (t != 0.0) {
    mflops = (double)(2*NITER*NA)
           * (3.0+(double)(NONZER*(NONZER+1))
             + 25.0*(5.0+(double)(NONZER*(NONZER+1)))
             + 3.0) / t / 1000000.0;
  } else {
    mflops = 0.0;
  }

  print_results("CG", Class, NA, 0, 0,
                NITER, t,
                mflops, " floating point",
                verified, NPBVERSION, COMPILETIME,
                CS1, CS2, CS3, CS4, CS5, CS6, CS7);

  //---------------------------------------------------------------------
  // More timers
  //---------------------------------------------------------------------
  if (timeron) {
    tmax = timer_read(T_bench);
    if (tmax == 0.0) tmax = 1.0;
    printf(" SECTION Time (secs)\n");
    for (i = 0; i < T_last; i++) {
      t = timer_read(i);
      if (i == T_init) {
        printf(" %8s:%9.3f\n", t_names[i], t);
      } else {
        printf(" %8s:%9.3f (%6.2f%%)\n", t_names[i], t, t*100.0/tmax);
        if (i == T_conj_grad) {
          t = tmax - t;
          printf(" --> %8s:%9.3f (%6.2f%%)\n", "rest", t, t*100.0/tmax);
        }
      }
    }
  }

  return 0;
}
//---------------------------------------------------------------------
// Floaging point arrays here are named as in NPB1 spec discussion of
// CG algorithm
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// One CG solve: runs cgitmax (=25) conjugate-gradient iterations to
// approximately solve A.z = x, then returns the explicitly computed
// residual norm ||x - A.z|| through *rnorm.
//
// Parallelization: a single parallel region spans the whole solve.
// Scalar reductions (rho, d, sum) use 'reduction' worksharing loops;
// rho0/d/rho are (re)initialized by the master thread between the
// implied barrier of the preceding loop and the explicit barrier below,
// so no other thread reads or writes them concurrently.
//---------------------------------------------------------------------
static void conj_grad(int colidx[],
                      int rowstr[],
                      double x[],
                      double z[],
                      double a[],
                      double p[],
                      double q[],
                      double r[],
                      double *rnorm)
{
  int j, k;
  int cgit, cgitmax = 25;
  double d, sum, rho, rho0, alpha, beta, suml;

  rho = 0.0;
  sum = 0.0;

  #pragma omp parallel default(shared) private(j,k,cgit,suml,alpha,beta) \
                       shared(d,rho0,rho,sum)
  {
    //---------------------------------------------------------------------
    // Initialize the CG algorithm:
    //   z = 0, r = p = x  (initial guess z=0, so initial residual is x)
    //---------------------------------------------------------------------
    #pragma omp for
    for (j = 0; j < naa+1; j++) {
      q[j] = 0.0;
      z[j] = 0.0;
      r[j] = x[j];
      p[j] = r[j];
    }

    //---------------------------------------------------------------------
    // rho = r.r
    // Now, obtain the norm of r: First, sum squares of r elements locally...
    //---------------------------------------------------------------------
    #pragma omp for reduction(+:rho)
    for (j = 0; j < lastcol - firstcol + 1; j++) {
      rho = rho + r[j]*r[j];
    }

    //---------------------------------------------------------------------
    //---->
    // The conj grad iteration loop
    //---->
    //---------------------------------------------------------------------
    for (cgit = 1; cgit <= cgitmax; cgit++) {
      #pragma omp master
      {
        //---------------------------------------------------------------------
        // Save a temporary of rho and initialize reduction variables
        // (safe: non-master threads wait at the barrier just below and the
        // previous reduction loop ended with an implied barrier).
        //---------------------------------------------------------------------
        rho0 = rho;
        d = 0.0;
        rho = 0.0;
      }
      #pragma omp barrier

      //---------------------------------------------------------------------
      // q = A.p
      // The partition submatrix-vector multiply: use workspace w
      //---------------------------------------------------------------------
      //
      // NOTE: this version of the multiply is actually (slightly: maybe %5)
      // faster on the sp2 on 16 nodes than is the unrolled-by-2 version
      // below. On the Cray t3d, the reverse is true, i.e., the
      // unrolled-by-two version is some 10% faster.
      // The unrolled-by-8 version below is significantly faster
      // on the Cray t3d - overall speed of code is 1.5 times faster.
      #pragma omp for
      for (j = 0; j < lastrow - firstrow + 1; j++) {
        suml = 0.0;
        for (k = rowstr[j]; k < rowstr[j+1]; k++) {
          suml = suml + a[k]*p[colidx[k]];
        }
        q[j] = suml;
      }
      /*
      for (j = 0; j < lastrow - firstrow + 1; j++) {
        int i = rowstr[j];
        int iresidue = (rowstr[j+1] - i) % 2;
        double sum1 = 0.0;
        double sum2 = 0.0;
        if (iresidue == 1)
          sum1 = sum1 + a[i]*p[colidx[i]];
        for (k = i + iresidue; k <= rowstr[j+1] - 2; k += 2) {
          sum1 = sum1 + a[k] *p[colidx[k]];
          sum2 = sum2 + a[k+1]*p[colidx[k+1]];
        }
        q[j] = sum1 + sum2;
      }
      */
      /*
      for (j = 0; j < lastrow - firstrow + 1; j++) {
        int i = rowstr[j];
        int iresidue = (rowstr[j+1] - i) % 8;
        suml = 0.0;
        for (k = i; k <= i + iresidue - 1; k++) {
          suml = suml + a[k]*p[colidx[k]];
        }
        for (k = i + iresidue; k <= rowstr[j+1] - 8; k += 8) {
          suml = suml + a[k ]*p[colidx[k ]]
                      + a[k+1]*p[colidx[k+1]]
                      + a[k+2]*p[colidx[k+2]]
                      + a[k+3]*p[colidx[k+3]]
                      + a[k+4]*p[colidx[k+4]]
                      + a[k+5]*p[colidx[k+5]]
                      + a[k+6]*p[colidx[k+6]]
                      + a[k+7]*p[colidx[k+7]];
        }
        q[j] = suml;
      }
      */

      //---------------------------------------------------------------------
      // Obtain p.q
      //---------------------------------------------------------------------
      #pragma omp for reduction(+:d)
      for (j = 0; j < lastcol - firstcol + 1; j++) {
        d = d + p[j]*q[j];
      }

      //---------------------------------------------------------------------
      // Obtain alpha = rho / (p.q)
      // (every thread computes the same value from the reduced d; the
      // reduction loop's implied barrier makes d complete before this read)
      //---------------------------------------------------------------------
      alpha = rho0 / d;

      //---------------------------------------------------------------------
      // Obtain z = z + alpha*p
      // and r = r - alpha*q
      //---------------------------------------------------------------------
      #pragma omp for reduction(+:rho)
      for (j = 0; j < lastcol - firstcol + 1; j++) {
        z[j] = z[j] + alpha*p[j];
        r[j] = r[j] - alpha*q[j];
        //---------------------------------------------------------------------
        // rho = r.r
        // Now, obtain the norm of r: First, sum squares of r elements locally..
        //---------------------------------------------------------------------
        rho = rho + r[j]*r[j];
      }

      //---------------------------------------------------------------------
      // Obtain beta:
      //---------------------------------------------------------------------
      beta = rho / rho0;

      //---------------------------------------------------------------------
      // p = r + beta*p
      //---------------------------------------------------------------------
      #pragma omp for
      for (j = 0; j < lastcol - firstcol + 1; j++) {
        p[j] = r[j] + beta*p[j];
      }
    } // end of do cgit=1,cgitmax

    //---------------------------------------------------------------------
    // Compute residual norm explicitly: ||r|| = ||x - A.z||
    // First, form A.z
    // The partition submatrix-vector multiply
    //---------------------------------------------------------------------
    #pragma omp for
    for (j = 0; j < lastrow - firstrow + 1; j++) {
      suml = 0.0;
      for (k = rowstr[j]; k < rowstr[j+1]; k++) {
        suml = suml + a[k]*z[colidx[k]];
      }
      r[j] = suml;
    }

    //---------------------------------------------------------------------
    // At this point, r contains A.z
    // (nowait is safe: sum is only read after the parallel region's
    // implicit barrier at the closing brace)
    //---------------------------------------------------------------------
    #pragma omp for reduction(+:sum) nowait
    for (j = 0; j < lastcol-firstcol+1; j++) {
      suml = x[j] - r[j];
      sum = sum + suml*suml;
    }
  }
  *rnorm = sqrt(sum);
}
//---------------------------------------------------------------------
// generate the test problem for benchmark 6
// makea generates a sparse matrix with a
// prescribed sparsity distribution
//
// parameter type usage
//
// input
//
// n i number of cols/rows of matrix
// nz i nonzeros as declared array size
// rcond r*8 condition number
// shift r*8 main diagonal shift
//
// output
//
// a r*8 array for nonzeros
// colidx i col indices
// rowstr i row pointers
//
// workspace
//
// iv, arow, acol i
// aelt r*8
//---------------------------------------------------------------------
// Generate the test matrix (a, colidx, rowstr) with the prescribed sparsity.
//
// NOTE(review): this function is invoked from INSIDE an OpenMP parallel
// region (main calls it within "#pragma omp parallel"); the orphaned
// "#pragma omp barrier" below and the threadprivate myid/num_threads/
// ilow/ihigh globals only make sense in that context.
// NOTE(review): omp_get_num_threads()/omp_get_thread_num() are called
// unconditionally although <omp.h> is included only under _OPENMP —
// confirm the build always enables OpenMP.
static void makea(int n,
                  int nz,
                  double a[],
                  int colidx[],
                  int rowstr[],
                  int firstrow,
                  int lastrow,
                  int firstcol,
                  int lastcol,
                  int arow[],
                  int acol[][NONZER+1],
                  double aelt[][NONZER+1],
                  double v[],
                  int iv[])
{
  int iouter, ivelt, nzv, nn1;
  int ivc[NONZER+1];
  double vc[NONZER+1];

  //---------------------------------------------------------------------
  // nonzer is approximately (int(sqrt(nnza /n)));
  //---------------------------------------------------------------------
  int work;

  //---------------------------------------------------------------------
  // nn1 is the smallest power of two not less than n
  //---------------------------------------------------------------------
  nn1 = 1;
  do {
    nn1 = 2 * nn1;
  } while (nn1 < n);

  //---------------------------------------------------------------------
  // Generate nonzero positions and save for the use in sparse.
  // Each thread owns the contiguous row slice [ilow, ihigh).
  //---------------------------------------------------------------------
  num_threads = omp_get_num_threads();
  myid = omp_get_thread_num();
  if (num_threads > max_threads) {
    if (myid == 0) {
      printf(" Warning: num_threads%6d exceeded an internal limit%6d\n",
             num_threads, max_threads);
    }
    num_threads = max_threads;
  }
  work = (n + num_threads - 1)/num_threads;
  ilow = work * myid;
  ihigh = ilow + work;
  if (ihigh > n) ihigh = n;

  // Every thread runs sprnvc for ALL iouter < ihigh (not just its own
  // slice): tran is threadprivate, so the extra calls keep this thread's
  // random stream aligned with what a sequential run would produce for
  // its rows.  Results are stored only for iouter >= ilow.
  for (iouter = 0; iouter < ihigh; iouter++) {
    nzv = NONZER;
    sprnvc(n, nzv, nn1, vc, ivc);
    if (iouter >= ilow) {
      vecset(n, vc, ivc, &nzv, iouter+1, 0.5);  // force a nonzero on the diagonal
      arow[iouter] = nzv;
      for (ivelt = 0; ivelt < nzv; ivelt++) {
        acol[iouter][ivelt] = ivc[ivelt] - 1;
        aelt[iouter][ivelt] = vc[ivelt];
      }
    }
  }
  #pragma omp barrier

  //---------------------------------------------------------------------
  // ... make the sparse matrix from list of elements with duplicates
  // (v and iv are used as workspace)
  //---------------------------------------------------------------------
  sparse(a, colidx, rowstr, n, nz, NONZER, arow, acol,
         aelt, firstrow, lastrow, last_n,
         v, &iv[0], &iv[nz], RCOND, SHIFT);
}
//---------------------------------------------------------------------
// rows range from firstrow to lastrow
// the rowstr pointers are defined for nrows = lastrow-firstrow+1 values
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// rows range from firstrow to lastrow
// the rowstr pointers are defined for nrows = lastrow-firstrow+1 values
//
// Assemble the CSR matrix (a, colidx, rowstr) from the per-row element
// lists (arow, acol, aelt), summing duplicate entries and adding
// rcond - shift on the diagonal.
//
// NOTE(review): like makea, this runs inside an enclosing parallel
// region — the barriers and "#pragma omp for" below are orphaned
// constructs, and each thread processes its threadprivate row slice
// [ilow, ihigh).  Cross-thread prefix sums are staged through the
// shared last_n[] array between barriers.
//---------------------------------------------------------------------
static void sparse(double a[],
                   int colidx[],
                   int rowstr[],
                   int n,
                   int nz,
                   int nozer,
                   int arow[],
                   int acol[][NONZER+1],
                   double aelt[][NONZER+1],
                   int firstrow,
                   int lastrow,
                   int last_n[],
                   double v[],
                   int iv[],
                   int nzloc[],
                   double rcond,
                   double shift)
{
  int nrows;

  //---------------------------------------------------
  // generate a sparse matrix from a list of
  // [col, row, element] tri
  //---------------------------------------------------
  int i, j, j1, j2, nza, k, kk, nzrow, jcol;
  double size, scale, ratio, va;
  logical cont40;

  //---------------------------------------------------------------------
  // how many rows of result
  //---------------------------------------------------------------------
  nrows = lastrow - firstrow + 1;
  j1 = ilow + 1;
  j2 = ihigh + 1;

  //---------------------------------------------------------------------
  // ...count the number of triples in each row
  //---------------------------------------------------------------------
  for (j = j1; j < j2; j++) {
    rowstr[j] = 0;
  }
  // Each generated element of row i produces a full outer-product row of
  // arow[i] entries, hence the "+ arow[i]" per triple.
  for (i = 0; i < n; i++) {
    for (nza = 0; nza < arow[i]; nza++) {
      j = acol[i][nza];
      if (j >= ilow && j < ihigh) {
        j = j + 1;
        rowstr[j] = rowstr[j] + arow[i];
      }
    }
  }
  if (myid == 0) {
    rowstr[0] = 0;
    j1 = 0;
  }
  // local (per-slice) prefix sum of the row counts
  for (j = j1+1; j < j2; j++) {
    rowstr[j] = rowstr[j] + rowstr[j-1];
  }
  // publish this slice's total so later threads can offset their slices
  if (myid < num_threads) last_n[myid] = rowstr[j2-1];
  #pragma omp barrier

  nzrow = 0;
  if (myid < num_threads) {
    for (i = 0; i < myid; i++) {
      nzrow = nzrow + last_n[i];
    }
  }
  if (nzrow > 0) {
    for (j = j1; j < j2; j++) {
      rowstr[j] = rowstr[j] + nzrow;
    }
  }
  #pragma omp barrier
  nza = rowstr[nrows] - 1;

  //---------------------------------------------------------------------
  // ... rowstr(j) now is the location of the first nonzero
  // of row j of a
  //---------------------------------------------------------------------
  if (nza > nz) {
    #pragma omp master
    {
      printf("Space for matrix elements exceeded in sparse\n");
      printf("nza, nzmax = %d, %d\n", nza, nz);
    }
    // NOTE(review): exit() is reached by every thread, not only master.
    exit(EXIT_FAILURE);
  }

  //---------------------------------------------------------------------
  // ... preload data pages
  //---------------------------------------------------------------------
  for (j = ilow; j < ihigh; j++) {
    for (k = rowstr[j]; k < rowstr[j+1]; k++) {
      v[k] = 0.0;
      iv[k] = -1;       // -1 marks an unused slot in the row's workspace
    }
    nzloc[j] = 0;       // per-row count of duplicate (merged) entries
  }

  //---------------------------------------------------------------------
  // ... generate actual values by summing duplicates
  //---------------------------------------------------------------------
  size = 1.0;
  ratio = pow(rcond, (1.0 / (double)(n)));
  for (i = 0; i < n; i++) {
    for (nza = 0; nza < arow[i]; nza++) {
      j = acol[i][nza];
      if (j < ilow || j >= ihigh) continue;   // not this thread's rows

      scale = size * aelt[i][nza];
      for (nzrow = 0; nzrow < arow[i]; nzrow++) {
        jcol = acol[i][nzrow];
        va = aelt[i][nzrow] * scale;

        //--------------------------------------------------------------------
        // ... add the identity * rcond to the generated matrix to bound
        // the smallest eigenvalue from below by rcond
        //--------------------------------------------------------------------
        if (jcol == j && j == i) {
          va = va + rcond - shift;
        }

        // Insert (jcol, va) into row j's workspace, kept sorted by column.
        cont40 = false;
        for (k = rowstr[j]; k < rowstr[j+1]; k++) {
          if (iv[k] > jcol) {
            //----------------------------------------------------------------
            // ... insert colidx here orderly
            //----------------------------------------------------------------
            for (kk = rowstr[j+1]-2; kk >= k; kk--) {
              if (iv[kk] > -1) {
                v[kk+1] = v[kk];
                iv[kk+1] = iv[kk];
              }
            }
            iv[k] = jcol;
            v[k] = 0.0;
            cont40 = true;
            break;
          } else if (iv[k] == -1) {
            iv[k] = jcol;
            cont40 = true;
            break;
          } else if (iv[k] == jcol) {
            //--------------------------------------------------------------
            // ... mark the duplicated entry
            //--------------------------------------------------------------
            nzloc[j] = nzloc[j] + 1;
            cont40 = true;
            break;
          }
        }
        if (cont40 == false) {
          printf("internal error in sparse: i=%d\n", i);
          exit(EXIT_FAILURE);
        }
        v[k] = v[k] + va;
      }
    }
    size = size * ratio;
  }
  #pragma omp barrier

  //---------------------------------------------------------------------
  // ... remove empty entries and generate final results
  // (nzloc becomes a prefix sum of duplicates, first per slice, then
  // offset across threads via last_n — same pattern as rowstr above)
  //---------------------------------------------------------------------
  for (j = ilow+1; j < ihigh; j++) {
    nzloc[j] = nzloc[j] + nzloc[j-1];
  }
  if (myid < num_threads) last_n[myid] = nzloc[ihigh-1];
  #pragma omp barrier

  nzrow = 0;
  if (myid < num_threads) {
    for (i = 0; i < myid; i++) {
      nzrow = nzrow + last_n[i];
    }
  }
  if (nzrow > 0) {
    for (j = ilow; j < ihigh; j++) {
      nzloc[j] = nzloc[j] + nzrow;
    }
  }
  #pragma omp barrier

  // Compact each row: copy the workspace (v, iv) into the final CSR
  // arrays, shifted left by the number of duplicates before the row.
  #pragma omp for
  for (j = 0; j < nrows; j++) {
    if (j > 0) {
      j1 = rowstr[j] - nzloc[j-1];
    } else {
      j1 = 0;
    }
    j2 = rowstr[j+1] - nzloc[j];
    nza = rowstr[j];
    for (k = j1; k < j2; k++) {
      a[k] = v[nza];
      colidx[k] = iv[nza];
      nza = nza + 1;
    }
  }
  // Fix up the row pointers to match the compacted layout.
  #pragma omp for
  for (j = 1; j < nrows+1; j++) {
    rowstr[j] = rowstr[j] - nzloc[j-1];
  }
  nza = rowstr[nrows] - 1;
}
//---------------------------------------------------------------------
// generate a sparse n-vector (v, iv)
// having nzv nonzeros
//
// mark(i) is set to 1 if position i is nonzero.
// mark is all zero on entry and is reset to all zero before exit
// this corrects a performance bug found by John G. Lewis, caused by
// reinitialization of mark on every one of the n calls to sprnvc
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// Generate a sparse n-vector (v, iv) with nz nonzeros at distinct
// positions in 1..n.  Values come from the threadprivate randlc stream;
// positions are drawn uniformly over 1..nn1 (nn1 a power of two >= n)
// and rejected when out of range or already used.
//---------------------------------------------------------------------
static void sprnvc(int n, int nz, int nn1, double v[], int iv[])
{
  int count = 0;
  while (count < nz) {
    // value first, position second — call order matters for the RNG stream
    double value = randlc(&tran, amult);
    int pos = icnvrt(randlc(&tran, amult), nn1) + 1;   // portable 1..nn1
    if (pos > n) continue;                              // out of range: redraw

    // reject positions generated earlier
    int dup = 0;
    for (int ii = 0; ii < count && !dup; ii++) {
      if (iv[ii] == pos) dup = 1;
    }
    if (dup) continue;

    v[count] = value;
    iv[count] = pos;
    count++;
  }
}
//---------------------------------------------------------------------
// scale a double precision number x in (0,1) by a power of 2 and chop it
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// Scale a double precision number x in (0,1) by a power of 2 (ipwr2)
// and chop the result to an integer.
//---------------------------------------------------------------------
static int icnvrt(double x, int ipwr2)
{
  return (int)(x * (double)ipwr2);
}
//---------------------------------------------------------------------
// set ith element of sparse vector (v, iv) with
// nzv nonzeros to val
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// Set the ith element of sparse vector (v, iv) with nzv nonzeros to
// val; if position i is not present, append it as a new nonzero and
// bump *nzv.  (Like the original, this scans the whole vector and
// overwrites every slot whose position equals i.)
//---------------------------------------------------------------------
static void vecset(int n, double v[], int iv[], int *nzv, int i, double val)
{
  int found = 0;
  for (int k = 0; k < *nzv; k++) {
    if (iv[k] == i) {
      v[k] = val;
      found = 1;
    }
  }
  if (!found) {
    v[*nzv] = val;
    iv[*nzv] = i;
    *nzv += 1;
  }
}
|
GxB_BinaryOp_ztype.c | //------------------------------------------------------------------------------
// GxB_BinaryOp_ztype: return the type of z for z=f(x,y)
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// NOTE: this function is historical. Use GxB_BinaryOp_ztype_name instead.
#include "GB.h"
// Return (through *ztype) the output type z of the binary operator
// z = f(x,y).  Historical API: GxB_BinaryOp_ztype_name is preferred.
GrB_Info GxB_BinaryOp_ztype         // return the type of z
(
    GrB_Type *ztype,                // return type of output z
    GrB_BinaryOp binaryop           // binary operator to query
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    GB_WHERE1 ("GxB_BinaryOp_ztype (&ztype, binaryop)") ;
    GB_RETURN_IF_NULL (ztype) ;
    GB_RETURN_IF_NULL_OR_FAULTY (binaryop) ;
    ASSERT_BINARYOP_OK (binaryop, "binaryop for ztype", GB0) ;

    //--------------------------------------------------------------------------
    // return the ztype
    //--------------------------------------------------------------------------

    (*ztype) = binaryop->ztype ;
    // flush so the written *ztype is visible to other threads before return
    #pragma omp flush
    return (GrB_SUCCESS) ;
}
|
GB_unaryop__identity_uint32_uint64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_uint32_uint64
// op(A') function: GB_tran__identity_uint32_uint64
// C type: uint32_t
// A type: uint64_t
// cast: uint32_t cij = (uint32_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
uint32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
uint32_t z = (uint32_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT32 || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply the identity unary operator entrywise,
// casting uint64_t inputs to uint32_t outputs.  Auto-generated; the
// per-entry work is defined by the GB_* macros above.
GrB_Info GB_unop__identity_uint32_uint64
(
    uint32_t *restrict Cx,          // output array, written for p in [0, anz)
    const uint64_t *restrict Ax,    // input array, read for p in [0, anz)
    int64_t anz,                    // number of entries
    int nthreads                    // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    // this operator/type combination was compiled out (see GB_DISABLE);
    // report GrB_NO_VALUE so the caller falls back to the generic kernel
    return (GrB_NO_VALUE) ;
    #else
    // Cx [p] = (uint32_t) Ax [p], one entry per iteration, in parallel
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply the identity operator.
// The kernel body comes from the included template GB_unaryop_transpose.c
// (not visible here); it uses the GB_CAST_OP / GB_GETA macros defined above.
GrB_Info GB_tran__identity_uint32_uint64
(
    GrB_Matrix C,                       // output matrix
    const GrB_Matrix A,                 // input matrix (transposed into C)
    int64_t **Rowcounts,                // per-slice row counts (from phase 1)
    GBI_single_iterator Iter,           // iterator over A's vectors
    const int64_t *restrict A_slice,    // partition of A across slices
    int naslice                         // number of slices
)
{
    #if GB_DISABLE
    // compiled out: caller uses the generic transpose instead
    return (GrB_NO_VALUE) ;
    #else
    // run phase 2 of the two-phase transpose template
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
strassen-task.c | /**********************************************************************************************/
/* This program is part of the Barcelona OpenMP Tasks Suite */
/* Copyright (C) 2009 Barcelona Supercomputing Center - Centro Nacional de Supercomputacion */
/* Copyright (C) 2009 Universitat Politecnica de Catalunya */
/* */
/**********************************************************************************************/
/*
* Copyright (c) 1996 Massachusetts Institute of Technology
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to use, copy, modify, and distribute the Software without
* restriction, provided the Software, including any modified copies made
* under this license, is not distributed for a fee, subject to
* the following conditions:
*
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE MASSACHUSETTS INSTITUTE OF TECHNOLOGY BE LIABLE
* FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
* CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* Except as contained in this notice, the name of the Massachusetts
* Institute of Technology shall not be used in advertising or otherwise
* to promote the sale, use or other dealings in this Software without
* prior written authorization from the Massachusetts Institute of
* Technology.
*
*/
#include <stdlib.h>
#include "strassen.h"
/*****************************************************************************
**
** OptimizedStrassenMultiply
**
** For large matrices A, B, and C of size MatrixSize * MatrixSize this
** function performs the operation C = A x B efficiently.
**
** INPUT:
** C = (*C WRITE) Address of top left element of matrix C.
** A = (*A IS READ ONLY) Address of top left element of matrix A.
** B = (*B IS READ ONLY) Address of top left element of matrix B.
** MatrixSize = Size of matrices (for n*n matrix, MatrixSize = n)
** RowWidthA = Number of elements in memory between A[x,y] and A[x,y+1]
** RowWidthB = Number of elements in memory between B[x,y] and B[x,y+1]
** RowWidthC = Number of elements in memory between C[x,y] and C[x,y+1]
**
** OUTPUT:
** C = (*C WRITE) Matrix C contains A x B. (Initial value of *C undefined.)
**
*****************************************************************************/
static void OptimizedStrassenMultiply_par(double *C, double *A, double *B,
    unsigned MatrixSize, unsigned RowWidthC, unsigned RowWidthA,
    unsigned RowWidthB, unsigned int Depth, unsigned int cutoff_depth,
    unsigned cutoff_size)
{
  unsigned QuadrantSize = MatrixSize >> 1; /* MatrixSize / 2 */
  unsigned QuadrantSizeInBytes = sizeof(double) * QuadrantSize * QuadrantSize;
  unsigned Column, Row;

  /************************************************************************
  ** For each matrix A, B, and C, we'll want pointers to each quadrant
  ** in the matrix. These quadrants will be addressed as follows:
  **  --        --
  **  | A    A12 |
  **  |          |
  **  | A21  A22 |
  **  --        --
  ** (A, B, C themselves double as the pointer to their top-left quadrant.)
  ************************************************************************/
  double /* *A, *B, *C, */ *A12, *B12, *C12,
         *A21, *B21, *C21, *A22, *B22, *C22;

  /* Strassen temporaries: eight sums/differences (S1..S8), two products
     (M2, M5) and one product term (T1sMULT), each QuadrantSize^2 doubles. */
  double *S1,*S2,*S3,*S4,*S5,*S6,*S7,*S8,*M2,*M5,*T1sMULT;
#define T2sMULT C22              /* T2 is accumulated in place in C22 */
#define NumberOfVariables 11     /* count of temporaries allocated below */

  char *Heap;
  void *StartHeap;

  /* Below the size cutoff, Strassen's extra additions and temporaries do
     not pay off; fall back to plain divide-and-conquer multiplication. */
  if (MatrixSize <= cutoff_size) {
    MultiplyByDivideAndConquer(C, A, B, MatrixSize, RowWidthC, RowWidthA, RowWidthB, 0);
    return;
  }

  /* Initialize quadrant matrices */
  A12 = A + QuadrantSize;
  B12 = B + QuadrantSize;
  C12 = C + QuadrantSize;
  A21 = A + (RowWidthA * QuadrantSize);
  B21 = B + (RowWidthB * QuadrantSize);
  C21 = C + (RowWidthC * QuadrantSize);
  A22 = A21 + QuadrantSize;
  B22 = B21 + QuadrantSize;
  C22 = C21 + QuadrantSize;

  /* Allocate heap space for all temporaries in a single block.
     NOTE(review): the malloc result is not checked; on OOM the writes
     below dereference a NULL-derived pointer (benchmark code assumes
     allocation succeeds). */
  Heap = (char*)malloc(QuadrantSizeInBytes * NumberOfVariables);
  StartHeap = Heap;

  /* Distribute the heap space over the variables */
  S1 = (double*) Heap; Heap += QuadrantSizeInBytes;
  S2 = (double*) Heap; Heap += QuadrantSizeInBytes;
  S3 = (double*) Heap; Heap += QuadrantSizeInBytes;
  S4 = (double*) Heap; Heap += QuadrantSizeInBytes;
  S5 = (double*) Heap; Heap += QuadrantSizeInBytes;
  S6 = (double*) Heap; Heap += QuadrantSizeInBytes;
  S7 = (double*) Heap; Heap += QuadrantSizeInBytes;
  S8 = (double*) Heap; Heap += QuadrantSizeInBytes;
  M2 = (double*) Heap; Heap += QuadrantSizeInBytes;
  M5 = (double*) Heap; Heap += QuadrantSizeInBytes;
  T1sMULT = (double*) Heap; Heap += QuadrantSizeInBytes;

  if (Depth < cutoff_depth)
  {
    /* Task-parallel path.  The taskwait barriers enforce the data
       dependences among the S matrices:
         S1 -> S2 -> S4   and   S5 -> S6 -> S8;   S3 and S7 are free. */

    /* S1 = A21 + A22 */
    #pragma omp task private(Row, Column)
    for (Row = 0; Row < QuadrantSize; Row++)
      for (Column = 0; Column < QuadrantSize; Column++)
        S1[Row * QuadrantSize + Column] = A21[RowWidthA * Row + Column] + A22[RowWidthA * Row + Column];
    #pragma omp taskwait

    /* S2 = S1 - A11 (reads S1, hence the taskwait above) */
    #pragma omp task private(Row, Column)
    for (Row = 0; Row < QuadrantSize; Row++)
      for (Column = 0; Column < QuadrantSize; Column++)
        S2[Row * QuadrantSize + Column] = S1[Row * QuadrantSize + Column] - A[RowWidthA * Row + Column];
    #pragma omp taskwait

    /* S4 = A12 - S2 (S2 is ready after the taskwait above) */
    #pragma omp task private(Row, Column)
    for (Row = 0; Row < QuadrantSize; Row++)
      for (Column = 0; Column < QuadrantSize; Column++)
        S4[Row * QuadrantSize + Column] = A12[Row * RowWidthA + Column] - S2[QuadrantSize * Row + Column];

    /* S5 = B12 - B11 (independent; runs concurrently with S4) */
    #pragma omp task private(Row, Column)
    for (Row = 0; Row < QuadrantSize; Row++)
      for (Column = 0; Column < QuadrantSize; Column++)
        S5[Row * QuadrantSize + Column] = B12[Row * RowWidthB + Column] - B[Row * RowWidthB + Column];
    #pragma omp taskwait

    /* S6 = B22 - S5 */
    #pragma omp task private(Row, Column)
    for (Row = 0; Row < QuadrantSize; Row++)
      for (Column = 0; Column < QuadrantSize; Column++)
        S6[Row * QuadrantSize + Column] = B22[Row * RowWidthB + Column] - S5[Row * QuadrantSize + Column];
    #pragma omp taskwait

    /* S8 = S6 - B21 */
    #pragma omp task private(Row, Column)
    for (Row = 0; Row < QuadrantSize; Row++)
      for (Column = 0; Column < QuadrantSize; Column++)
        S8[Row * QuadrantSize + Column] = S6[Row * QuadrantSize + Column] - B21[Row * RowWidthB + Column];

    /* S3 = A11 - A21 (independent) */
    #pragma omp task private(Row, Column)
    for (Row = 0; Row < QuadrantSize; Row++)
      for (Column = 0; Column < QuadrantSize; Column++)
        S3[Row * QuadrantSize + Column] = A[RowWidthA * Row + Column] - A21[RowWidthA * Row + Column];

    /* S7 = B22 - B12 (independent) */
    #pragma omp task private(Row, Column)
    for (Row = 0; Row < QuadrantSize; Row++)
      for (Column = 0; Column < QuadrantSize; Column++)
        S7[Row * QuadrantSize + Column] = B22[Row * RowWidthB + Column] - B12[Row * RowWidthB + Column];
    #pragma omp taskwait

    /* Seven recursive quadrant products, all mutually independent. */
    /* M2 = A x B */
    #pragma omp task untied
    OptimizedStrassenMultiply_par(M2, A, B, QuadrantSize, QuadrantSize, RowWidthA, RowWidthB, Depth+1, cutoff_depth, cutoff_size);
    /* M5 = S1 * S5 */
    #pragma omp task untied
    OptimizedStrassenMultiply_par(M5, S1, S5, QuadrantSize, QuadrantSize, QuadrantSize, QuadrantSize, Depth+1, cutoff_depth, cutoff_size);
    /* Step 1 of T1 = S2 x S6 + M2 */
    #pragma omp task untied
    OptimizedStrassenMultiply_par(T1sMULT, S2, S6, QuadrantSize, QuadrantSize, QuadrantSize, QuadrantSize, Depth+1, cutoff_depth, cutoff_size);
    /* Step 1 of T2 = T1 + S3 x S7 (T2 lives in C22, see T2sMULT above) */
    #pragma omp task untied
    OptimizedStrassenMultiply_par(C22, S3, S7, QuadrantSize, RowWidthC /*FIXME*/, QuadrantSize, QuadrantSize, Depth+1, cutoff_depth, cutoff_size);
    /* Step 1 of C = M2 + A12 * B21 */
    #pragma omp task untied
    OptimizedStrassenMultiply_par(C, A12, B21, QuadrantSize, RowWidthC, RowWidthA, RowWidthB, Depth+1, cutoff_depth, cutoff_size);
    /* Step 1 of C12 = S4 x B22 + T1 + M5 */
    #pragma omp task untied
    OptimizedStrassenMultiply_par(C12, S4, B22, QuadrantSize, RowWidthC, QuadrantSize, RowWidthB, Depth+1, cutoff_depth, cutoff_size);
    /* Step 1 of C21 = T2 - A22 * S8 */
    #pragma omp task untied
    OptimizedStrassenMultiply_par(C21, A22, S8, QuadrantSize, RowWidthC, RowWidthA, QuadrantSize, Depth+1, cutoff_depth, cutoff_size);
    #pragma omp taskwait

    /* Combination phase: the next three updates write disjoint quadrants
       (C, C12, C21) and may run concurrently. */
    #pragma omp task private(Row, Column)
    for (Row = 0; Row < QuadrantSize; Row++)
      for (Column = 0; Column < QuadrantSize; Column += 1)
        C[RowWidthC * Row + Column] += M2[Row * QuadrantSize + Column];

    #pragma omp task private(Row, Column)
    for (Row = 0; Row < QuadrantSize; Row++)
      for (Column = 0; Column < QuadrantSize; Column += 1)
        C12[RowWidthC * Row + Column] += M5[Row * QuadrantSize + Column] + T1sMULT[Row * QuadrantSize + Column] + M2[Row * QuadrantSize + Column];

    #pragma omp task private(Row, Column)
    for (Row = 0; Row < QuadrantSize; Row++)
      for (Column = 0; Column < QuadrantSize; Column += 1)
        C21[RowWidthC * Row + Column] = -C21[RowWidthC * Row + Column] + C22[RowWidthC * Row + Column] + T1sMULT[Row * QuadrantSize + Column] + M2[Row * QuadrantSize + Column];
    #pragma omp taskwait

    /* The C22 update overwrites C22, which the C21 update above still
       reads -- hence it must come after the taskwait. */
    #pragma omp task private(Row, Column)
    for (Row = 0; Row < QuadrantSize; Row++)
      for (Column = 0; Column < QuadrantSize; Column += 1)
        C22[RowWidthC * Row + Column] += M5[Row * QuadrantSize + Column] + T1sMULT[Row * QuadrantSize + Column] + M2[Row * QuadrantSize + Column];
    #pragma omp taskwait
  }
  else
  {
    /* Serial path (past the depth cutoff): same computation, no tasks;
       the S matrices can be fused into one loop nest because the
       statements already appear in dependence order. */
    for (Row = 0; Row < QuadrantSize; Row++)
      for (Column = 0; Column < QuadrantSize; Column++) {
        S1[Row * QuadrantSize + Column] = A21[RowWidthA * Row + Column] + A22[RowWidthA * Row + Column];
        S2[Row * QuadrantSize + Column] = S1[Row * QuadrantSize + Column] - A[RowWidthA * Row + Column];
        S4[Row * QuadrantSize + Column] = A12[Row * RowWidthA + Column] - S2[QuadrantSize * Row + Column];
        S5[Row * QuadrantSize + Column] = B12[Row * RowWidthB + Column] - B[Row * RowWidthB + Column];
        S6[Row * QuadrantSize + Column] = B22[Row * RowWidthB + Column] - S5[Row * QuadrantSize + Column];
        S8[Row * QuadrantSize + Column] = S6[Row * QuadrantSize + Column] - B21[Row * RowWidthB + Column];
        S3[Row * QuadrantSize + Column] = A[RowWidthA * Row + Column] - A21[RowWidthA * Row + Column];
        S7[Row * QuadrantSize + Column] = B22[Row * RowWidthB + Column] - B12[Row * RowWidthB + Column];
      }
    /* M2 = A x B */
    OptimizedStrassenMultiply_par(M2, A, B, QuadrantSize, QuadrantSize, RowWidthA, RowWidthB, Depth+1, cutoff_depth, cutoff_size);
    /* M5 = S1 * S5 */
    OptimizedStrassenMultiply_par(M5, S1, S5, QuadrantSize, QuadrantSize, QuadrantSize, QuadrantSize, Depth+1, cutoff_depth, cutoff_size);
    /* Step 1 of T1 = S2 x S6 + M2 */
    OptimizedStrassenMultiply_par(T1sMULT, S2, S6, QuadrantSize, QuadrantSize, QuadrantSize, QuadrantSize, Depth+1, cutoff_depth, cutoff_size);
    /* Step 1 of T2 = T1 + S3 x S7 */
    OptimizedStrassenMultiply_par(C22, S3, S7, QuadrantSize, RowWidthC /*FIXME*/, QuadrantSize, QuadrantSize, Depth+1, cutoff_depth, cutoff_size);
    /* Step 1 of C = M2 + A12 * B21 */
    OptimizedStrassenMultiply_par(C, A12, B21, QuadrantSize, RowWidthC, RowWidthA, RowWidthB, Depth+1, cutoff_depth, cutoff_size);
    /* Step 1 of C12 = S4 x B22 + T1 + M5 */
    OptimizedStrassenMultiply_par(C12, S4, B22, QuadrantSize, RowWidthC, QuadrantSize, RowWidthB, Depth+1, cutoff_depth, cutoff_size);
    /* Step 1 of C21 = T2 - A22 * S8 */
    OptimizedStrassenMultiply_par(C21, A22, S8, QuadrantSize, RowWidthC, RowWidthA, QuadrantSize, Depth+1, cutoff_depth, cutoff_size);

    /* Combine; note C21 reads C22 before C22 itself is updated. */
    for (Row = 0; Row < QuadrantSize; Row++) {
      for (Column = 0; Column < QuadrantSize; Column += 1) {
        C[RowWidthC * Row + Column] += M2[Row * QuadrantSize + Column];
        C12[RowWidthC * Row + Column] += M5[Row * QuadrantSize + Column] + T1sMULT[Row * QuadrantSize + Column] + M2[Row * QuadrantSize + Column];
        C21[RowWidthC * Row + Column] = -C21[RowWidthC * Row + Column] + C22[RowWidthC * Row + Column] + T1sMULT[Row * QuadrantSize + Column] + M2[Row * QuadrantSize + Column];
        C22[RowWidthC * Row + Column] += M5[Row * QuadrantSize + Column] + T1sMULT[Row * QuadrantSize + Column] + M2[Row * QuadrantSize + Column];
      }
    }
  }
  free(StartHeap);
}
/* Entry point for the parallel Strassen multiply: C = A x B for n x n
 * matrices.  One thread (the master) seeds the recursive task tree at
 * depth 1; the rest of the OpenMP team executes the generated tasks. */
void strassen_main_par(double *A, double *B, double *C, int n, unsigned int cutoff_size, unsigned int cutoff_depth)
{
    #pragma omp parallel
    {
        #pragma omp master
        {
            OptimizedStrassenMultiply_par(C, A, B, n, n, n, n, 1, cutoff_depth, cutoff_size);
        }
    }
}
|
ASTMatchers.h | //===- ASTMatchers.h - Structural query framework ---------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements matchers to be used together with the MatchFinder to
// match AST nodes.
//
// Matchers are created by generator functions, which can be combined in
// a functional in-language DSL to express queries over the C++ AST.
//
// For example, to match a class with a certain name, one would call:
// cxxRecordDecl(hasName("MyClass"))
// which returns a matcher that can be used to find all AST nodes that declare
// a class named 'MyClass'.
//
// For more complicated match expressions we're often interested in accessing
// multiple parts of the matched AST nodes once a match is found. In that case,
// call `.bind("name")` on match expressions that match the nodes you want to
// access.
//
// For example, when we're interested in child classes of a certain class, we
// would write:
// cxxRecordDecl(hasName("MyClass"), has(recordDecl().bind("child")))
// When the match is found via the MatchFinder, a user provided callback will
// be called with a BoundNodes instance that contains a mapping from the
// strings that we provided for the `.bind()` calls to the nodes that were
// matched.
// In the given example, each time our matcher finds a match we get a callback
// where "child" is bound to the RecordDecl node of the matching child
// class declaration.
//
// See ASTMatchersInternal.h for a more in-depth explanation of the
// implementation details of the matcher framework.
//
// See ASTMatchFinder.h for how to use the generated matchers to run over
// an AST.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H
#define LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTTypeTraits.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclFriend.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/OpenMPClause.h"
#include "clang/AST/OperationKinds.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/AST/TemplateBase.h"
#include "clang/AST/TemplateName.h"
#include "clang/AST/Type.h"
#include "clang/AST/TypeLoc.h"
#include "clang/ASTMatchers/ASTMatchersInternal.h"
#include "clang/ASTMatchers/ASTMatchersMacros.h"
#include "clang/Basic/AttrKinds.h"
#include "clang/Basic/ExceptionSpecificationType.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TypeTraits.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Regex.h"
#include <cassert>
#include <cstddef>
#include <iterator>
#include <limits>
#include <string>
#include <utility>
#include <vector>
namespace clang {
namespace ast_matchers {
/// Maps string IDs to AST nodes matched by parts of a matcher.
///
/// The bound nodes are generated by calling \c bind("id") on the node matchers
/// of the nodes we want to access later.
///
/// The instances of BoundNodes are created by \c MatchFinder when the user's
/// callbacks are executed every time a match is found.
class BoundNodes {
public:
  /// Returns the AST node bound to \c ID.
  ///
  /// Returns NULL if there was no node bound to \c ID or if there is a node but
  /// it cannot be converted to the specified type \c T.
  template <typename T>
  const T *getNodeAs(StringRef ID) const {
    return MyBoundNodes.getNodeAs<T>(ID);
  }

  /// Type of mapping from binding identifiers to bound nodes. This type
  /// is an associative container with a key type of \c std::string and a value
  /// type of \c clang::ast_type_traits::DynTypedNode
  using IDToNodeMap = internal::BoundNodesMap::IDToNodeMap;

  /// Retrieve mapping from binding identifiers to bound nodes.
  const IDToNodeMap &getMap() const {
    return MyBoundNodes.getMap();
  }

private:
  // Only the match-finder machinery may construct BoundNodes instances.
  friend class internal::BoundNodesTreeBuilder;

  /// Create BoundNodes from a pre-filled map of bindings.
  BoundNodes(internal::BoundNodesMap &MyBoundNodes)
      : MyBoundNodes(MyBoundNodes) {}

  // Copy of the bindings map; keys are the strings passed to bind().
  internal::BoundNodesMap MyBoundNodes;
};
/// Types of matchers for the top-level classes in the AST class
/// hierarchy.
/// @{
using DeclarationMatcher = internal::Matcher<Decl>;
using StatementMatcher = internal::Matcher<Stmt>;
using TypeMatcher = internal::Matcher<QualType>;
using TypeLocMatcher = internal::Matcher<TypeLoc>;
using NestedNameSpecifierMatcher = internal::Matcher<NestedNameSpecifier>;
using NestedNameSpecifierLocMatcher = internal::Matcher<NestedNameSpecifierLoc>;
using CXXCtorInitializerMatcher = internal::Matcher<CXXCtorInitializer>;
/// @}
/// Matches any node.
///
/// Useful when another matcher requires a child matcher, but there's no
/// additional constraint. This will often be used with an explicit conversion
/// to an \c internal::Matcher<> type such as \c TypeMatcher.
///
/// Example: \c DeclarationMatcher(anything()) matches all declarations, e.g.,
/// \code
/// "int* p" and "void f()" in
/// int* p;
/// void f();
/// \endcode
///
/// Usable as: Any Matcher
inline internal::TrueMatcher anything() { return internal::TrueMatcher(); }
/// Matches the top declaration context.
///
/// Given
/// \code
/// int X;
/// namespace NS {
/// int Y;
/// } // namespace NS
/// \endcode
/// decl(hasDeclContext(translationUnitDecl()))
/// matches "int X", but not "int Y".
extern const internal::VariadicDynCastAllOfMatcher<Decl, TranslationUnitDecl>
translationUnitDecl;
/// Matches typedef declarations.
///
/// Given
/// \code
/// typedef int X;
/// using Y = int;
/// \endcode
/// typedefDecl()
/// matches "typedef int X", but not "using Y = int"
extern const internal::VariadicDynCastAllOfMatcher<Decl, TypedefDecl>
typedefDecl;
/// Matches typedef name declarations.
///
/// Given
/// \code
/// typedef int X;
/// using Y = int;
/// \endcode
/// typedefNameDecl()
/// matches "typedef int X" and "using Y = int"
extern const internal::VariadicDynCastAllOfMatcher<Decl, TypedefNameDecl>
typedefNameDecl;
/// Matches type alias declarations.
///
/// Given
/// \code
/// typedef int X;
/// using Y = int;
/// \endcode
/// typeAliasDecl()
/// matches "using Y = int", but not "typedef int X"
extern const internal::VariadicDynCastAllOfMatcher<Decl, TypeAliasDecl>
typeAliasDecl;
/// Matches type alias template declarations.
///
/// typeAliasTemplateDecl() matches
/// \code
/// template <typename T>
/// using Y = X<T>;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, TypeAliasTemplateDecl>
typeAliasTemplateDecl;
/// Matches AST nodes that were expanded within the main-file.
///
/// Example matches X but not Y
/// (matcher = cxxRecordDecl(isExpansionInMainFile())
/// \code
/// #include <Y.h>
/// class X {};
/// \endcode
/// Y.h:
/// \code
/// class Y {};
/// \endcode
///
/// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc>
AST_POLYMORPHIC_MATCHER(isExpansionInMainFile,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc)) {
  // Compare the expansion location of the node's start against the main file.
  auto &SM = Finder->getASTContext().getSourceManager();
  return SM.isInMainFile(SM.getExpansionLoc(Node.getBeginLoc()));
}
/// Matches AST nodes that were expanded within system-header-files.
///
/// Example matches Y but not X
/// (matcher = cxxRecordDecl(isExpansionInSystemHeader())
/// \code
/// #include <SystemHeader.h>
/// class X {};
/// \endcode
/// SystemHeader.h:
/// \code
/// class Y {};
/// \endcode
///
/// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc>
AST_POLYMORPHIC_MATCHER(isExpansionInSystemHeader,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc)) {
  auto &SM = Finder->getASTContext().getSourceManager();
  auto ExpansionLoc = SM.getExpansionLoc(Node.getBeginLoc());
  // An invalid expansion location can never be inside a system header.
  return ExpansionLoc.isValid() && SM.isInSystemHeader(ExpansionLoc);
}
/// Matches AST nodes that were expanded within files whose name is
/// partially matching a given regex.
///
/// Example matches Y but not X
/// (matcher = cxxRecordDecl(isExpansionInFileMatching("AST.*"))
/// \code
/// #include "ASTMatcher.h"
/// class X {};
/// \endcode
/// ASTMatcher.h:
/// \code
/// class Y {};
/// \endcode
///
/// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc>
AST_POLYMORPHIC_MATCHER_P(isExpansionInFileMatching,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc),
                          std::string, RegExp) {
  auto &SM = Finder->getASTContext().getSourceManager();
  auto ExpansionLoc = SM.getExpansionLoc(Node.getBeginLoc());
  if (ExpansionLoc.isInvalid())
    return false;
  // Resolve the file the expansion lands in; fail if it has no file entry.
  auto FileEntry = SM.getFileEntryForID(SM.getFileID(ExpansionLoc));
  if (!FileEntry)
    return false;
  // Partial (unanchored) match of the regex against the file name.
  llvm::Regex RE(RegExp);
  return RE.match(FileEntry->getName());
}
/// Matches declarations.
///
/// Examples matches \c X, \c C, and the friend declaration inside \c C;
/// \code
/// void X();
/// class C {
/// friend X;
/// };
/// \endcode
extern const internal::VariadicAllOfMatcher<Decl> decl;
/// Matches a declaration of a linkage specification.
///
/// Given
/// \code
/// extern "C" {}
/// \endcode
/// linkageSpecDecl()
/// matches "extern "C" {}"
extern const internal::VariadicDynCastAllOfMatcher<Decl, LinkageSpecDecl>
linkageSpecDecl;
/// Matches a declaration of anything that could have a name.
///
/// Example matches \c X, \c S, the anonymous union type, \c i, and \c U;
/// \code
/// typedef int X;
/// struct S {
/// union {
/// int i;
/// } U;
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, NamedDecl> namedDecl;
/// Matches a declaration of label.
///
/// Given
/// \code
/// goto FOO;
/// FOO: bar();
/// \endcode
/// labelDecl()
/// matches 'FOO:'
extern const internal::VariadicDynCastAllOfMatcher<Decl, LabelDecl> labelDecl;
/// Matches a declaration of a namespace.
///
/// Given
/// \code
/// namespace {}
/// namespace test {}
/// \endcode
/// namespaceDecl()
/// matches "namespace {}" and "namespace test {}"
extern const internal::VariadicDynCastAllOfMatcher<Decl, NamespaceDecl>
namespaceDecl;
/// Matches a declaration of a namespace alias.
///
/// Given
/// \code
/// namespace test {}
/// namespace alias = ::test;
/// \endcode
/// namespaceAliasDecl()
/// matches "namespace alias" but not "namespace test"
extern const internal::VariadicDynCastAllOfMatcher<Decl, NamespaceAliasDecl>
namespaceAliasDecl;
/// Matches class, struct, and union declarations.
///
/// Example matches \c X, \c Z, \c U, and \c S
/// \code
/// class X;
/// template<class T> class Z {};
/// struct S {};
/// union U {};
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, RecordDecl> recordDecl;
/// Matches C++ class declarations.
///
/// Example matches \c X, \c Z
/// \code
/// class X;
/// template<class T> class Z {};
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXRecordDecl>
cxxRecordDecl;
/// Matches C++ class template declarations.
///
/// Example matches \c Z
/// \code
/// template<class T> class Z {};
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ClassTemplateDecl>
classTemplateDecl;
/// Matches C++ class template specializations.
///
/// Given
/// \code
/// template<typename T> class A {};
/// template<> class A<double> {};
/// A<int> a;
/// \endcode
/// classTemplateSpecializationDecl()
/// matches the specializations \c A<int> and \c A<double>
extern const internal::VariadicDynCastAllOfMatcher<
Decl, ClassTemplateSpecializationDecl>
classTemplateSpecializationDecl;
/// Matches C++ class template partial specializations.
///
/// Given
/// \code
/// template<class T1, class T2, int I>
/// class A {};
///
/// template<class T, int I>
/// class A<T, T*, I> {};
///
/// template<>
/// class A<int, int, 1> {};
/// \endcode
/// classTemplatePartialSpecializationDecl()
/// matches the specialization \c A<T,T*,I> but not \c A<int,int,1>
extern const internal::VariadicDynCastAllOfMatcher<
Decl, ClassTemplatePartialSpecializationDecl>
classTemplatePartialSpecializationDecl;
/// Matches declarator declarations (field, variable, function
/// and non-type template parameter declarations).
///
/// Given
/// \code
/// class X { int y; };
/// \endcode
/// declaratorDecl()
/// matches \c int y.
extern const internal::VariadicDynCastAllOfMatcher<Decl, DeclaratorDecl>
declaratorDecl;
/// Matches parameter variable declarations.
///
/// Given
/// \code
/// void f(int x);
/// \endcode
/// parmVarDecl()
/// matches \c int x.
extern const internal::VariadicDynCastAllOfMatcher<Decl, ParmVarDecl>
parmVarDecl;
/// Matches C++ access specifier declarations.
///
/// Given
/// \code
/// class C {
/// public:
/// int a;
/// };
/// \endcode
/// accessSpecDecl()
/// matches 'public:'
extern const internal::VariadicDynCastAllOfMatcher<Decl, AccessSpecDecl>
accessSpecDecl;
/// Matches constructor initializers.
///
/// Examples matches \c i(42).
/// \code
/// class C {
/// C() : i(42) {}
/// int i;
/// };
/// \endcode
extern const internal::VariadicAllOfMatcher<CXXCtorInitializer>
cxxCtorInitializer;
/// Matches template arguments.
///
/// Given
/// \code
/// template <typename T> struct C {};
/// C<int> c;
/// \endcode
/// templateArgument()
/// matches 'int' in C<int>.
extern const internal::VariadicAllOfMatcher<TemplateArgument> templateArgument;
/// Matches template name.
///
/// Given
/// \code
/// template <typename T> class X { };
/// X<int> xi;
/// \endcode
/// templateName()
/// matches 'X' in X<int>.
extern const internal::VariadicAllOfMatcher<TemplateName> templateName;
/// Matches non-type template parameter declarations.
///
/// Given
/// \code
/// template <typename T, int N> struct C {};
/// \endcode
/// nonTypeTemplateParmDecl()
/// matches 'N', but not 'T'.
extern const internal::VariadicDynCastAllOfMatcher<Decl,
NonTypeTemplateParmDecl>
nonTypeTemplateParmDecl;
/// Matches template type parameter declarations.
///
/// Given
/// \code
/// template <typename T, int N> struct C {};
/// \endcode
/// templateTypeParmDecl()
/// matches 'T', but not 'N'.
extern const internal::VariadicDynCastAllOfMatcher<Decl, TemplateTypeParmDecl>
templateTypeParmDecl;
/// Matches public C++ declarations.
///
/// Given
/// \code
/// class C {
/// public: int a;
/// protected: int b;
/// private: int c;
/// };
/// \endcode
/// fieldDecl(isPublic())
/// matches 'int a;'
AST_MATCHER(Decl, isPublic) {
  const AccessSpecifier Access = Node.getAccess();
  return Access == AS_public;
}
/// Matches protected C++ declarations.
///
/// Given
/// \code
/// class C {
/// public: int a;
/// protected: int b;
/// private: int c;
/// };
/// \endcode
/// fieldDecl(isProtected())
/// matches 'int b;'
AST_MATCHER(Decl, isProtected) {
  const AccessSpecifier Access = Node.getAccess();
  return Access == AS_protected;
}
/// Matches private C++ declarations.
///
/// Given
/// \code
/// class C {
/// public: int a;
/// protected: int b;
/// private: int c;
/// };
/// \endcode
/// fieldDecl(isPrivate())
/// matches 'int c;'
AST_MATCHER(Decl, isPrivate) {
  const AccessSpecifier Access = Node.getAccess();
  return Access == AS_private;
}
/// Matches non-static data members that are bit-fields.
///
/// Given
/// \code
/// class C {
/// int a : 2;
/// int b;
/// };
/// \endcode
/// fieldDecl(isBitField())
/// matches 'int a;' but not 'int b;'.
AST_MATCHER(FieldDecl, isBitField) {
  // Delegates directly to FieldDecl::isBitField().
  return Node.isBitField();
}
/// Matches non-static data members that are bit-fields of the specified
/// bit width.
///
/// Given
/// \code
/// class C {
/// int a : 2;
/// int b : 4;
/// int c : 2;
/// };
/// \endcode
/// fieldDecl(hasBitWidth(2))
/// matches 'int a;' and 'int c;' but not 'int b;'.
AST_MATCHER_P(FieldDecl, hasBitWidth, unsigned, Width) {
  // Non-bit-fields never match; otherwise compare the evaluated bit width.
  if (!Node.isBitField())
    return false;
  return Node.getBitWidthValue(Finder->getASTContext()) == Width;
}
/// Matches non-static data members that have an in-class initializer.
///
/// Given
/// \code
/// class C {
/// int a = 2;
/// int b = 3;
/// int c;
/// };
/// \endcode
/// fieldDecl(hasInClassInitializer(integerLiteral(equals(2))))
/// matches 'int a;' but not 'int b;'.
/// fieldDecl(hasInClassInitializer(anything()))
/// matches 'int a;' and 'int b;' but not 'int c;'.
AST_MATCHER_P(FieldDecl, hasInClassInitializer, internal::Matcher<Expr>,
              InnerMatcher) {
  // Members without an in-class initializer cannot match.
  const Expr *Init = Node.getInClassInitializer();
  if (Init == nullptr)
    return false;
  return InnerMatcher.matches(*Init, Finder, Builder);
}
/// Determines whether the function is "main", which is the entry point
/// into an executable program.
AST_MATCHER(FunctionDecl, isMain) {
  // Delegates to FunctionDecl::isMain() to decide whether this function is
  // the program entry point.
  return Node.isMain();
}
/// Matches the specialized template of a specialization declaration.
///
/// Given
/// \code
/// template<typename T> class A {}; #1
/// template<> class A<int> {}; #2
/// \endcode
/// classTemplateSpecializationDecl(hasSpecializedTemplate(classTemplateDecl()))
/// matches '#2' with classTemplateDecl() matching the class template
/// declaration of 'A' at #1.
AST_MATCHER_P(ClassTemplateSpecializationDecl, hasSpecializedTemplate,
              internal::Matcher<ClassTemplateDecl>, InnerMatcher) {
  // Guard against a null specialized template before running the inner
  // matcher on it.
  const ClassTemplateDecl *Specialized = Node.getSpecializedTemplate();
  if (Specialized == nullptr)
    return false;
  return InnerMatcher.matches(*Specialized, Finder, Builder);
}
/// Matches a declaration that has been implicitly added
/// by the compiler (eg. implicit default/copy constructors).
AST_MATCHER(Decl, isImplicit) {
  // Delegates to Decl::isImplicit(): true for declarations the compiler
  // synthesized rather than ones written in the source.
  return Node.isImplicit();
}
/// Matches classTemplateSpecializations, templateSpecializationType and
/// functionDecl that have at least one TemplateArgument matching the given
/// InnerMatcher.
///
/// Given
/// \code
/// template<typename T> class A {};
/// template<> class A<double> {};
/// A<int> a;
///
/// template<typename T> void f() {};
/// void func() { f<int>(); };
/// \endcode
///
/// classTemplateSpecializationDecl(hasAnyTemplateArgument(
/// refersToType(asString("int"))))
/// matches the specialization \c A<int>
///
/// functionDecl(hasAnyTemplateArgument(refersToType(asString("int"))))
/// matches the specialization \c f<int>
AST_POLYMORPHIC_MATCHER_P(
    hasAnyTemplateArgument,
    AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl,
                                    TemplateSpecializationType,
                                    FunctionDecl),
    internal::Matcher<TemplateArgument>, InnerMatcher) {
  // Fetch the node's template arguments (overloaded on the concrete node
  // type) and succeed if any single argument satisfies InnerMatcher.
  ArrayRef<TemplateArgument> List =
      internal::getTemplateSpecializationArgs(Node);
  return matchesFirstInRange(InnerMatcher, List.begin(), List.end(), Finder,
                             Builder);
}
/// Causes all nested matchers to be matched with the specified traversal kind.
///
/// Given
/// \code
/// void foo()
/// {
/// int i = 3.0;
/// }
/// \endcode
/// The matcher
/// \code
/// traverse(ast_type_traits::TK_IgnoreImplicitCastsAndParentheses,
/// varDecl(hasInitializer(floatLiteral().bind("init")))
/// )
/// \endcode
/// matches the variable declaration with "init" bound to the "3.0".
template <typename T>
internal::Matcher<T> traverse(ast_type_traits::TraversalKind TK,
                              const internal::Matcher<T> &InnerMatcher) {
  // Wrap InnerMatcher in a TraversalMatcher so all nested matchers run under
  // traversal kind TK; the wrapper keeps the inner matcher's restrict kind.
  return internal::DynTypedMatcher::constructRestrictedWrapper(
             new internal::TraversalMatcher<T>(TK, InnerMatcher),
             InnerMatcher.getID().first)
      .template unconditionalConvertTo<T>();
}
/// Overload of \c traverse for bindable matchers; the result stays bindable.
template <typename T>
internal::BindableMatcher<T>
traverse(ast_type_traits::TraversalKind TK,
         const internal::BindableMatcher<T> &InnerMatcher) {
  // Same wrapping as the Matcher<T> overload, re-wrapped as a
  // BindableMatcher so .bind() remains available on the result.
  return internal::BindableMatcher<T>(
      internal::DynTypedMatcher::constructRestrictedWrapper(
          new internal::TraversalMatcher<T>(TK, InnerMatcher),
          InnerMatcher.getID().first)
          .template unconditionalConvertTo<T>());
}
/// Overload of \c traverse for variadic-operator matchers (e.g. allOf,
/// anyOf); defers wrapping by returning a TraversalWrapper.
template <typename... T>
internal::TraversalWrapper<internal::VariadicOperatorMatcher<T...>>
traverse(ast_type_traits::TraversalKind TK,
         const internal::VariadicOperatorMatcher<T...> &InnerMatcher) {
  return internal::TraversalWrapper<internal::VariadicOperatorMatcher<T...>>(
      TK, InnerMatcher);
}
/// Overload of \c traverse for argument-adapting matchers (e.g. has,
/// hasDescendant); defers wrapping by returning a TraversalWrapper.
template <template <typename ToArg, typename FromArg> class ArgumentAdapterT,
          typename T, typename ToTypes>
internal::TraversalWrapper<
    internal::ArgumentAdaptingMatcherFuncAdaptor<ArgumentAdapterT, T, ToTypes>>
traverse(ast_type_traits::TraversalKind TK,
         const internal::ArgumentAdaptingMatcherFuncAdaptor<
             ArgumentAdapterT, T, ToTypes> &InnerMatcher) {
  return internal::TraversalWrapper<
      internal::ArgumentAdaptingMatcherFuncAdaptor<ArgumentAdapterT, T,
                                                   ToTypes>>(TK, InnerMatcher);
}
/// Overload of \c traverse for single-parameter polymorphic matchers;
/// defers wrapping by returning a TraversalWrapper.
template <template <typename T, typename P1> class MatcherT, typename P1,
          typename ReturnTypesF>
internal::TraversalWrapper<
    internal::PolymorphicMatcherWithParam1<MatcherT, P1, ReturnTypesF>>
traverse(
    ast_type_traits::TraversalKind TK,
    const internal::PolymorphicMatcherWithParam1<MatcherT, P1, ReturnTypesF>
        &InnerMatcher) {
  return internal::TraversalWrapper<
      internal::PolymorphicMatcherWithParam1<MatcherT, P1, ReturnTypesF>>(
      TK, InnerMatcher);
}
/// Overload of \c traverse for two-parameter polymorphic matchers;
/// defers wrapping by returning a TraversalWrapper.
template <template <typename T, typename P1, typename P2> class MatcherT,
          typename P1, typename P2, typename ReturnTypesF>
internal::TraversalWrapper<
    internal::PolymorphicMatcherWithParam2<MatcherT, P1, P2, ReturnTypesF>>
traverse(
    ast_type_traits::TraversalKind TK,
    const internal::PolymorphicMatcherWithParam2<MatcherT, P1, P2, ReturnTypesF>
        &InnerMatcher) {
  return internal::TraversalWrapper<
      internal::PolymorphicMatcherWithParam2<MatcherT, P1, P2, ReturnTypesF>>(
      TK, InnerMatcher);
}
/// Matches expressions that match InnerMatcher after any implicit AST
/// nodes are stripped off.
///
/// Parentheses and explicit casts are not discarded.
/// Given
/// \code
/// class C {};
/// C a = C();
/// C b;
/// C c = b;
/// \endcode
/// The matchers
/// \code
/// varDecl(hasInitializer(ignoringImplicit(cxxConstructExpr())))
/// \endcode
/// would match the declarations for a, b, and c.
/// While
/// \code
/// varDecl(hasInitializer(cxxConstructExpr()))
/// \endcode
/// only match the declarations for b and c.
AST_MATCHER_P(Expr, ignoringImplicit, internal::Matcher<Expr>,
              InnerMatcher) {
  // Strip implicit nodes via Expr::IgnoreImplicit(), then apply the inner
  // matcher to what remains.
  const Expr *Stripped = Node.IgnoreImplicit();
  return InnerMatcher.matches(*Stripped, Finder, Builder);
}
/// Matches expressions that match InnerMatcher after any implicit casts
/// are stripped off.
///
/// Parentheses and explicit casts are not discarded.
/// Given
/// \code
/// int arr[5];
/// int a = 0;
/// char b = 0;
/// const int c = a;
/// int *d = arr;
/// long e = (long) 0l;
/// \endcode
/// The matchers
/// \code
/// varDecl(hasInitializer(ignoringImpCasts(integerLiteral())))
/// varDecl(hasInitializer(ignoringImpCasts(declRefExpr())))
/// \endcode
/// would match the declarations for a, b, c, and d, but not e.
/// While
/// \code
/// varDecl(hasInitializer(integerLiteral()))
/// varDecl(hasInitializer(declRefExpr()))
/// \endcode
/// only match the declarations for b, c, and d.
AST_MATCHER_P(Expr, ignoringImpCasts,
              internal::Matcher<Expr>, InnerMatcher) {
  // Strip implicit casts via Expr::IgnoreImpCasts(), then apply the inner
  // matcher to what remains.
  const Expr *Stripped = Node.IgnoreImpCasts();
  return InnerMatcher.matches(*Stripped, Finder, Builder);
}
/// Matches expressions that match InnerMatcher after parentheses and
/// casts are stripped off.
///
/// Implicit and non-C Style casts are also discarded.
/// Given
/// \code
/// int a = 0;
/// char b = (0);
/// void* c = reinterpret_cast<char*>(0);
/// char d = char(0);
/// \endcode
/// The matcher
/// varDecl(hasInitializer(ignoringParenCasts(integerLiteral())))
/// would match the declarations for a, b, c, and d.
/// while
/// varDecl(hasInitializer(integerLiteral()))
/// only match the declaration for a.
AST_MATCHER_P(Expr, ignoringParenCasts, internal::Matcher<Expr>, InnerMatcher) {
  // Strip parentheses and casts via Expr::IgnoreParenCasts(), then apply
  // the inner matcher to what remains.
  const Expr *Stripped = Node.IgnoreParenCasts();
  return InnerMatcher.matches(*Stripped, Finder, Builder);
}
/// Matches expressions that match InnerMatcher after implicit casts and
/// parentheses are stripped off.
///
/// Explicit casts are not discarded.
/// Given
/// \code
/// int arr[5];
/// int a = 0;
/// char b = (0);
/// const int c = a;
/// int *d = (arr);
/// long e = ((long) 0l);
/// \endcode
/// The matchers
/// varDecl(hasInitializer(ignoringParenImpCasts(integerLiteral())))
/// varDecl(hasInitializer(ignoringParenImpCasts(declRefExpr())))
/// would match the declarations for a, b, c, and d, but not e.
/// while
/// varDecl(hasInitializer(integerLiteral()))
/// varDecl(hasInitializer(declRefExpr()))
/// would only match the declaration for a.
AST_MATCHER_P(Expr, ignoringParenImpCasts,
              internal::Matcher<Expr>, InnerMatcher) {
  // Strip parentheses and implicit casts via Expr::IgnoreParenImpCasts(),
  // then apply the inner matcher to what remains.
  const Expr *Stripped = Node.IgnoreParenImpCasts();
  return InnerMatcher.matches(*Stripped, Finder, Builder);
}
/// Matches types that match InnerMatcher after any parens are stripped.
///
/// Given
/// \code
/// void (*fp)(void);
/// \endcode
/// The matcher
/// \code
/// varDecl(hasType(pointerType(pointee(ignoringParens(functionType())))))
/// \endcode
/// would match the declaration for fp.
AST_MATCHER_P_OVERLOAD(QualType, ignoringParens, internal::Matcher<QualType>,
                       InnerMatcher, 0) {
  // Strip ParenType sugar from the type, then match on the result.
  const QualType Unparenthesized = Node.IgnoreParens();
  return InnerMatcher.matches(Unparenthesized, Finder, Builder);
}
/// Overload \c ignoringParens for \c Expr.
///
/// Given
/// \code
/// const char* str = ("my-string");
/// \endcode
/// The matcher
/// \code
/// implicitCastExpr(hasSourceExpression(ignoringParens(stringLiteral())))
/// \endcode
/// would match the implicit cast resulting from the assignment.
AST_MATCHER_P_OVERLOAD(Expr, ignoringParens, internal::Matcher<Expr>,
                       InnerMatcher, 1) {
  // Drop surrounding ParenExprs, then apply the inner matcher.
  return InnerMatcher.matches(*Node.IgnoreParens(), Finder, Builder);
}
/// Matches expressions that are instantiation-dependent even if it is
/// neither type- nor value-dependent.
///
/// In the following example, the expression sizeof(sizeof(T() + T()))
/// is instantiation-dependent (since it involves a template parameter T),
/// but is neither type- nor value-dependent, since the type of the inner
/// sizeof is known (std::size_t) and therefore the size of the outer
/// sizeof is known.
/// \code
/// template<typename T>
/// void f(T x, T y) { sizeof(sizeof(T() + T())); }
/// \endcode
/// expr(isInstantiationDependent()) matches sizeof(sizeof(T() + T()))
AST_MATCHER(Expr, isInstantiationDependent) {
  // Delegates to Expr::isInstantiationDependent().
  return Node.isInstantiationDependent();
}
/// Matches expressions that are type-dependent because the template type
/// is not yet instantiated.
///
/// For example, the expressions "x" and "x + y" are type-dependent in
/// the following code, but "y" is not type-dependent:
/// \code
/// template<typename T>
/// void add(T x, int y) {
/// x + y;
/// }
/// \endcode
/// expr(isTypeDependent()) matches x + y
AST_MATCHER(Expr, isTypeDependent) {
  // Delegates to Expr::isTypeDependent().
  return Node.isTypeDependent();
}
/// Matches expression that are value-dependent because they contain a
/// non-type template parameter.
///
/// For example, the array bound of "Chars" in the following example is
/// value-dependent.
/// \code
/// template<int Size> int f() { return Size; }
/// \endcode
/// expr(isValueDependent()) matches return Size
AST_MATCHER(Expr, isValueDependent) {
  // Delegates to Expr::isValueDependent().
  return Node.isValueDependent();
}
/// Matches classTemplateSpecializations, templateSpecializationType and
/// functionDecl where the n'th TemplateArgument matches the given InnerMatcher.
///
/// Given
/// \code
/// template<typename T, typename U> class A {};
/// A<bool, int> b;
/// A<int, bool> c;
///
/// template<typename T> void f() {}
/// void func() { f<int>(); };
/// \endcode
/// classTemplateSpecializationDecl(hasTemplateArgument(
/// 1, refersToType(asString("int"))))
/// matches the specialization \c A<bool, int>
///
/// functionDecl(hasTemplateArgument(0, refersToType(asString("int"))))
/// matches the specialization \c f<int>
AST_POLYMORPHIC_MATCHER_P2(
    hasTemplateArgument,
    AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl,
                                    TemplateSpecializationType,
                                    FunctionDecl),
    unsigned, N, internal::Matcher<TemplateArgument>, InnerMatcher) {
  // Out-of-range indices never match; otherwise run InnerMatcher on the
  // N'th template argument of the node.
  ArrayRef<TemplateArgument> Args =
      internal::getTemplateSpecializationArgs(Node);
  return N < Args.size() && InnerMatcher.matches(Args[N], Finder, Builder);
}
/// Matches if the number of template arguments equals \p N.
///
/// Given
/// \code
/// template<typename T> struct C {};
/// C<int> c;
/// \endcode
/// classTemplateSpecializationDecl(templateArgumentCountIs(1))
/// matches C<int>.
AST_POLYMORPHIC_MATCHER_P(
    templateArgumentCountIs,
    AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl,
                                    TemplateSpecializationType),
    unsigned, N) {
  // Compare the node's template-argument count against N exactly.
  return internal::getTemplateSpecializationArgs(Node).size() == N;
}
/// Matches a TemplateArgument that refers to a certain type.
///
/// Given
/// \code
/// struct X {};
/// template<typename T> struct A {};
/// A<X> a;
/// \endcode
/// classTemplateSpecializationDecl(hasAnyTemplateArgument(
/// refersToType(class(hasName("X")))))
/// matches the specialization \c A<X>
AST_MATCHER_P(TemplateArgument, refersToType,
              internal::Matcher<QualType>, InnerMatcher) {
  // Only type template arguments carry a QualType to match against.
  return Node.getKind() == TemplateArgument::Type &&
         InnerMatcher.matches(Node.getAsType(), Finder, Builder);
}
/// Matches a TemplateArgument that refers to a certain template.
///
/// Given
/// \code
/// template<template <typename> class S> class X {};
/// template<typename T> class Y {};
/// X<Y> xi;
/// \endcode
/// classTemplateSpecializationDecl(hasAnyTemplateArgument(
/// refersToTemplate(templateName())))
/// matches the specialization \c X<Y>
AST_MATCHER_P(TemplateArgument, refersToTemplate,
              internal::Matcher<TemplateName>, InnerMatcher) {
  // Only template template arguments carry a TemplateName to match against.
  return Node.getKind() == TemplateArgument::Template &&
         InnerMatcher.matches(Node.getAsTemplate(), Finder, Builder);
}
/// Matches a canonical TemplateArgument that refers to a certain
/// declaration.
///
/// Given
/// \code
/// struct B { int next; };
/// template<int(B::*next_ptr)> struct A {};
/// A<&B::next> a;
/// \endcode
/// classTemplateSpecializationDecl(hasAnyTemplateArgument(
/// refersToDeclaration(fieldDecl(hasName("next")))))
/// matches the specialization \c A<&B::next> with \c fieldDecl(...) matching
/// \c B::next
AST_MATCHER_P(TemplateArgument, refersToDeclaration,
              internal::Matcher<Decl>, InnerMatcher) {
  // Only declaration template arguments carry a Decl to match against.
  if (Node.getKind() != TemplateArgument::Declaration)
    return false;
  return InnerMatcher.matches(*Node.getAsDecl(), Finder, Builder);
}
/// Matches a sugar TemplateArgument that refers to a certain expression.
///
/// Given
/// \code
/// struct B { int next; };
/// template<int(B::*next_ptr)> struct A {};
/// A<&B::next> a;
/// \endcode
/// templateSpecializationType(hasAnyTemplateArgument(
/// isExpr(hasDescendant(declRefExpr(to(fieldDecl(hasName("next"))))))))
/// matches the specialization \c A<&B::next> with \c fieldDecl(...) matching
/// \c B::next
AST_MATCHER_P(TemplateArgument, isExpr, internal::Matcher<Expr>, InnerMatcher) {
  // Only expression template arguments carry an Expr to match against.
  return Node.getKind() == TemplateArgument::Expression &&
         InnerMatcher.matches(*Node.getAsExpr(), Finder, Builder);
}
/// Matches a TemplateArgument that is an integral value.
///
/// Given
/// \code
/// template<int T> struct C {};
/// C<42> c;
/// \endcode
/// classTemplateSpecializationDecl(
/// hasAnyTemplateArgument(isIntegral()))
/// matches the implicit instantiation of C in C<42>
/// with isIntegral() matching 42.
AST_MATCHER(TemplateArgument, isIntegral) {
  // Kind check only; the integral value itself is not inspected.
  return Node.getKind() == TemplateArgument::Integral;
}
/// Matches a TemplateArgument that refers to an integral type.
///
/// Given
/// \code
/// template<int T> struct C {};
/// C<42> c;
/// \endcode
/// classTemplateSpecializationDecl(
/// hasAnyTemplateArgument(refersToIntegralType(asString("int"))))
/// matches the implicit instantiation of C in C<42>.
AST_MATCHER_P(TemplateArgument, refersToIntegralType,
              internal::Matcher<QualType>, InnerMatcher) {
  // Only integral template arguments carry an integral type to match.
  return Node.getKind() == TemplateArgument::Integral &&
         InnerMatcher.matches(Node.getIntegralType(), Finder, Builder);
}
/// Matches a TemplateArgument of integral type with a given value.
///
/// Note that 'Value' is a string as the template argument's value is
/// an arbitrary precision integer. 'Value' must be equal to the canonical
/// representation of that integral value in base 10.
///
/// Given
/// \code
/// template<int T> struct C {};
/// C<42> c;
/// \endcode
/// classTemplateSpecializationDecl(
/// hasAnyTemplateArgument(equalsIntegralValue("42")))
/// matches the implicit instantiation of C in C<42>.
AST_MATCHER_P(TemplateArgument, equalsIntegralValue,
              std::string, Value) {
  // Compare Value against the canonical base-10 rendering of the
  // argument's arbitrary-precision integral value.
  return Node.getKind() == TemplateArgument::Integral &&
         Node.getAsIntegral().toString(10) == Value;
}
/// Matches an Objective-C autorelease pool statement.
///
/// Given
/// \code
/// @autoreleasepool {
/// int x = 0;
/// }
/// \endcode
/// autoreleasePoolStmt(stmt()) matches the declaration of "x"
/// inside the autorelease pool.
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
ObjCAutoreleasePoolStmt> autoreleasePoolStmt;
/// Matches any value declaration.
///
/// Example matches A, B, C and F
/// \code
/// enum X { A, B, C };
/// void F();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ValueDecl> valueDecl;
/// Matches C++ constructor declarations.
///
/// Example matches Foo::Foo() and Foo::Foo(int)
/// \code
/// class Foo {
/// public:
/// Foo();
/// Foo(int);
/// int DoSomething();
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXConstructorDecl>
cxxConstructorDecl;
/// Matches explicit C++ destructor declarations.
///
/// Example matches Foo::~Foo()
/// \code
/// class Foo {
/// public:
/// virtual ~Foo();
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXDestructorDecl>
cxxDestructorDecl;
/// Matches enum declarations.
///
/// Example matches X
/// \code
/// enum X {
/// A, B, C
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, EnumDecl> enumDecl;
/// Matches enum constants.
///
/// Example matches A, B, C
/// \code
/// enum X {
/// A, B, C
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, EnumConstantDecl>
enumConstantDecl;
/// Matches method declarations.
///
/// Example matches y
/// \code
/// class X { void y(); };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXMethodDecl>
cxxMethodDecl;
/// Matches conversion operator declarations.
///
/// Example matches the operator.
/// \code
/// class X { operator int() const; };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXConversionDecl>
cxxConversionDecl;
/// Matches user-defined and implicitly generated deduction guide.
///
/// Example matches the deduction guide.
/// \code
/// template<typename T>
/// class X { X(int) };
/// X(int) -> X<int>;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXDeductionGuideDecl>
cxxDeductionGuideDecl;
/// Matches variable declarations.
///
/// Note: this does not match declarations of member variables, which are
/// "field" declarations in Clang parlance.
///
/// Example matches a
/// \code
/// int a;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, VarDecl> varDecl;
/// Matches field declarations.
///
/// Given
/// \code
/// class X { int m; };
/// \endcode
/// fieldDecl()
/// matches 'm'.
extern const internal::VariadicDynCastAllOfMatcher<Decl, FieldDecl> fieldDecl;
/// Matches indirect field declarations.
///
/// Given
/// \code
/// struct X { struct { int a; }; };
/// \endcode
/// indirectFieldDecl()
/// matches 'a'.
extern const internal::VariadicDynCastAllOfMatcher<Decl, IndirectFieldDecl>
indirectFieldDecl;
/// Matches function declarations.
///
/// Example matches f
/// \code
/// void f();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, FunctionDecl>
functionDecl;
/// Matches C++ function template declarations.
///
/// Example matches f
/// \code
/// template<class T> void f(T t) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, FunctionTemplateDecl>
functionTemplateDecl;
/// Matches friend declarations.
///
/// Given
/// \code
/// class X { friend void foo(); };
/// \endcode
/// friendDecl()
/// matches 'friend void foo()'.
extern const internal::VariadicDynCastAllOfMatcher<Decl, FriendDecl> friendDecl;
/// Matches statements.
///
/// Given
/// \code
/// { ++a; }
/// \endcode
/// stmt()
/// matches both the compound statement '{ ++a; }' and '++a'.
extern const internal::VariadicAllOfMatcher<Stmt> stmt;
/// Matches declaration statements.
///
/// Given
/// \code
/// int a;
/// \endcode
/// declStmt()
/// matches 'int a'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DeclStmt> declStmt;
/// Matches member expressions.
///
/// Given
/// \code
/// class Y {
/// void x() { this->x(); x(); Y y; y.x(); a; this->b; Y::b; }
/// int a; static int b;
/// };
/// \endcode
/// memberExpr()
/// matches this->x, x, y.x, a, this->b
extern const internal::VariadicDynCastAllOfMatcher<Stmt, MemberExpr> memberExpr;
/// Matches unresolved member expressions.
///
/// Given
/// \code
/// struct X {
/// template <class T> void f();
/// void g();
/// };
/// template <class T> void h() { X x; x.f<T>(); x.g(); }
/// \endcode
/// unresolvedMemberExpr()
/// matches x.f<T>
extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnresolvedMemberExpr>
unresolvedMemberExpr;
/// Matches member expressions where the actual member referenced could not be
/// resolved because the base expression or the member name was dependent.
///
/// Given
/// \code
/// template <class T> void f() { T t; t.g(); }
/// \endcode
/// cxxDependentScopeMemberExpr()
/// matches t.g
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
CXXDependentScopeMemberExpr>
cxxDependentScopeMemberExpr;
/// Matches call expressions.
///
/// Example matches x.y() and y()
/// \code
/// X x;
/// x.y();
/// y();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CallExpr> callExpr;
/// Matches call expressions which were resolved using ADL.
///
/// Example matches y(x) but not y(42) or NS::y(x).
/// \code
/// namespace NS {
/// struct X {};
/// void y(X);
/// }
///
/// void y(...);
///
/// void test() {
/// NS::X x;
/// y(x); // Matches
/// NS::y(x); // Doesn't match
/// y(42); // Doesn't match
/// using NS::y;
/// y(x); // Found by both unqualified lookup and ADL, doesn't match
///   }
/// \endcode
AST_MATCHER(CallExpr, usesADL) {
  // Delegates to CallExpr::usesADL().
  return Node.usesADL();
}
/// Matches lambda expressions.
///
/// Example matches [&](){return 5;}
/// \code
/// [&](){return 5;}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, LambdaExpr> lambdaExpr;
/// Matches member call expressions.
///
/// Example matches x.y()
/// \code
/// X x;
/// x.y();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXMemberCallExpr>
cxxMemberCallExpr;
/// Matches ObjectiveC Message invocation expressions.
///
/// The innermost message send invokes the "alloc" class method on the
/// NSString class, while the outermost message send invokes the
/// "initWithString" instance method on the object returned from
/// NSString's "alloc". This matcher should match both message sends.
/// \code
/// [[NSString alloc] initWithString:@"Hello"]
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCMessageExpr>
objcMessageExpr;
/// Matches Objective-C interface declarations.
///
/// Example matches Foo
/// \code
/// @interface Foo
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCInterfaceDecl>
objcInterfaceDecl;
/// Matches Objective-C implementation declarations.
///
/// Example matches Foo
/// \code
/// @implementation Foo
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCImplementationDecl>
objcImplementationDecl;
/// Matches Objective-C protocol declarations.
///
/// Example matches FooDelegate
/// \code
/// @protocol FooDelegate
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCProtocolDecl>
objcProtocolDecl;
/// Matches Objective-C category declarations.
///
/// Example matches Foo (Additions)
/// \code
/// @interface Foo (Additions)
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCCategoryDecl>
objcCategoryDecl;
/// Matches Objective-C category definitions.
///
/// Example matches Foo (Additions)
/// \code
/// @implementation Foo (Additions)
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCCategoryImplDecl>
objcCategoryImplDecl;
/// Matches Objective-C method declarations.
///
/// Example matches both declaration and definition of -[Foo method]
/// \code
/// @interface Foo
/// - (void)method;
/// @end
///
/// @implementation Foo
/// - (void)method {}
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCMethodDecl>
objcMethodDecl;
/// Matches block declarations.
///
/// Example matches the declaration of the nameless block printing an input
/// integer.
///
/// \code
/// myFunc(^(int p) {
/// printf("%d", p);
/// })
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, BlockDecl>
blockDecl;
/// Matches Objective-C instance variable declarations.
///
/// Example matches _enabled
/// \code
/// @implementation Foo {
/// BOOL _enabled;
/// }
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCIvarDecl>
objcIvarDecl;
/// Matches Objective-C property declarations.
///
/// Example matches enabled
/// \code
/// @interface Foo
/// @property BOOL enabled;
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCPropertyDecl>
objcPropertyDecl;
/// Matches Objective-C \@throw statements.
///
/// Example matches \@throw
/// \code
/// @throw obj;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtThrowStmt>
objcThrowStmt;
/// Matches Objective-C @try statements.
///
/// Example matches @try
/// \code
/// @try {}
/// @catch (...) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtTryStmt>
objcTryStmt;
/// Matches Objective-C @catch statements.
///
/// Example matches @catch
/// \code
/// @try {}
/// @catch (...) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtCatchStmt>
objcCatchStmt;
/// Matches Objective-C @finally statements.
///
/// Example matches @finally
/// \code
/// @try {}
/// @finally {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtFinallyStmt>
objcFinallyStmt;
/// Matches expressions that introduce cleanups to be run at the end
/// of the sub-expression's evaluation.
///
/// Example matches std::string()
/// \code
/// const std::string str = std::string();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ExprWithCleanups>
exprWithCleanups;
/// Matches init list expressions.
///
/// Given
/// \code
/// int a[] = { 1, 2 };
/// struct B { int x, y; };
/// B b = { 5, 6 };
/// \endcode
/// initListExpr()
/// matches "{ 1, 2 }" and "{ 5, 6 }"
extern const internal::VariadicDynCastAllOfMatcher<Stmt, InitListExpr>
initListExpr;
/// Matches the syntactic form of init list expressions
/// (if expression have it).
AST_MATCHER_P(InitListExpr, hasSyntacticForm,
              internal::Matcher<Expr>, InnerMatcher) {
  // Init lists that have no distinct syntactic form cannot match.
  const Expr *SyntacticForm = Node.getSyntacticForm();
  if (SyntacticForm == nullptr)
    return false;
  return InnerMatcher.matches(*SyntacticForm, Finder, Builder);
}
/// Matches C++ initializer list expressions.
///
/// Given
/// \code
/// std::vector<int> a({ 1, 2, 3 });
/// std::vector<int> b = { 4, 5 };
/// int c[] = { 6, 7 };
/// std::pair<int, int> d = { 8, 9 };
/// \endcode
/// cxxStdInitializerListExpr()
/// matches "{ 1, 2, 3 }" and "{ 4, 5 }"
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
CXXStdInitializerListExpr>
cxxStdInitializerListExpr;
/// Matches implicit initializers of init list expressions.
///
/// Given
/// \code
/// point ptarray[10] = { [2].y = 1.0, [2].x = 2.0, [0].x = 1.0 };
/// \endcode
/// implicitValueInitExpr()
/// matches "[0].y" (implicitly)
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImplicitValueInitExpr>
implicitValueInitExpr;
/// Matches paren list expressions.
/// ParenListExprs don't have a predefined type and are used for late parsing.
/// In the final AST, they can be met in template declarations.
///
/// Given
/// \code
/// template<typename T> class X {
/// void f() {
/// X x(*this);
/// int a = 0, b = 1; int i = (a, b);
/// }
/// };
/// \endcode
/// parenListExpr() matches "*this" but NOT matches (a, b) because (a, b)
/// has a predefined type and is a ParenExpr, not a ParenListExpr.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ParenListExpr>
parenListExpr;
/// Matches substitutions of non-type template parameters.
///
/// Given
/// \code
/// template <int N>
/// struct A { static const int n = N; };
/// struct B : public A<42> {};
/// \endcode
/// substNonTypeTemplateParmExpr()
/// matches "N" in the right-hand side of "static const int n = N;"
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
SubstNonTypeTemplateParmExpr>
substNonTypeTemplateParmExpr;
/// Matches using declarations.
///
/// Given
/// \code
/// namespace X { int x; }
/// using X::x;
/// \endcode
/// usingDecl()
/// matches \code using X::x \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, UsingDecl> usingDecl;
/// Matches using namespace declarations.
///
/// Given
/// \code
/// namespace X { int x; }
/// using namespace X;
/// \endcode
/// usingDirectiveDecl()
/// matches \code using namespace X \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, UsingDirectiveDecl>
usingDirectiveDecl;
/// Matches reference to a name that can be looked up during parsing
/// but could not be resolved to a specific declaration.
///
/// Given
/// \code
/// template<typename T>
/// T foo() { T a; return a; }
/// template<typename T>
/// void bar() {
/// foo<T>();
/// }
/// \endcode
/// unresolvedLookupExpr()
/// matches \code foo<T>() \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnresolvedLookupExpr>
unresolvedLookupExpr;
/// Matches unresolved using value declarations.
///
/// Given
/// \code
/// template<typename X>
/// class C : private X {
/// using X::x;
/// };
/// \endcode
/// unresolvedUsingValueDecl()
/// matches \code using X::x \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl,
UnresolvedUsingValueDecl>
unresolvedUsingValueDecl;
/// Matches unresolved using value declarations that involve the
/// typename.
///
/// Given
/// \code
/// template <typename T>
/// struct Base { typedef T Foo; };
///
/// template<typename T>
/// struct S : private Base<T> {
/// using typename Base<T>::Foo;
/// };
/// \endcode
/// unresolvedUsingTypenameDecl()
/// matches \code using Base<T>::Foo \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl,
UnresolvedUsingTypenameDecl>
unresolvedUsingTypenameDecl;
/// Matches a constant expression wrapper.
///
/// Example matches the constant in the case statement:
/// (matcher = constantExpr())
/// \code
/// switch (a) {
/// case 37: break;
/// }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ConstantExpr>
constantExpr;
/// Matches parentheses used in expressions.
///
/// Example matches (foo() + 1)
/// \code
/// int foo() { return 1; }
/// int a = (foo() + 1);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ParenExpr> parenExpr;
/// Matches constructor call expressions (including implicit ones).
///
/// Example matches string(ptr, n) and ptr within arguments of f
/// (matcher = cxxConstructExpr())
/// \code
/// void f(const string &a, const string &b);
/// char *ptr;
/// int n;
/// f(string(ptr, n), ptr);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXConstructExpr>
cxxConstructExpr;
/// Matches unresolved constructor call expressions.
///
/// Example matches T(t) in return statement of f
/// (matcher = cxxUnresolvedConstructExpr())
/// \code
/// template <typename T>
/// void f(const T& t) { return T(t); }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
CXXUnresolvedConstructExpr>
cxxUnresolvedConstructExpr;
/// Matches implicit and explicit this expressions.
///
/// Example matches the implicit this expression in "return i".
/// (matcher = cxxThisExpr())
/// \code
/// struct foo {
/// int i;
/// int f() { return i; }
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXThisExpr>
cxxThisExpr;
/// Matches nodes where temporaries are created.
///
/// Example matches FunctionTakesString(GetStringByValue())
/// (matcher = cxxBindTemporaryExpr())
/// \code
/// FunctionTakesString(GetStringByValue());
/// FunctionTakesStringByPointer(GetStringPointer());
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXBindTemporaryExpr>
cxxBindTemporaryExpr;
/// Matches nodes where temporaries are materialized.
///
/// Example: Given
/// \code
/// struct T {void func();};
/// T f();
/// void g(T);
/// \endcode
/// materializeTemporaryExpr() matches 'f()' in these statements
/// \code
/// T u(f());
/// g(f());
/// f().func();
/// \endcode
/// but does not match
/// \code
/// f();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
MaterializeTemporaryExpr>
materializeTemporaryExpr;
/// Matches new expressions.
///
/// Given
/// \code
/// new X;
/// \endcode
/// cxxNewExpr()
/// matches 'new X'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNewExpr> cxxNewExpr;
/// Matches delete expressions.
///
/// Given
/// \code
/// delete X;
/// \endcode
/// cxxDeleteExpr()
/// matches 'delete X'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDeleteExpr>
cxxDeleteExpr;
/// Matches array subscript expressions.
///
/// Given
/// \code
/// int i = a[1];
/// \endcode
/// arraySubscriptExpr()
/// matches "a[1]"
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ArraySubscriptExpr>
arraySubscriptExpr;
/// Matches the value of a default argument at the call site.
///
/// Example matches the CXXDefaultArgExpr placeholder inserted for the
/// default value of the second parameter in the call expression f(42)
/// (matcher = cxxDefaultArgExpr())
/// \code
/// void f(int x, int y = 0);
/// f(42);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDefaultArgExpr>
cxxDefaultArgExpr;
/// Matches overloaded operator calls.
///
/// Note that if an operator isn't overloaded, it won't match. Instead, use
/// binaryOperator matcher.
/// Currently it does not match operators such as new delete.
/// FIXME: figure out why these do not match?
///
/// Example matches both operator<<((o << b), c) and operator<<(o, b)
/// (matcher = cxxOperatorCallExpr())
/// \code
/// ostream &operator<< (ostream &out, int i) { };
/// ostream &o; int b = 1, c = 1;
/// o << b << c;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXOperatorCallExpr>
cxxOperatorCallExpr;
/// Matches expressions.
///
/// Example matches x()
/// \code
/// void f() { x(); }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, Expr> expr;
/// Matches expressions that refer to declarations.
///
/// Example matches x in if (x)
/// \code
/// bool x;
/// if (x) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DeclRefExpr>
declRefExpr;
/// Matches a reference to an ObjCIvar.
///
/// Example: matches "a" in "init" method:
/// \code
/// @implementation A {
/// NSString *a;
/// }
/// - (void) init {
/// a = @"hello";
/// }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCIvarRefExpr>
objcIvarRefExpr;
/// Matches a reference to a block.
///
/// Example: matches "^{}":
/// \code
/// void f() { ^{}(); }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, BlockExpr> blockExpr;
/// Matches if statements.
///
/// Example matches 'if (x) {}'
/// \code
/// if (x) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, IfStmt> ifStmt;
/// Matches for statements.
///
/// Example matches 'for (;;) {}'
/// \code
/// for (;;) {}
/// int i[] = {1, 2, 3}; for (auto a : i);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ForStmt> forStmt;
/// Matches the increment statement of a for loop.
///
/// Example:
/// forStmt(hasIncrement(unaryOperator(hasOperatorName("++"))))
/// matches '++x' in
/// \code
/// for (x; x < N; ++x) { }
/// \endcode
AST_MATCHER_P(ForStmt, hasIncrement, internal::Matcher<Stmt>,
              InnerMatcher) {
  // The increment clause of a for-loop is optional; with no increment there
  // is nothing for InnerMatcher to match.
  if (const Stmt *Inc = Node.getInc())
    return InnerMatcher.matches(*Inc, Finder, Builder);
  return false;
}
/// Matches the initialization statement of a for loop.
///
/// Example:
/// forStmt(hasLoopInit(declStmt()))
/// matches 'int x = 0' in
/// \code
/// for (int x = 0; x < N; ++x) { }
/// \endcode
AST_MATCHER_P(ForStmt, hasLoopInit, internal::Matcher<Stmt>,
              InnerMatcher) {
  // The init statement of a for-loop is optional; only match when present.
  if (const Stmt *InitStmt = Node.getInit())
    return InnerMatcher.matches(*InitStmt, Finder, Builder);
  return false;
}
/// Matches range-based for statements.
///
/// cxxForRangeStmt() matches 'for (auto a : i)'
/// \code
/// int i[] = {1, 2, 3}; for (auto a : i);
/// for(int j = 0; j < 5; ++j);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXForRangeStmt>
cxxForRangeStmt;
/// Matches the initialization statement of a for loop.
///
/// Example:
/// forStmt(hasLoopVariable(anything()))
/// matches 'int x' in
/// \code
/// for (int x : a) { }
/// \endcode
AST_MATCHER_P(CXXForRangeStmt, hasLoopVariable, internal::Matcher<VarDecl>,
              InnerMatcher) {
  // Guard against a null loop variable (e.g. in dependent/invalid code)
  // before forwarding to the inner matcher.
  if (const VarDecl *LoopVar = Node.getLoopVariable())
    return InnerMatcher.matches(*LoopVar, Finder, Builder);
  return false;
}
/// Matches the range initialization statement of a for loop.
///
/// Example:
/// forStmt(hasRangeInit(anything()))
/// matches 'a' in
/// \code
/// for (int x : a) { }
/// \endcode
AST_MATCHER_P(CXXForRangeStmt, hasRangeInit, internal::Matcher<Expr>,
              InnerMatcher) {
  // Only match when the range expression is actually present.
  if (const Expr *RangeInit = Node.getRangeInit())
    return InnerMatcher.matches(*RangeInit, Finder, Builder);
  return false;
}
/// Matches while statements.
///
/// Given
/// \code
/// while (true) {}
/// \endcode
/// whileStmt()
/// matches 'while (true) {}'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, WhileStmt> whileStmt;
/// Matches do statements.
///
/// Given
/// \code
/// do {} while (true);
/// \endcode
/// doStmt()
/// matches 'do {} while(true)'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DoStmt> doStmt;
/// Matches break statements.
///
/// Given
/// \code
/// while (true) { break; }
/// \endcode
/// breakStmt()
/// matches 'break'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, BreakStmt> breakStmt;
/// Matches continue statements.
///
/// Given
/// \code
/// while (true) { continue; }
/// \endcode
/// continueStmt()
/// matches 'continue'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ContinueStmt>
continueStmt;
/// Matches return statements.
///
/// Given
/// \code
/// return 1;
/// \endcode
/// returnStmt()
/// matches 'return 1'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ReturnStmt> returnStmt;
/// Matches goto statements.
///
/// Given
/// \code
/// goto FOO;
/// FOO: bar();
/// \endcode
/// gotoStmt()
/// matches 'goto FOO'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, GotoStmt> gotoStmt;
/// Matches label statements.
///
/// Given
/// \code
/// goto FOO;
/// FOO: bar();
/// \endcode
/// labelStmt()
/// matches 'FOO:'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, LabelStmt> labelStmt;
/// Matches address of label statements (GNU extension).
///
/// Given
/// \code
/// FOO: bar();
/// void *ptr = &&FOO;
/// goto *bar;
/// \endcode
/// addrLabelExpr()
/// matches '&&FOO'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, AddrLabelExpr>
addrLabelExpr;
/// Matches switch statements.
///
/// Given
/// \code
/// switch(a) { case 42: break; default: break; }
/// \endcode
/// switchStmt()
/// matches 'switch(a)'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, SwitchStmt> switchStmt;
/// Matches case and default statements inside switch statements.
///
/// Given
/// \code
/// switch(a) { case 42: break; default: break; }
/// \endcode
/// switchCase()
/// matches 'case 42:' and 'default:'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, SwitchCase> switchCase;
/// Matches case statements inside switch statements.
///
/// Given
/// \code
/// switch(a) { case 42: break; default: break; }
/// \endcode
/// caseStmt()
/// matches 'case 42:'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CaseStmt> caseStmt;
/// Matches default statements inside switch statements.
///
/// Given
/// \code
/// switch(a) { case 42: break; default: break; }
/// \endcode
/// defaultStmt()
/// matches 'default:'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DefaultStmt>
defaultStmt;
/// Matches compound statements.
///
/// Example matches '{}' and '{{}}' in 'for (;;) {{}}'
/// \code
/// for (;;) {{}}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CompoundStmt>
compoundStmt;
/// Matches catch statements.
///
/// \code
/// try {} catch(int i) {}
/// \endcode
/// cxxCatchStmt()
/// matches 'catch(int i)'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXCatchStmt>
cxxCatchStmt;
/// Matches try statements.
///
/// \code
/// try {} catch(int i) {}
/// \endcode
/// cxxTryStmt()
/// matches 'try {}'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXTryStmt> cxxTryStmt;
/// Matches throw expressions.
///
/// \code
/// try { throw 5; } catch(int i) {}
/// \endcode
/// cxxThrowExpr()
/// matches 'throw 5'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXThrowExpr>
cxxThrowExpr;
/// Matches null statements.
///
/// \code
/// foo();;
/// \endcode
/// nullStmt()
/// matches the second ';'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, NullStmt> nullStmt;
/// Matches asm statements.
///
/// \code
/// int i = 100;
/// __asm("mov al, 2");
/// \endcode
/// asmStmt()
/// matches '__asm("mov al, 2")'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, AsmStmt> asmStmt;
/// Matches bool literals.
///
/// Example matches true
/// \code
/// true
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXBoolLiteralExpr>
cxxBoolLiteral;
/// Matches string literals (also matches wide string literals).
///
/// Example matches "abcd", L"abcd"
/// \code
/// char *s = "abcd";
/// wchar_t *ws = L"abcd";
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, StringLiteral>
stringLiteral;
/// Matches character literals (also matches wchar_t).
///
/// Not matching Hex-encoded chars (e.g. 0x1234, which is a IntegerLiteral),
/// though.
///
/// Example matches 'a', L'a'
/// \code
/// char ch = 'a';
/// wchar_t chw = L'a';
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CharacterLiteral>
characterLiteral;
/// Matches integer literals of all sizes / encodings, e.g.
/// 1, 1L, 0x1 and 1U.
///
/// Does not match character-encoded integers such as L'a'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, IntegerLiteral>
integerLiteral;
/// Matches float literals of all sizes / encodings, e.g.
/// 1.0, 1.0f, 1.0L and 1e10.
///
/// Does not match implicit conversions such as
/// \code
/// float a = 10;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, FloatingLiteral>
floatLiteral;
/// Matches imaginary literals, which are based on integer and floating
/// point literals e.g.: 1i, 1.0i
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImaginaryLiteral>
imaginaryLiteral;
/// Matches user defined literal operator call.
///
/// Example match: "foo"_suffix
extern const internal::VariadicDynCastAllOfMatcher<Stmt, UserDefinedLiteral>
userDefinedLiteral;
/// Matches compound (i.e. non-scalar) literals
///
/// Example match: {1}, (1, 2)
/// \code
/// int array[4] = {1};
/// vector int myvec = (vector int)(1, 2);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CompoundLiteralExpr>
compoundLiteralExpr;
/// Matches nullptr literal.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNullPtrLiteralExpr>
cxxNullPtrLiteralExpr;
/// Matches GNU __builtin_choose_expr.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ChooseExpr>
chooseExpr;
/// Matches GNU __null expression.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, GNUNullExpr>
gnuNullExpr;
/// Matches atomic builtins.
/// Example matches __atomic_load_n(ptr, 1)
/// \code
/// void foo() { int *ptr; __atomic_load_n(ptr, 1); }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, AtomicExpr> atomicExpr;
/// Matches statement expression (GNU extension).
///
/// Example match: ({ int X = 4; X; })
/// \code
/// int C = ({ int X = 4; X; });
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, StmtExpr> stmtExpr;
/// Matches binary operator expressions.
///
/// Example matches a || b
/// \code
/// !(a || b)
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, BinaryOperator>
binaryOperator;
/// Matches unary operator expressions.
///
/// Example matches !a
/// \code
/// !a || b
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnaryOperator>
unaryOperator;
/// Matches conditional operator expressions.
///
/// Example matches a ? b : c
/// \code
/// (a ? b : c) + 42
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ConditionalOperator>
conditionalOperator;
/// Matches binary conditional operator expressions (GNU extension).
///
/// Example matches a ?: b
/// \code
/// (a ?: b) + 42;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
BinaryConditionalOperator>
binaryConditionalOperator;
/// Matches opaque value expressions. They are used as helpers
/// to reference another expressions and can be met
/// in BinaryConditionalOperators, for example.
///
/// Example matches 'a'
/// \code
/// (a ?: c) + 42;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, OpaqueValueExpr>
opaqueValueExpr;
/// Matches a C++ static_assert declaration.
///
/// Example:
/// staticAssertDecl()
/// matches
/// static_assert(sizeof(S) == sizeof(int))
/// in
/// \code
/// struct S {
/// int x;
/// };
/// static_assert(sizeof(S) == sizeof(int));
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, StaticAssertDecl>
staticAssertDecl;
/// Matches a reinterpret_cast expression.
///
/// Either the source expression or the destination type can be matched
/// using has(), but hasDestinationType() is more specific and can be
/// more readable.
///
/// Example matches reinterpret_cast<char*>(&p) in
/// \code
/// void* p = reinterpret_cast<char*>(&p);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXReinterpretCastExpr>
cxxReinterpretCastExpr;
/// Matches a C++ static_cast expression.
///
/// \see hasDestinationType
/// \see reinterpretCast
///
/// Example:
/// cxxStaticCastExpr()
/// matches
/// static_cast<long>(8)
/// in
/// \code
/// long eight(static_cast<long>(8));
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXStaticCastExpr>
cxxStaticCastExpr;
/// Matches a dynamic_cast expression.
///
/// Example:
/// cxxDynamicCastExpr()
/// matches
/// dynamic_cast<D*>(&b);
/// in
/// \code
/// struct B { virtual ~B() {} }; struct D : B {};
/// B b;
/// D* p = dynamic_cast<D*>(&b);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDynamicCastExpr>
cxxDynamicCastExpr;
/// Matches a const_cast expression.
///
/// Example: Matches const_cast<int*>(&r) in
/// \code
/// int n = 42;
/// const int &r(n);
/// int* p = const_cast<int*>(&r);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXConstCastExpr>
cxxConstCastExpr;
/// Matches a C-style cast expression.
///
/// Example: Matches (int) 2.2f in
/// \code
/// int i = (int) 2.2f;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CStyleCastExpr>
cStyleCastExpr;
/// Matches explicit cast expressions.
///
/// Matches any cast expression written in user code, whether it be a
/// C-style cast, a functional-style cast, or a keyword cast.
///
/// Does not match implicit conversions.
///
/// Note: the name "explicitCast" is chosen to match Clang's terminology, as
/// Clang uses the term "cast" to apply to implicit conversions as well as to
/// actual cast expressions.
///
/// \see hasDestinationType.
///
/// Example: matches all five of the casts in
/// \code
/// int((int)(reinterpret_cast<int>(static_cast<int>(const_cast<int>(42)))))
/// \endcode
/// but does not match the implicit conversion in
/// \code
/// long ell = 42;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ExplicitCastExpr>
explicitCastExpr;
/// Matches the implicit cast nodes of Clang's AST.
///
/// This matches many different places, including function call return value
/// eliding, as well as any type conversions.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImplicitCastExpr>
implicitCastExpr;
/// Matches any cast nodes of Clang's AST.
///
/// Example: castExpr() matches each of the following:
/// \code
/// (int) 3;
/// const_cast<Expr *>(SubExpr);
/// char c = 0;
/// \endcode
/// but does not match
/// \code
/// int i = (0);
/// int k = 0;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CastExpr> castExpr;
/// Matches functional cast expressions
///
/// Example: Matches Foo(bar);
/// \code
/// Foo f = bar;
/// Foo g = (Foo) bar;
/// Foo h = Foo(bar);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXFunctionalCastExpr>
cxxFunctionalCastExpr;
/// Matches functional cast expressions having N != 1 arguments
///
/// Example: Matches Foo(bar, bar)
/// \code
/// Foo h = Foo(bar, bar);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXTemporaryObjectExpr>
cxxTemporaryObjectExpr;
/// Matches predefined identifier expressions [C99 6.4.2.2].
///
/// Example: Matches __func__
/// \code
/// printf("%s", __func__);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, PredefinedExpr>
predefinedExpr;
/// Matches C99 designated initializer expressions [C99 6.7.8].
///
/// Example: Matches { [2].y = 1.0, [0].x = 1.0 }
/// \code
/// point ptarray[10] = { [2].y = 1.0, [0].x = 1.0 };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DesignatedInitExpr>
designatedInitExpr;
/// Matches designated initializer expressions that contain
/// a specific number of designators.
///
/// Example: Given
/// \code
/// point ptarray[10] = { [2].y = 1.0, [0].x = 1.0 };
/// point ptarray2[10] = { [2].y = 1.0, [2].x = 0.0, [0].x = 1.0 };
/// \endcode
/// designatorCountIs(2)
/// matches '{ [2].y = 1.0, [0].x = 1.0 }',
/// but not '{ [2].y = 1.0, [2].x = 0.0, [0].x = 1.0 }'.
AST_MATCHER_P(DesignatedInitExpr, designatorCountIs, unsigned, N) {
  // size() reports the number of designators in this initializer.
  return N == Node.size();
}
/// Matches \c QualTypes in the clang AST.
extern const internal::VariadicAllOfMatcher<QualType> qualType;
/// Matches \c Types in the clang AST.
extern const internal::VariadicAllOfMatcher<Type> type;
/// Matches \c TypeLocs in the clang AST.
extern const internal::VariadicAllOfMatcher<TypeLoc> typeLoc;
/// Matches if any of the given matchers matches.
///
/// Unlike \c anyOf, \c eachOf will generate a match result for each
/// matching submatcher.
///
/// For example, in:
/// \code
/// class A { int a; int b; };
/// \endcode
/// The matcher:
/// \code
/// cxxRecordDecl(eachOf(has(fieldDecl(hasName("a")).bind("v")),
/// has(fieldDecl(hasName("b")).bind("v"))))
/// \endcode
/// will generate two results binding "v", the first of which binds
/// the field declaration of \c a, the second the field declaration of
/// \c b.
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<
2, std::numeric_limits<unsigned>::max()>
eachOf;
/// Matches if any of the given matchers matches.
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<
2, std::numeric_limits<unsigned>::max()>
anyOf;
/// Matches if all given matchers match.
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<
2, std::numeric_limits<unsigned>::max()>
allOf;
/// Matches sizeof (C99), alignof (C++11) and vec_step (OpenCL)
///
/// Given
/// \code
/// Foo x = bar;
/// int y = sizeof(x) + alignof(x);
/// \endcode
/// unaryExprOrTypeTraitExpr()
/// matches \c sizeof(x) and \c alignof(x)
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
UnaryExprOrTypeTraitExpr>
unaryExprOrTypeTraitExpr;
/// Matches unary expressions that have a specific type of argument.
///
/// Given
/// \code
/// int a, c; float b; int s = sizeof(a) + sizeof(b) + alignof(c);
/// \endcode
/// unaryExprOrTypeTraitExpr(hasArgumentOfType(asString("int")))
/// matches \c sizeof(a) and \c alignof(c)
AST_MATCHER_P(UnaryExprOrTypeTraitExpr, hasArgumentOfType,
              internal::Matcher<QualType>, InnerMatcher) {
  // Match the inner matcher directly against the operand's type.
  return InnerMatcher.matches(Node.getTypeOfArgument(), Finder, Builder);
}
/// Matches unary expressions of a certain kind.
///
/// Given
/// \code
/// int x;
/// int s = sizeof(x) + alignof(x)
/// \endcode
/// unaryExprOrTypeTraitExpr(ofKind(UETT_SizeOf))
/// matches \c sizeof(x)
///
/// If the matcher is used from clang-query, UnaryExprOrTypeTrait parameter
/// should be passed as a quoted string. e.g., ofKind("UETT_SizeOf").
AST_MATCHER_P(UnaryExprOrTypeTraitExpr, ofKind, UnaryExprOrTypeTrait, Kind) {
  // Compare the trait kind (sizeof / alignof / vec_step / ...) for equality.
  return Kind == Node.getKind();
}
/// Same as unaryExprOrTypeTraitExpr, but only matching
/// alignof.
inline internal::Matcher<Stmt> alignOfExpr(
    const internal::Matcher<UnaryExprOrTypeTraitExpr> &InnerMatcher) {
  // Accept both the standard alignof kind and the preferred-alignment
  // variant, in addition to whatever InnerMatcher requires.
  const auto AlignKind =
      anyOf(ofKind(UETT_AlignOf), ofKind(UETT_PreferredAlignOf));
  return stmt(unaryExprOrTypeTraitExpr(allOf(AlignKind, InnerMatcher)));
}
/// Same as unaryExprOrTypeTraitExpr, but only matching
/// sizeof.
inline internal::Matcher<Stmt> sizeOfExpr(
    const internal::Matcher<UnaryExprOrTypeTraitExpr> &InnerMatcher) {
  // Restrict unaryExprOrTypeTraitExpr to the sizeof kind only.
  return stmt(
      unaryExprOrTypeTraitExpr(allOf(ofKind(UETT_SizeOf), InnerMatcher)));
}
/// Matches NamedDecl nodes that have the specified name.
///
/// Supports specifying enclosing namespaces or classes by prefixing the name
/// with '<enclosing>::'.
/// Does not match typedefs of an underlying type with the given name.
///
/// Example matches X (Name == "X")
/// \code
/// class X;
/// \endcode
///
/// Example matches X (Name is one of "::a::b::X", "a::b::X", "b::X", "X")
/// \code
/// namespace a { namespace b { class X; } }
/// \endcode
inline internal::Matcher<NamedDecl> hasName(const std::string &Name) {
  // HasNameMatcher accepts a list of names; wrap the single name.
  std::vector<std::string> Names = {Name};
  return internal::Matcher<NamedDecl>(new internal::HasNameMatcher(Names));
}
/// Matches NamedDecl nodes that have any of the specified names.
///
/// This matcher is only provided as a performance optimization of hasName.
/// \code
/// hasAnyName(a, b, c)
/// \endcode
/// is equivalent to, but faster than
/// \code
/// anyOf(hasName(a), hasName(b), hasName(c))
/// \endcode
extern const internal::VariadicFunction<internal::Matcher<NamedDecl>, StringRef,
internal::hasAnyNameFunc>
hasAnyName;
/// Matches NamedDecl nodes whose fully qualified names contain
/// a substring matched by the given RegExp.
///
/// Supports specifying enclosing namespaces or classes by
/// prefixing the name with '<enclosing>::'. Does not match typedefs
/// of an underlying type with the given name.
///
/// Example matches X (regexp == "::X")
/// \code
/// class X;
/// \endcode
///
/// Example matches X (regexp is one of "::X", "^foo::.*X", among others)
/// \code
/// namespace foo { namespace bar { class X; } }
/// \endcode
AST_MATCHER_P(NamedDecl, matchesName, std::string, RegExp) {
  assert(!RegExp.empty());
  // Prefix "::" so patterns can anchor on the fully qualified name
  // (e.g. "::X" matches a top-level class X).
  llvm::Regex RE(RegExp);
  return RE.match("::" + Node.getQualifiedNameAsString());
}
/// Matches overloaded operator names.
///
/// Matches overloaded operator names specified in strings without the
/// "operator" prefix: e.g. "<<".
///
/// Given:
/// \code
/// class A { int operator*(); };
/// const A &operator<<(const A &a, const A &b);
/// A a;
/// a << a; // <-- This matches
/// \endcode
///
/// \c cxxOperatorCallExpr(hasOverloadedOperatorName("<<"))) matches the
/// specified line and
/// \c cxxRecordDecl(hasMethod(hasOverloadedOperatorName("*")))
/// matches the declaration of \c A.
///
/// Usable as: Matcher<CXXOperatorCallExpr>, Matcher<FunctionDecl>
// Constructs a polymorphic matcher parameterised by the operator's spelling
// (without the "operator" prefix, e.g. "<<" or "*"); the actual name
// comparison happens inside HasOverloadedOperatorNameMatcher at match time.
inline internal::PolymorphicMatcherWithParam1<
    internal::HasOverloadedOperatorNameMatcher, StringRef,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl)>
hasOverloadedOperatorName(StringRef Name) {
  return internal::PolymorphicMatcherWithParam1<
      internal::HasOverloadedOperatorNameMatcher, StringRef,
      AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl)>(Name);
}
/// Matches C++ classes that are directly or indirectly derived from a class
/// matching \c Base, or Objective-C classes that directly or indirectly
/// subclass a class matching \c Base.
///
/// Note that a class is not considered to be derived from itself.
///
/// Example matches Y, Z, C (Base == hasName("X"))
/// \code
/// class X;
/// class Y : public X {}; // directly derived
/// class Z : public Y {}; // indirectly derived
/// typedef X A;
/// typedef A B;
/// class C : public B {}; // derived from a typedef of X
/// \endcode
///
/// In the following example, Bar matches isDerivedFrom(hasName("X")):
/// \code
/// class Foo;
/// typedef Foo X;
/// class Bar : public Foo {}; // derived from a type that X is a typedef of
/// \endcode
///
/// In the following example, Bar matches isDerivedFrom(hasName("NSObject"))
/// \code
/// @interface NSObject @end
/// @interface Bar : NSObject @end
/// \endcode
///
/// Usable as: Matcher<CXXRecordDecl>, Matcher<ObjCInterfaceDecl>
AST_POLYMORPHIC_MATCHER_P(
    isDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    internal::Matcher<NamedDecl>, Base) {
  // Dispatch on the node's actual kind: Objective-C interfaces and C++
  // records use different derivation queries on the match finder.
  if (const auto *Interface = dyn_cast<ObjCInterfaceDecl>(&Node))
    return Finder->objcClassIsDerivedFrom(Interface, Base, Builder,
                                          /*Directly=*/false);
  // Anything else must be a C++ struct/union/class.
  return Finder->classIsDerivedFrom(cast<CXXRecordDecl>(&Node), Base, Builder,
                                    /*Directly=*/false);
}
/// Overloaded method as shortcut for \c isDerivedFrom(hasName(...)).
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    std::string, BaseName, 1) {
  // An empty string cannot name a base class; fail fast.
  if (BaseName.empty())
    return false;
  const auto Inner = isDerivedFrom(hasName(BaseName));
  if (const auto *Interface = dyn_cast<ObjCInterfaceDecl>(&Node))
    return Matcher<ObjCInterfaceDecl>(Inner).matches(*Interface, Finder,
                                                     Builder);
  return Matcher<CXXRecordDecl>(Inner).matches(*cast<CXXRecordDecl>(&Node),
                                               Finder, Builder);
}
/// Similar to \c isDerivedFrom(), but also matches classes that directly
/// match \c Base.
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isSameOrDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    internal::Matcher<NamedDecl>, Base, 0) {
  // "Same or derived" means: the node itself matches Base, or it is derived
  // from a class matching Base.
  const auto SameOrDerived = anyOf(Base, isDerivedFrom(Base));
  if (const auto *Interface = dyn_cast<ObjCInterfaceDecl>(&Node))
    return Matcher<ObjCInterfaceDecl>(SameOrDerived)
        .matches(*Interface, Finder, Builder);
  return Matcher<CXXRecordDecl>(SameOrDerived)
      .matches(*cast<CXXRecordDecl>(&Node), Finder, Builder);
}
/// Overloaded method as shortcut for
/// \c isSameOrDerivedFrom(hasName(...)).
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isSameOrDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    std::string, BaseName, 1) {
  // An empty string cannot name a class; fail fast.
  if (BaseName.empty())
    return false;
  const auto Inner = isSameOrDerivedFrom(hasName(BaseName));
  if (const auto *Interface = dyn_cast<ObjCInterfaceDecl>(&Node))
    return Matcher<ObjCInterfaceDecl>(Inner).matches(*Interface, Finder,
                                                     Builder);
  return Matcher<CXXRecordDecl>(Inner).matches(*cast<CXXRecordDecl>(&Node),
                                               Finder, Builder);
}
/// Matches C++ or Objective-C classes that are directly derived from a class
/// matching \c Base.
///
/// Note that a class is not considered to be derived from itself.
///
/// Example matches Y, C (Base == hasName("X"))
/// \code
/// class X;
/// class Y : public X {}; // directly derived
/// class Z : public Y {}; // indirectly derived
/// typedef X A;
/// typedef A B;
/// class C : public B {}; // derived from a typedef of X
/// \endcode
///
/// In the following example, Bar matches isDerivedFrom(hasName("X")):
/// \code
/// class Foo;
/// typedef Foo X;
/// class Bar : public Foo {}; // derived from a type that X is a typedef of
/// \endcode
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isDirectlyDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    internal::Matcher<NamedDecl>, Base, 0) {
  // Same dispatch as isDerivedFrom, but restricted to direct bases.
  if (const auto *Interface = dyn_cast<ObjCInterfaceDecl>(&Node))
    return Finder->objcClassIsDerivedFrom(Interface, Base, Builder,
                                          /*Directly=*/true);
  // Anything else must be a C++ struct/union/class.
  return Finder->classIsDerivedFrom(cast<CXXRecordDecl>(&Node), Base, Builder,
                                    /*Directly=*/true);
}
/// Overloaded method as shortcut for \c isDirectlyDerivedFrom(hasName(...)).
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isDirectlyDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    std::string, BaseName, 1) {
  // An empty string cannot name a base class; fail fast.
  if (BaseName.empty())
    return false;
  const auto Inner = isDirectlyDerivedFrom(hasName(BaseName));
  if (const auto *Interface = dyn_cast<ObjCInterfaceDecl>(&Node))
    return Matcher<ObjCInterfaceDecl>(Inner).matches(*Interface, Finder,
                                                     Builder);
  return Matcher<CXXRecordDecl>(Inner).matches(*cast<CXXRecordDecl>(&Node),
                                               Finder, Builder);
}
/// Matches the first method of a class or struct that satisfies \c
/// InnerMatcher.
///
/// Given:
/// \code
/// class A { void func(); };
/// class B { void member(); };
/// \endcode
///
/// \c cxxRecordDecl(hasMethod(hasName("func"))) matches the declaration of
/// \c A but not \c B.
AST_MATCHER_P(CXXRecordDecl, hasMethod, internal::Matcher<CXXMethodDecl>,
              InnerMatcher) {
  // Scan the record's methods in declaration order and succeed on the first
  // one satisfying InnerMatcher.
  return matchesFirstInPointerRange(InnerMatcher, Node.method_begin(),
                                    Node.method_end(), Finder, Builder);
}
/// Matches the generated class of lambda expressions.
///
/// Given:
/// \code
/// auto x = []{};
/// \endcode
///
/// \c cxxRecordDecl(isLambda()) matches the implicit class declaration of
/// \c decltype(x)
AST_MATCHER(CXXRecordDecl, isLambda) {
  // Delegate to the record's own lambda flag; per the doc comment above this
  // is true only for the compiler-generated closure class of a lambda.
  return Node.isLambda();
}
/// Matches AST nodes that have child AST nodes that match the
/// provided matcher.
///
/// Example matches X, Y
/// (matcher = cxxRecordDecl(has(cxxRecordDecl(hasName("X")))))
/// \code
/// class X {}; // Matches X, because X::X is a class of name X inside X.
/// class Y { class X {}; };
/// class Z { class Y { class X {}; }; }; // Does not match Z.
/// \endcode
///
/// ChildT must be an AST base type.
///
/// Usable as: Any Matcher
/// Note that has is direct matcher, so it also matches things like implicit
/// casts and paren casts. If you are matching with expr then you should
/// probably consider using ignoringParenImpCasts like:
/// has(ignoringParenImpCasts(expr())).
extern const internal::ArgumentAdaptingMatcherFunc<internal::HasMatcher> has;
/// Matches AST nodes that have descendant AST nodes that match the
/// provided matcher.
///
/// Example matches X, Y, Z
/// (matcher = cxxRecordDecl(hasDescendant(cxxRecordDecl(hasName("X")))))
/// \code
/// class X {}; // Matches X, because X::X is a class of name X inside X.
/// class Y { class X {}; };
/// class Z { class Y { class X {}; }; };
/// \endcode
///
/// DescendantT must be an AST base type.
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<
internal::HasDescendantMatcher>
hasDescendant;
/// Matches AST nodes that have child AST nodes that match the
/// provided matcher.
///
/// Example matches X, Y, Y::X, Z::Y, Z::Y::X
/// (matcher = cxxRecordDecl(forEach(cxxRecordDecl(hasName("X")))))
/// \code
/// class X {};
/// class Y { class X {}; }; // Matches Y, because Y::X is a class of name X
/// // inside Y.
/// class Z { class Y { class X {}; }; }; // Does not match Z.
/// \endcode
///
/// ChildT must be an AST base type.
///
/// As opposed to 'has', 'forEach' will cause a match for each result that
/// matches instead of only on the first one.
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<internal::ForEachMatcher>
forEach;
/// Matches AST nodes that have descendant AST nodes that match the
/// provided matcher.
///
/// Example matches X, A, A::X, B, B::C, B::C::X
/// (matcher = cxxRecordDecl(forEachDescendant(cxxRecordDecl(hasName("X")))))
/// \code
/// class X {};
/// class A { class X {}; }; // Matches A, because A::X is a class of name
/// // X inside A.
/// class B { class C { class X {}; }; };
/// \endcode
///
/// DescendantT must be an AST base type.
///
/// As opposed to 'hasDescendant', 'forEachDescendant' will cause a match for
/// each result that matches instead of only on the first one.
///
/// Note: Recursively combined ForEachDescendant can cause many matches:
/// cxxRecordDecl(forEachDescendant(cxxRecordDecl(
/// forEachDescendant(cxxRecordDecl())
/// )))
/// will match 10 times (plus injected class name matches) on:
/// \code
/// class A { class B { class C { class D { class E {}; }; }; }; };
/// \endcode
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<
internal::ForEachDescendantMatcher>
forEachDescendant;
/// Matches if the node or any descendant matches.
///
/// Generates results for each match.
///
/// Implemented as eachOf(Matcher, forEachDescendant(Matcher)): the node
/// itself is tried first, then every descendant.
///
/// For example, in:
/// \code
/// class A { class B {}; class C {}; };
/// \endcode
/// The matcher:
/// \code
/// cxxRecordDecl(hasName("::A"),
/// findAll(cxxRecordDecl(isDefinition()).bind("m")))
/// \endcode
/// will generate results for \c A, \c B and \c C.
///
/// Usable as: Any Matcher
template <typename T>
internal::Matcher<T> findAll(const internal::Matcher<T> &Matcher) {
return eachOf(Matcher, forEachDescendant(Matcher));
}
/// Matches AST nodes that have a parent that matches the provided
/// matcher.
///
/// As opposed to \c hasAncestor, only the immediate parent is considered.
///
/// Given
/// \code
/// void f() { for (;;) { int x = 42; if (true) { int x = 43; } } }
/// \endcode
/// \c compoundStmt(hasParent(ifStmt())) matches "{ int x = 43; }".
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<
internal::HasParentMatcher,
internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>,
internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>>
hasParent;
/// Matches AST nodes that have an ancestor that matches the provided
/// matcher.
///
/// Given
/// \code
/// void f() { if (true) { int x = 42; } }
/// void g() { for (;;) { int x = 43; } }
/// \endcode
/// \c expr(integerLiteral(hasAncestor(ifStmt()))) matches \c 42, but not 43.
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<
internal::HasAncestorMatcher,
internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>,
internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>>
hasAncestor;
/// Matches if the provided matcher does not match.
///
/// Example matches Y (matcher = cxxRecordDecl(unless(hasName("X"))))
/// \code
/// class X {};
/// class Y {};
/// \endcode
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<1, 1> unless;
/// Matches a node if the declaration associated with that node
/// matches the given matcher.
///
/// The associated declaration is:
/// - for type nodes, the declaration of the underlying type
/// - for CallExpr, the declaration of the callee
/// - for MemberExpr, the declaration of the referenced member
/// - for CXXConstructExpr, the declaration of the constructor
/// - for CXXNewExpr, the declaration of the operator new
/// - for ObjCIvarExpr, the declaration of the ivar
///
/// For type nodes, hasDeclaration will generally match the declaration of the
/// sugared type. Given
/// \code
/// class X {};
/// typedef X Y;
/// Y y;
/// \endcode
/// in varDecl(hasType(hasDeclaration(decl()))) the decl will match the
/// typedefDecl. A common use case is to match the underlying, desugared type.
/// This can be achieved by using the hasUnqualifiedDesugaredType matcher:
/// \code
/// varDecl(hasType(hasUnqualifiedDesugaredType(
///     recordType(hasDeclaration(decl())))))
/// \endcode
/// In this matcher, the decl will match the CXXRecordDecl of class X.
///
/// Usable as: Matcher<AddrLabelExpr>, Matcher<CallExpr>,
/// Matcher<CXXConstructExpr>, Matcher<CXXNewExpr>, Matcher<DeclRefExpr>,
/// Matcher<EnumType>, Matcher<InjectedClassNameType>, Matcher<LabelStmt>,
/// Matcher<MemberExpr>, Matcher<QualType>, Matcher<RecordType>,
/// Matcher<TagType>, Matcher<TemplateSpecializationType>,
/// Matcher<TemplateTypeParmType>, Matcher<TypedefType>,
/// Matcher<UnresolvedUsingType>
inline internal::PolymorphicMatcherWithParam1<
    internal::HasDeclarationMatcher, internal::Matcher<Decl>,
    void(internal::HasDeclarationSupportedTypes)>
hasDeclaration(const internal::Matcher<Decl> &InnerMatcher) {
  // Name the (long) polymorphic matcher type once instead of spelling it
  // out a second time in the return statement.
  using ResultT = internal::PolymorphicMatcherWithParam1<
      internal::HasDeclarationMatcher, internal::Matcher<Decl>,
      void(internal::HasDeclarationSupportedTypes)>;
  return ResultT(InnerMatcher);
}
/// Matches a \c NamedDecl whose underlying declaration matches the given
/// matcher.
///
/// Given
/// \code
/// namespace N { template<class T> void f(T t); }
/// template <class T> void g() { using N::f; f(T()); }
/// \endcode
/// \c unresolvedLookupExpr(hasAnyDeclaration(
///     namedDecl(hasUnderlyingDecl(hasName("::N::f")))))
/// matches the use of \c f in \c g() .
AST_MATCHER_P(NamedDecl, hasUnderlyingDecl, internal::Matcher<NamedDecl>,
              InnerMatcher) {
  // Only run the inner matcher when an underlying declaration exists.
  if (const NamedDecl *Underlying = Node.getUnderlyingDecl())
    return InnerMatcher.matches(*Underlying, Finder, Builder);
  return false;
}
/// Matches on the implicit object argument of a member call expression, after
/// stripping off any parentheses or implicit casts.
///
/// Given
/// \code
/// class Y { public: void m(); };
/// Y g();
/// class X : public Y {};
/// void z(Y y, X x) { y.m(); (g()).m(); x.m(); }
/// \endcode
/// cxxMemberCallExpr(on(hasType(cxxRecordDecl(hasName("Y")))))
/// matches `y.m()` and `(g()).m()`.
/// cxxMemberCallExpr(on(hasType(cxxRecordDecl(hasName("X")))))
/// matches `x.m()`.
/// cxxMemberCallExpr(on(callExpr()))
/// matches `(g()).m()`.
///
/// FIXME: Overload to allow directly matching types?
AST_MATCHER_P(CXXMemberCallExpr, on, internal::Matcher<Expr>, InnerMatcher) {
  const Expr *Object =
      Node.getImplicitObjectArgument()->IgnoreParenImpCasts();
  if (Object == nullptr)
    return false;
  return InnerMatcher.matches(*Object, Finder, Builder);
}
/// Matches on the receiver of an ObjectiveC Message expression.
///
/// Example
/// matcher = objCMessageExpr(hasReceiverType(asString("UIWebView *")));
/// matches the [webView ...] message invocation.
/// \code
/// NSString *webViewJavaScript = ...
/// UIWebView *webView = ...
/// [webView stringByEvaluatingJavaScriptFromString:webViewJavaScript];
/// \endcode
AST_MATCHER_P(ObjCMessageExpr, hasReceiverType, internal::Matcher<QualType>,
              InnerMatcher) {
  // The receiver type is a QualType, so name the local accordingly.
  const QualType ReceiverType = Node.getReceiverType();
  return InnerMatcher.matches(ReceiverType, Finder, Builder);
}
/// Returns true when the Objective-C method declaration is a class method.
///
/// Example
/// matcher = objcMethodDecl(isClassMethod())
/// matches
/// \code
/// @interface I + (void)foo; @end
/// \endcode
/// but not
/// \code
/// @interface I - (void)bar; @end
/// \endcode
///
/// See also: isInstanceMethod().
AST_MATCHER(ObjCMethodDecl, isClassMethod) {
return Node.isClassMethod();
}
/// Returns true when the Objective-C method declaration is an instance method.
///
/// Example
/// matcher = objcMethodDecl(isInstanceMethod())
/// matches
/// \code
/// @interface I - (void)bar; @end
/// \endcode
/// but not
/// \code
/// @interface I + (void)foo; @end
/// \endcode
///
/// See also: isClassMethod().
AST_MATCHER(ObjCMethodDecl, isInstanceMethod) {
return Node.isInstanceMethod();
}
/// Returns true when the Objective-C message is sent to a class.
///
/// Example
/// matcher = objcMessageExpr(isClassMessage())
/// matches
/// \code
/// [NSString stringWithFormat:@"format"];
/// \endcode
/// but not
/// \code
/// NSString *x = @"hello";
/// [x containsString:@"h"];
/// \endcode
///
/// See also: isInstanceMessage().
AST_MATCHER(ObjCMessageExpr, isClassMessage) {
return Node.isClassMessage();
}
/// Returns true when the Objective-C message is sent to an instance.
///
/// Example
/// matcher = objcMessageExpr(isInstanceMessage())
/// matches
/// \code
/// NSString *x = @"hello";
/// [x containsString:@"h"];
/// \endcode
/// but not
/// \code
/// [NSString stringWithFormat:@"format"];
/// \endcode
///
/// See also: isClassMessage().
AST_MATCHER(ObjCMessageExpr, isInstanceMessage) {
return Node.isInstanceMessage();
}
/// Matches if the Objective-C message is sent to an instance,
/// and the inner matcher matches on that instance.
///
/// For example the method call in
/// \code
/// NSString *x = @"hello";
/// [x containsString:@"h"];
/// \endcode
/// is matched by
/// objcMessageExpr(hasReceiver(declRefExpr(to(varDecl(hasName("x"))))))
AST_MATCHER_P(ObjCMessageExpr, hasReceiver, internal::Matcher<Expr>,
              InnerMatcher) {
  // Class messages have no instance receiver; those never match here.
  const Expr *Receiver = Node.getInstanceReceiver();
  if (Receiver == nullptr)
    return false;
  return InnerMatcher.matches(*Receiver->IgnoreParenImpCasts(), Finder,
                              Builder);
}
/// Matches when BaseName == Selector.getAsString()
///
/// matcher = objCMessageExpr(hasSelector("loadHTMLString:baseURL:"));
/// matches the outer message expr in the code below, but NOT the message
/// invocation for self.bodyView.
/// \code
/// [self.bodyView loadHTMLString:html baseURL:NULL];
/// \endcode
AST_MATCHER_P(ObjCMessageExpr, hasSelector, std::string, BaseName) {
  // operator== is the idiomatic (and equivalent) spelling of
  // BaseName.compare(...) == 0.
  return BaseName == Node.getSelector().getAsString();
}
/// Matches when at least one of the supplied strings equals the
/// Selector.getAsString()
///
/// matcher = objCMessageExpr(hasSelector("methodA:", "methodB:"));
/// matches both of the expressions below:
/// \code
/// [myObj methodA:argA];
/// [myObj methodB:argB];
/// \endcode
extern const internal::VariadicFunction<internal::Matcher<ObjCMessageExpr>,
StringRef,
internal::hasAnySelectorFunc>
hasAnySelector;
/// Matches ObjC selectors whose name contains
/// a substring matched by the given RegExp.
/// matcher = objCMessageExpr(matchesSelector("loadHTMLString\:baseURL?"));
/// matches the outer message expr in the code below, but NOT the message
/// invocation for self.bodyView.
/// \code
/// [self.bodyView loadHTMLString:html baseURL:NULL];
/// \endcode
AST_MATCHER_P(ObjCMessageExpr, matchesSelector, std::string, RegExp) {
  assert(!RegExp.empty());
  // Compile the pattern, then test it against the selector's spelling.
  llvm::Regex SelectorPattern(RegExp);
  return SelectorPattern.match(Node.getSelector().getAsString());
}
/// Matches when the selector is the empty selector
///
/// Matches only when the selector of the objCMessageExpr is NULL. This may
/// represent an error condition in the tree!
AST_MATCHER(ObjCMessageExpr, hasNullSelector) {
return Node.getSelector().isNull();
}
/// Matches when the selector is a Unary Selector
///
/// matcher = objCMessageExpr(hasUnarySelector());
/// matches self.bodyView in the code below, but NOT the outer message
/// invocation of "loadHTMLString:baseURL:".
/// \code
/// [self.bodyView loadHTMLString:html baseURL:NULL];
/// \endcode
AST_MATCHER(ObjCMessageExpr, hasUnarySelector) {
return Node.getSelector().isUnarySelector();
}
/// Matches when the selector is a keyword selector
///
/// objCMessageExpr(hasKeywordSelector()) matches the generated setFrame
/// message expression in
///
/// \code
/// UIWebView *webView = ...;
/// CGRect bodyFrame = webView.frame;
/// bodyFrame.size.height = self.bodyContentHeight;
/// webView.frame = bodyFrame;
/// // ^---- matches here
/// \endcode
AST_MATCHER(ObjCMessageExpr, hasKeywordSelector) {
return Node.getSelector().isKeywordSelector();
}
/// Matches when the selector has the specified number of arguments
///
/// matcher = objCMessageExpr(numSelectorArgs(0));
/// matches self.bodyView in the code below
///
/// matcher = objCMessageExpr(numSelectorArgs(2));
/// matches the invocation of "loadHTMLString:baseURL:" but not that
/// of self.bodyView
/// \code
/// [self.bodyView loadHTMLString:html baseURL:NULL];
/// \endcode
AST_MATCHER_P(ObjCMessageExpr, numSelectorArgs, unsigned, N) {
return Node.getSelector().getNumArgs() == N;
}
/// Matches if the call expression's callee expression matches.
///
/// Given
/// \code
/// class Y { void x() { this->x(); x(); Y y; y.x(); } };
/// void f() { f(); }
/// \endcode
/// callExpr(callee(expr()))
/// matches this->x(), x(), y.x(), f()
/// with callee(...)
/// matching this->x, x, y.x, f respectively
///
/// Note: Callee cannot take the more general internal::Matcher<Expr>
/// because this introduces ambiguous overloads with calls to Callee taking a
/// internal::Matcher<Decl>, as the matcher hierarchy is purely
/// implemented in terms of implicit casts.
AST_MATCHER_P(CallExpr, callee, internal::Matcher<Stmt>, InnerMatcher) {
  if (const Expr *Callee = Node.getCallee())
    return InnerMatcher.matches(*Callee, Finder, Builder);
  return false;
}
/// Matches if the call expression's callee's declaration matches the
/// given matcher.
///
/// Example matches y.x() (matcher = callExpr(callee(
/// cxxMethodDecl(hasName("x")))))
/// \code
/// class Y { public: void x(); };
/// void z() { Y y; y.x(); }
/// \endcode
AST_MATCHER_P_OVERLOAD(CallExpr, callee, internal::Matcher<Decl>, InnerMatcher,
1) {
// Delegates to hasDeclaration, which resolves the callee's declaration.
return callExpr(hasDeclaration(InnerMatcher)).matches(Node, Finder, Builder);
}
/// Matches if the expression's or declaration's type matches a type
/// matcher.
///
/// Example matches x (matcher = expr(hasType(cxxRecordDecl(hasName("X")))))
/// and z (matcher = varDecl(hasType(cxxRecordDecl(hasName("X")))))
/// and U (matcher = typedefDecl(hasType(asString("int")))
/// and friend class X (matcher = friendDecl(hasType("X"))
/// \code
/// class X {};
/// void y(X &x) { x; X z; }
/// typedef int U;
/// class Y { friend class X; };
/// \endcode
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    hasType,
    AST_POLYMORPHIC_SUPPORTED_TYPES(Expr, FriendDecl, TypedefNameDecl,
                                    ValueDecl),
    internal::Matcher<QualType>, InnerMatcher, 0) {
  // A null type (no underlying type available) never matches.
  const QualType NodeType = internal::getUnderlyingType(Node);
  return !NodeType.isNull() &&
         InnerMatcher.matches(NodeType, Finder, Builder);
}
/// Overloaded to match the declaration of the expression's or value
/// declaration's type.
///
/// In case of a value declaration (for example a variable declaration),
/// this resolves one layer of indirection. For example, in the value
/// declaration "X x;", cxxRecordDecl(hasName("X")) matches the declaration of
/// X, while varDecl(hasType(cxxRecordDecl(hasName("X")))) matches the
/// declaration of x.
///
/// Example matches x (matcher = expr(hasType(cxxRecordDecl(hasName("X")))))
/// and z (matcher = varDecl(hasType(cxxRecordDecl(hasName("X")))))
/// and friend class X (matcher = friendDecl(hasType("X"))
/// \code
/// class X {};
/// void y(X &x) { x; X z; }
/// class Y { friend class X; };
/// \endcode
///
/// Usable as: Matcher<Expr>, Matcher<ValueDecl>
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    hasType, AST_POLYMORPHIC_SUPPORTED_TYPES(Expr, FriendDecl, ValueDecl),
    internal::Matcher<Decl>, InnerMatcher, 1) {
  const QualType Underlying = internal::getUnderlyingType(Node);
  if (Underlying.isNull())
    return false;
  // Resolve the type to its declaration and match against that.
  return qualType(hasDeclaration(InnerMatcher))
      .matches(Underlying, Finder, Builder);
}
/// Matches if the type location of the declarator decl's type matches
/// the inner matcher.
///
/// Given
/// \code
/// int x;
/// \endcode
/// declaratorDecl(hasTypeLoc(loc(asString("int"))))
/// matches int x
AST_MATCHER_P(DeclaratorDecl, hasTypeLoc, internal::Matcher<TypeLoc>, Inner) {
  // Implicitly generated declarators (e.g. implicit destructors) carry no
  // type source info and therefore cannot match.
  const auto *TSI = Node.getTypeSourceInfo();
  if (TSI == nullptr)
    return false;
  return Inner.matches(TSI->getTypeLoc(), Finder, Builder);
}
/// Matches if the matched type is represented by the given string.
///
/// The comparison is against the type's printed representation, as
/// produced by QualType::getAsString().
///
/// Given
/// \code
/// class Y { public: void x(); };
/// void z() { Y* y; y->x(); }
/// \endcode
/// cxxMemberCallExpr(on(hasType(asString("class Y *"))))
/// matches y->x()
AST_MATCHER_P(QualType, asString, std::string, Name) {
return Name == Node.getAsString();
}
/// Matches if the matched type is a pointer type and the pointee type
/// matches the specified matcher.
///
/// Example matches y->x()
/// (matcher = cxxMemberCallExpr(on(hasType(pointsTo(
/// cxxRecordDecl(hasName("Y")))))))
/// \code
/// class Y { public: void x(); };
/// void z() { Y *y; y->x(); }
/// \endcode
AST_MATCHER_P(QualType, pointsTo, internal::Matcher<QualType>, InnerMatcher) {
  // Null types and non-pointer types never match.
  if (Node.isNull() || !Node->isAnyPointerType())
    return false;
  return InnerMatcher.matches(Node->getPointeeType(), Finder, Builder);
}
/// Overloaded to match the pointee type's declaration.
///
/// Equivalent to pointsTo(qualType(hasDeclaration(InnerMatcher))).
AST_MATCHER_P_OVERLOAD(QualType, pointsTo, internal::Matcher<Decl>,
InnerMatcher, 1) {
return pointsTo(qualType(hasDeclaration(InnerMatcher)))
.matches(Node, Finder, Builder);
}
/// Matches if the matched type matches the unqualified desugared
/// type of the matched node.
///
/// The inner matcher is applied to the node's fully desugared,
/// unqualified type (Type::getUnqualifiedDesugaredType()).
///
/// For example, in:
/// \code
/// class A {};
/// using B = A;
/// \endcode
/// The matcher type(hasUnqualifiedDesugaredType(recordType())) matches
/// both B and A.
AST_MATCHER_P(Type, hasUnqualifiedDesugaredType, internal::Matcher<Type>,
InnerMatcher) {
return InnerMatcher.matches(*Node.getUnqualifiedDesugaredType(), Finder,
Builder);
}
/// Matches if the matched type is a reference type and the referenced
/// type matches the specified matcher.
///
/// Example matches X &x and const X &y
/// (matcher = varDecl(hasType(references(cxxRecordDecl(hasName("X"))))))
/// \code
/// class X {
/// void a(X b) {
/// X &x = b;
/// const X &y = b;
/// }
/// };
/// \endcode
AST_MATCHER_P(QualType, references, internal::Matcher<QualType>,
              InnerMatcher) {
  // Null types and non-reference types never match.
  if (Node.isNull() || !Node->isReferenceType())
    return false;
  return InnerMatcher.matches(Node->getPointeeType(), Finder, Builder);
}
/// Matches QualTypes whose canonical type matches InnerMatcher.
///
/// Given:
/// \code
/// typedef int &int_ref;
/// int a;
/// int_ref b = a;
/// \endcode
///
/// \c varDecl(hasType(qualType(referenceType()))))) will not match the
/// declaration of b but \c
/// varDecl(hasType(qualType(hasCanonicalType(referenceType())))))) does.
AST_MATCHER_P(QualType, hasCanonicalType, internal::Matcher<QualType>,
              InnerMatcher) {
  // A null type has no canonical type and never matches.
  return !Node.isNull() &&
         InnerMatcher.matches(Node.getCanonicalType(), Finder, Builder);
}
/// Overloaded to match the referenced type's declaration.
///
/// Equivalent to references(qualType(hasDeclaration(InnerMatcher))).
AST_MATCHER_P_OVERLOAD(QualType, references, internal::Matcher<Decl>,
InnerMatcher, 1) {
return references(qualType(hasDeclaration(InnerMatcher)))
.matches(Node, Finder, Builder);
}
/// Matches on the implicit object argument of a member call expression. Unlike
/// `on`, matches the argument directly without stripping away anything.
///
/// Given
/// \code
/// class Y { public: void m(); };
/// Y g();
/// class X : public Y { void g(); };
/// void z(Y y, X x) { y.m(); x.m(); x.g(); (g()).m(); }
/// \endcode
/// cxxMemberCallExpr(onImplicitObjectArgument(hasType(
/// cxxRecordDecl(hasName("Y")))))
/// matches `y.m()`, `x.m()` and (g()).m(), but not `x.g()`.
/// cxxMemberCallExpr(on(callExpr()))
/// does not match `(g()).m()`, because the parens are not ignored.
///
/// FIXME: Overload to allow directly matching types?
AST_MATCHER_P(CXXMemberCallExpr, onImplicitObjectArgument,
              internal::Matcher<Expr>, InnerMatcher) {
  if (const Expr *ImplicitObject = Node.getImplicitObjectArgument())
    return InnerMatcher.matches(*ImplicitObject, Finder, Builder);
  return false;
}
/// Matches if the type of the expression's implicit object argument either
/// matches the InnerMatcher, or is a pointer to a type that matches the
/// InnerMatcher.
///
/// Given
/// \code
/// class Y { public: void m(); };
/// class X : public Y { void g(); };
/// void z() { Y y; y.m(); Y *p; p->m(); X x; x.m(); x.g(); }
/// \endcode
/// cxxMemberCallExpr(thisPointerType(hasDeclaration(
/// cxxRecordDecl(hasName("Y")))))
/// matches `y.m()`, `p->m()` and `x.m()`.
/// cxxMemberCallExpr(thisPointerType(hasDeclaration(
/// cxxRecordDecl(hasName("X")))))
/// matches `x.g()`.
AST_MATCHER_P_OVERLOAD(CXXMemberCallExpr, thisPointerType,
internal::Matcher<QualType>, InnerMatcher, 0) {
// Accepts both "obj.m()" (type matches directly) and "ptr->m()" (type is a
// pointer to a matching type).
return onImplicitObjectArgument(
anyOf(hasType(InnerMatcher), hasType(pointsTo(InnerMatcher))))
.matches(Node, Finder, Builder);
}
/// Overloaded to match the type's declaration.
AST_MATCHER_P_OVERLOAD(CXXMemberCallExpr, thisPointerType,
internal::Matcher<Decl>, InnerMatcher, 1) {
// Same as the QualType overload, but resolving to the type's declaration.
return onImplicitObjectArgument(
anyOf(hasType(InnerMatcher), hasType(pointsTo(InnerMatcher))))
.matches(Node, Finder, Builder);
}
/// Matches a DeclRefExpr that refers to a declaration that matches the
/// specified matcher.
///
/// Example matches x in if(x)
/// (matcher = declRefExpr(to(varDecl(hasName("x")))))
/// \code
/// bool x;
/// if (x) {}
/// \endcode
AST_MATCHER_P(DeclRefExpr, to, internal::Matcher<Decl>, InnerMatcher) {
  if (const Decl *Referenced = Node.getDecl())
    return InnerMatcher.matches(*Referenced, Finder, Builder);
  return false;
}
/// Matches a \c DeclRefExpr that refers to a declaration through a
/// specific using shadow declaration.
///
/// Given
/// \code
/// namespace a { void f() {} }
/// using a::f;
/// void g() {
/// f(); // Matches this ..
/// a::f(); // .. but not this.
/// }
/// \endcode
/// declRefExpr(throughUsingDecl(anything()))
/// matches \c f()
AST_MATCHER_P(DeclRefExpr, throughUsingDecl,
              internal::Matcher<UsingShadowDecl>, InnerMatcher) {
  // Only references found through a using-declaration have a
  // UsingShadowDecl as their found declaration.
  const auto *Shadow = dyn_cast<UsingShadowDecl>(Node.getFoundDecl());
  return Shadow != nullptr && InnerMatcher.matches(*Shadow, Finder, Builder);
}
/// Matches an \c OverloadExpr if any of the declarations in the set of
/// overloads matches the given matcher.
///
/// Given
/// \code
/// template <typename T> void foo(T);
/// template <typename T> void bar(T);
/// template <typename T> void baz(T t) {
/// foo(t);
/// bar(t);
/// }
/// \endcode
/// unresolvedLookupExpr(hasAnyDeclaration(
/// functionTemplateDecl(hasName("foo"))))
/// matches \c foo in \c foo(t); but not \c bar in \c bar(t);
AST_MATCHER_P(OverloadExpr, hasAnyDeclaration, internal::Matcher<Decl>,
InnerMatcher) {
// Walks the overload set [decls_begin, decls_end); the first declaration
// that matches the inner matcher wins.
return matchesFirstInPointerRange(InnerMatcher, Node.decls_begin(),
Node.decls_end(), Finder, Builder);
}
/// Matches the Decl of a DeclStmt which has a single declaration.
///
/// Given
/// \code
/// int a, b;
/// int c;
/// \endcode
/// declStmt(hasSingleDecl(anything()))
/// matches 'int c;' but not 'int a, b;'.
AST_MATCHER_P(DeclStmt, hasSingleDecl, internal::Matcher<Decl>, InnerMatcher) {
  // Multi-declaration statements never match, regardless of InnerMatcher.
  if (!Node.isSingleDecl())
    return false;
  return InnerMatcher.matches(*Node.getSingleDecl(), Finder, Builder);
}
/// Matches a variable declaration that has an initializer expression
/// that matches the given matcher.
///
/// Example matches x (matcher = varDecl(hasInitializer(callExpr())))
/// \code
/// bool y() { return true; }
/// bool x = y();
/// \endcode
AST_MATCHER_P(VarDecl, hasInitializer, internal::Matcher<Expr>, InnerMatcher) {
  // getAnyInitializer() returns null for uninitialized variables.
  if (const Expr *Init = Node.getAnyInitializer())
    return InnerMatcher.matches(*Init, Finder, Builder);
  return false;
}
/// \brief Matches a static variable with local scope.
///
/// Example matches y (matcher = varDecl(isStaticLocal()))
/// \code
/// void f() {
/// int x;
/// static int y;
/// }
/// static int z;
/// \endcode
AST_MATCHER(VarDecl, isStaticLocal) {
return Node.isStaticLocal();
}
/// Matches a variable declaration that has function scope and is a
/// non-static local variable.
///
/// Example matches x (matcher = varDecl(hasLocalStorage()))
/// \code
/// void f() {
/// int x;
/// static int y;
/// }
/// int z;
/// \endcode
AST_MATCHER(VarDecl, hasLocalStorage) {
return Node.hasLocalStorage();
}
/// Matches a variable declaration that does not have local storage.
///
/// Example matches y and z (matcher = varDecl(hasGlobalStorage()))
/// \code
/// void f() {
/// int x;
/// static int y;
/// }
/// int z;
/// \endcode
AST_MATCHER(VarDecl, hasGlobalStorage) {
return Node.hasGlobalStorage();
}
/// Matches a variable declaration that has automatic storage duration.
///
/// Example matches x, but not y, z, or a.
/// (matcher = varDecl(hasAutomaticStorageDuration()))
/// \code
/// void f() {
/// int x;
/// static int y;
/// thread_local int z;
/// }
/// int a;
/// \endcode
AST_MATCHER(VarDecl, hasAutomaticStorageDuration) {
return Node.getStorageDuration() == SD_Automatic;
}
/// Matches a variable declaration that has static storage duration.
/// It includes the variable declared at namespace scope and those declared
/// with "static" and "extern" storage class specifiers.
///
/// \code
/// void f() {
/// int x;
/// static int y;
/// thread_local int z;
/// }
/// int a;
/// static int b;
/// extern int c;
/// varDecl(hasStaticStorageDuration())
/// matches the variable declarations y, a, b and c.
/// \endcode
AST_MATCHER(VarDecl, hasStaticStorageDuration) {
return Node.getStorageDuration() == SD_Static;
}
/// Matches a variable declaration that has thread storage duration.
///
/// Example matches z, but not x, y, or a.
/// (matcher = varDecl(hasThreadStorageDuration()))
/// \code
/// void f() {
/// int x;
/// static int y;
/// thread_local int z;
/// }
/// int a;
/// \endcode
AST_MATCHER(VarDecl, hasThreadStorageDuration) {
return Node.getStorageDuration() == SD_Thread;
}
/// Matches a variable declaration that is an exception variable from
/// a C++ catch block, or an Objective-C \@catch statement.
///
/// Example matches x (matcher = varDecl(isExceptionVariable()))
/// \code
/// void f(int y) {
/// try {
/// } catch (int x) {
/// }
/// }
/// \endcode
AST_MATCHER(VarDecl, isExceptionVariable) {
return Node.isExceptionVariable();
}
/// Checks that a call expression or a constructor call expression has
/// a specific number of arguments (including absent default arguments).
///
/// Example matches f(0, 0) (matcher = callExpr(argumentCountIs(2)))
/// \code
/// void f(int x, int y);
/// f(0, 0);
/// \endcode
AST_POLYMORPHIC_MATCHER_P(argumentCountIs,
AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr,
CXXConstructExpr,
ObjCMessageExpr),
unsigned, N) {
// getNumArgs() also counts arguments supplied via default-argument
// expressions that are absent in the source.
return Node.getNumArgs() == N;
}
/// Matches the n'th argument of a call expression or a constructor
/// call expression.
///
/// Example matches y in x(y)
/// (matcher = callExpr(hasArgument(0, declRefExpr())))
/// \code
/// void x(int) { int y; x(y); }
/// \endcode
AST_POLYMORPHIC_MATCHER_P2(hasArgument,
                           AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr,
                                                           CXXConstructExpr,
                                                           ObjCMessageExpr),
                           unsigned, N, internal::Matcher<Expr>,
                           InnerMatcher) {
  // Out-of-range indices never match.
  if (N >= Node.getNumArgs())
    return false;
  const Expr *Arg = Node.getArg(N)->IgnoreParenImpCasts();
  return InnerMatcher.matches(*Arg, Finder, Builder);
}
/// Matches the n'th item of an initializer list expression.
///
/// Example matches y.
/// (matcher = initListExpr(hasInit(0, expr())))
/// \code
/// int x{y};
/// \endcode
AST_MATCHER_P2(InitListExpr, hasInit, unsigned, N,
               ast_matchers::internal::Matcher<Expr>, InnerMatcher) {
  // Out-of-range indices never match.
  if (N >= Node.getNumInits())
    return false;
  return InnerMatcher.matches(*Node.getInit(N), Finder, Builder);
}
/// Matches declaration statements that contain a specific number of
/// declarations.
///
/// Example: Given
/// \code
/// int a, b;
/// int c;
/// int d = 2, e;
/// \endcode
/// declCountIs(2)
/// matches 'int a, b;' and 'int d = 2, e;', but not 'int c;'.
AST_MATCHER_P(DeclStmt, declCountIs, unsigned, N) {
  // std::distance yields a ptrdiff_t; cast N explicitly for the comparison.
  const auto DeclCount = std::distance(Node.decl_begin(), Node.decl_end());
  return DeclCount == static_cast<ptrdiff_t>(N);
}
/// Matches the n'th declaration of a declaration statement.
///
/// Note that this does not work for global declarations because the AST
/// breaks up multiple-declaration DeclStmt's into multiple single-declaration
/// DeclStmt's.
/// Example: Given non-global declarations
/// \code
/// int a, b = 0;
/// int c;
/// int d = 2, e;
/// \endcode
/// declStmt(containsDeclaration(
/// 0, varDecl(hasInitializer(anything()))))
/// matches only 'int d = 2, e;', and
/// declStmt(containsDeclaration(1, varDecl()))
/// \code
/// matches 'int a, b = 0' as well as 'int d = 2, e;'
/// but 'int c;' is not matched.
/// \endcode
AST_MATCHER_P2(DeclStmt, containsDeclaration, unsigned, N,
               internal::Matcher<Decl>, InnerMatcher) {
  // Out-of-range indices never match.
  const unsigned NumDecls = std::distance(Node.decl_begin(), Node.decl_end());
  if (N >= NumDecls)
    return false;
  return InnerMatcher.matches(**std::next(Node.decl_begin(), N), Finder,
                              Builder);
}
/// Matches a C++ catch statement that has a catch-all handler.
///
/// Given
/// \code
/// try {
/// // ...
/// } catch (int) {
/// // ...
/// } catch (...) {
/// // ...
/// }
/// \endcode
/// cxxCatchStmt(isCatchAll()) matches catch(...) but not catch(int).
AST_MATCHER(CXXCatchStmt, isCatchAll) {
// A catch-all handler "catch (...)" carries no exception declaration.
return Node.getExceptionDecl() == nullptr;
}
/// Matches a constructor initializer.
///
/// Given
/// \code
/// struct Foo {
/// Foo() : foo_(1) { }
/// int foo_;
/// };
/// \endcode
/// cxxRecordDecl(has(cxxConstructorDecl(
/// hasAnyConstructorInitializer(anything())
/// )))
/// record matches Foo, hasAnyConstructorInitializer matches foo_(1)
AST_MATCHER_P(CXXConstructorDecl, hasAnyConstructorInitializer,
internal::Matcher<CXXCtorInitializer>, InnerMatcher) {
// Walks [init_begin, init_end); the first initializer that matches the
// inner matcher wins.
return matchesFirstInPointerRange(InnerMatcher, Node.init_begin(),
Node.init_end(), Finder, Builder);
}
/// Matches the field declaration of a constructor initializer.
///
/// Given
/// \code
/// struct Foo {
/// Foo() : foo_(1) { }
/// int foo_;
/// };
/// \endcode
/// cxxRecordDecl(has(cxxConstructorDecl(hasAnyConstructorInitializer(
/// forField(hasName("foo_"))))))
/// matches Foo
/// with forField matching foo_
AST_MATCHER_P(CXXCtorInitializer, forField,
              internal::Matcher<FieldDecl>, InnerMatcher) {
  // Base initializers have no member field; those never match.
  if (const FieldDecl *Member = Node.getAnyMember())
    return InnerMatcher.matches(*Member, Finder, Builder);
  return false;
}
/// Matches the initializer expression of a constructor initializer.
///
/// Given
/// \code
/// struct Foo {
/// Foo() : foo_(1) { }
/// int foo_;
/// };
/// \endcode
/// cxxRecordDecl(has(cxxConstructorDecl(hasAnyConstructorInitializer(
/// withInitializer(integerLiteral(equals(1)))))))
/// matches Foo
/// with withInitializer matching (1)
AST_MATCHER_P(CXXCtorInitializer, withInitializer,
              internal::Matcher<Expr>, InnerMatcher) {
  if (const Expr *Init = Node.getInit())
    return InnerMatcher.matches(*Init, Finder, Builder);
  return false;
}
/// Matches a constructor initializer if it is explicitly written in
/// code (as opposed to implicitly added by the compiler).
///
/// Given
/// \code
/// struct Foo {
/// Foo() { }
/// Foo(int) : foo_("A") { }
/// string foo_;
/// };
/// \endcode
/// cxxConstructorDecl(hasAnyConstructorInitializer(isWritten()))
/// will match Foo(int), but not Foo()
AST_MATCHER(CXXCtorInitializer, isWritten) {
return Node.isWritten();
}
/// Matches a constructor initializer if it is initializing a base, as
/// opposed to a member.
///
/// The complement of isMemberInitializer().
///
/// Given
/// \code
/// struct B {};
/// struct D : B {
/// int I;
/// D(int i) : I(i) {}
/// };
/// struct E : B {
/// E() : B() {}
/// };
/// \endcode
/// cxxConstructorDecl(hasAnyConstructorInitializer(isBaseInitializer()))
/// will match E(), but not match D(int).
AST_MATCHER(CXXCtorInitializer, isBaseInitializer) {
return Node.isBaseInitializer();
}
/// Matches a constructor initializer if it is initializing a member, as
/// opposed to a base.
///
/// The complement of isBaseInitializer().
///
/// Given
/// \code
/// struct B {};
/// struct D : B {
/// int I;
/// D(int i) : I(i) {}
/// };
/// struct E : B {
/// E() : B() {}
/// };
/// \endcode
/// cxxConstructorDecl(hasAnyConstructorInitializer(isMemberInitializer()))
/// will match D(int), but not match E().
AST_MATCHER(CXXCtorInitializer, isMemberInitializer) {
return Node.isMemberInitializer();
}
/// Matches any argument of a call expression or a constructor call
/// expression, or an ObjC-message-send expression.
///
/// Given
/// \code
/// void x(int, int, int) { int y; x(1, y, 42); }
/// \endcode
/// callExpr(hasAnyArgument(declRefExpr()))
/// matches x(1, y, 42)
/// with hasAnyArgument(...)
/// matching y
///
/// For ObjectiveC, given
/// \code
/// @interface I - (void) f:(int) y; @end
/// void foo(I *i) { [i f:12]; }
/// \endcode
/// objcMessageExpr(hasAnyArgument(integerLiteral(equals(12))))
/// matches [i f:12]
AST_POLYMORPHIC_MATCHER_P(hasAnyArgument,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(
                              CallExpr, CXXConstructExpr,
                              CXXUnresolvedConstructExpr, ObjCMessageExpr),
                          internal::Matcher<Expr>, InnerMatcher) {
  // Probe each argument in order; commit the bindings of the first argument
  // that satisfies InnerMatcher and stop searching.
  for (const Expr *Argument : Node.arguments()) {
    BoundNodesTreeBuilder Candidate(*Builder);
    if (InnerMatcher.matches(*Argument, Finder, &Candidate)) {
      *Builder = std::move(Candidate);
      return true;
    }
  }
  return false;
}
/// Matches a constructor call expression which uses list initialization.
AST_MATCHER(CXXConstructExpr, isListInitialization) {
  // True for constructor calls written with braced-init-list syntax.
  const bool UsesListInit = Node.isListInitialization();
  return UsesListInit;
}
/// Matches a constructor call expression which requires
/// zero initialization.
///
/// Given
/// \code
/// void foo() {
/// struct point { double x; double y; };
/// point pt[2] = { { 1.0, 2.0 } };
/// }
/// \endcode
/// initListExpr(has(cxxConstructExpr(requiresZeroInitialization()))
/// will match the implicit array filler for pt[1].
AST_MATCHER(CXXConstructExpr, requiresZeroInitialization) {
  // True for construct expressions that must zero-initialize their object.
  const bool NeedsZeroInit = Node.requiresZeroInitialization();
  return NeedsZeroInit;
}
/// Matches the n'th parameter of a function or an ObjC method
/// declaration or a block.
///
/// Given
/// \code
/// class X { void f(int x) {} };
/// \endcode
/// cxxMethodDecl(hasParameter(0, hasType(varDecl())))
/// matches f(int x) {}
/// with hasParameter(...)
/// matching int x
///
/// For ObjectiveC, given
/// \code
/// @interface I - (void) f:(int) y; @end
/// \endcode
///
/// the matcher objcMethodDecl(hasParameter(0, hasName("y")))
/// matches the declaration of method f with hasParameter
/// matching y.
AST_POLYMORPHIC_MATCHER_P2(hasParameter,
                           AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                           ObjCMethodDecl,
                                                           BlockDecl),
                           unsigned, N, internal::Matcher<ParmVarDecl>,
                           InnerMatcher) {
  // An out-of-range index simply fails to match.
  if (N >= Node.parameters().size())
    return false;
  return InnerMatcher.matches(*Node.parameters()[N], Finder, Builder);
}
/// Matches all arguments and their respective ParmVarDecl.
///
/// Given
/// \code
/// void f(int i);
/// int y;
/// f(y);
/// \endcode
/// callExpr(
/// forEachArgumentWithParam(
/// declRefExpr(to(varDecl(hasName("y")))),
/// parmVarDecl(hasType(isInteger()))
/// ))
/// matches f(y);
/// with declRefExpr(...)
/// matching int y
/// and parmVarDecl(...)
/// matching int i
AST_POLYMORPHIC_MATCHER_P2(forEachArgumentWithParam,
                           AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr,
                                                           CXXConstructExpr),
                           internal::Matcher<Expr>, ArgMatcher,
                           internal::Matcher<ParmVarDecl>, ParamMatcher) {
  // Accumulates one binding set per (argument, parameter) pair where both
  // ArgMatcher and ParamMatcher succeed; all sets are committed at the end.
  BoundNodesTreeBuilder Result;
  // The first argument of an overloaded member operator is the implicit object
  // argument of the method which should not be matched against a parameter, so
  // we skip over it here.
  BoundNodesTreeBuilder Matches;
  unsigned ArgIndex = cxxOperatorCallExpr(callee(cxxMethodDecl()))
                          .matches(Node, Finder, &Matches)
                      ? 1
                      : 0;
  // ParamIndex tracks the parameter position (starts at 0 even when ArgIndex
  // starts at 1, since the implicit object argument has no parameter).
  int ParamIndex = 0;
  bool Matched = false;
  for (; ArgIndex < Node.getNumArgs(); ++ArgIndex) {
    BoundNodesTreeBuilder ArgMatches(*Builder);
    // ArgMatcher is applied to the argument with parens and casts stripped.
    if (ArgMatcher.matches(*(Node.getArg(ArgIndex)->IgnoreParenCasts()),
                           Finder, &ArgMatches)) {
      BoundNodesTreeBuilder ParamMatches(ArgMatches);
      // Re-match the whole call/construct node to reach the callee's
      // ParamIndex-th parameter declaration with ParamMatcher.
      if (expr(anyOf(cxxConstructExpr(hasDeclaration(cxxConstructorDecl(
                         hasParameter(ParamIndex, ParamMatcher)))),
                     callExpr(callee(functionDecl(
                         hasParameter(ParamIndex, ParamMatcher))))))
              .matches(Node, Finder, &ParamMatches)) {
        Result.addMatch(ParamMatches);
        Matched = true;
      }
    }
    ++ParamIndex;
  }
  *Builder = std::move(Result);
  return Matched;
}
/// Matches any parameter of a function or an ObjC method declaration or a
/// block.
///
/// Does not match the 'this' parameter of a method.
///
/// Given
/// \code
/// class X { void f(int x, int y, int z) {} };
/// \endcode
/// cxxMethodDecl(hasAnyParameter(hasName("y")))
/// matches f(int x, int y, int z) {}
/// with hasAnyParameter(...)
/// matching int y
///
/// For ObjectiveC, given
/// \code
/// @interface I - (void) f:(int) y; @end
/// \endcode
///
/// the matcher objcMethodDecl(hasAnyParameter(hasName("y")))
/// matches the declaration of method f with hasParameter
/// matching y.
///
/// For blocks, given
/// \code
/// b = ^(int y) { printf("%d", y) };
/// \endcode
///
/// the matcher blockDecl(hasAnyParameter(hasName("y")))
/// matches the declaration of the block b with hasParameter
/// matching y.
AST_POLYMORPHIC_MATCHER_P(hasAnyParameter,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                          ObjCMethodDecl,
                                                          BlockDecl),
                          internal::Matcher<ParmVarDecl>,
                          InnerMatcher) {
  // Succeed on the first declared parameter satisfying InnerMatcher.
  const auto ParamsBegin = Node.param_begin();
  const auto ParamsEnd = Node.param_end();
  return matchesFirstInPointerRange(InnerMatcher, ParamsBegin, ParamsEnd,
                                    Finder, Builder);
}
/// Matches \c FunctionDecls and \c FunctionProtoTypes that have a
/// specific parameter count.
///
/// Given
/// \code
/// void f(int i) {}
/// void g(int i, int j) {}
/// void h(int i, int j);
/// void j(int i);
/// void k(int x, int y, int z, ...);
/// \endcode
/// functionDecl(parameterCountIs(2))
/// matches \c g and \c h
/// functionProtoType(parameterCountIs(2))
/// matches \c g and \c h
/// functionProtoType(parameterCountIs(3))
/// matches \c k
AST_POLYMORPHIC_MATCHER_P(parameterCountIs,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                          FunctionProtoType),
                          unsigned, N) {
  // Compare the declared parameter count against the expected value.
  const unsigned ActualCount = Node.getNumParams();
  return ActualCount == N;
}
/// Matches \c FunctionDecls that have a noreturn attribute.
///
/// Given
/// \code
/// void nope();
/// [[noreturn]] void a();
/// __attribute__((noreturn)) void b();
/// struct c { [[noreturn]] c(); };
/// \endcode
/// functionDecl(isNoReturn())
/// matches all of those except
/// \code
/// void nope();
/// \endcode
AST_MATCHER(FunctionDecl, isNoReturn) {
  // Covers both the [[noreturn]] and __attribute__((noreturn)) spellings.
  const bool IsNoReturn = Node.isNoReturn();
  return IsNoReturn;
}
/// Matches the return type of a function declaration.
///
/// Given:
/// \code
/// class X { int f() { return 1; } };
/// \endcode
/// cxxMethodDecl(returns(asString("int")))
/// matches int f() { return 1; }
AST_MATCHER_P(FunctionDecl, returns,
              internal::Matcher<QualType>, InnerMatcher) {
  // Apply InnerMatcher to the function's declared return type.
  const QualType ReturnType = Node.getReturnType();
  return InnerMatcher.matches(ReturnType, Finder, Builder);
}
/// Matches extern "C" function or variable declarations.
///
/// Given:
/// \code
/// extern "C" void f() {}
/// extern "C" { void g() {} }
/// void h() {}
/// extern "C" int x = 1;
/// extern "C" int y = 2;
/// int z = 3;
/// \endcode
/// functionDecl(isExternC())
/// matches the declaration of f and g, but not the declaration of h.
/// varDecl(isExternC())
/// matches the declaration of x and y, but not the declaration of z.
AST_POLYMORPHIC_MATCHER(isExternC, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                                   VarDecl)) {
  // True for declarations with C language linkage.
  const bool HasCLinkage = Node.isExternC();
  return HasCLinkage;
}
/// Matches variable/function declarations that have "static" storage
/// class specifier ("static" keyword) written in the source.
///
/// Given:
/// \code
/// static void f() {}
/// static int i = 0;
/// extern int j;
/// int k;
/// \endcode
/// functionDecl(isStaticStorageClass())
/// matches the function declaration f.
/// varDecl(isStaticStorageClass())
/// matches the variable declaration i.
AST_POLYMORPHIC_MATCHER(isStaticStorageClass,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                        VarDecl)) {
  // Only a 'static' storage class specifier written in the source qualifies.
  const StorageClass SC = Node.getStorageClass();
  return SC == SC_Static;
}
/// Matches deleted function declarations.
///
/// Given:
/// \code
/// void Func();
/// void DeletedFunc() = delete;
/// \endcode
/// functionDecl(isDeleted())
/// matches the declaration of DeletedFunc, but not Func.
AST_MATCHER(FunctionDecl, isDeleted) {
  // True for '= delete' function declarations.
  const bool IsDeleted = Node.isDeleted();
  return IsDeleted;
}
/// Matches defaulted function declarations.
///
/// Given:
/// \code
/// class A { ~A(); };
/// class B { ~B() = default; };
/// \endcode
/// functionDecl(isDefaulted())
/// matches the declaration of ~B, but not ~A.
AST_MATCHER(FunctionDecl, isDefaulted) {
  // True for '= default' function declarations.
  const bool IsDefaulted = Node.isDefaulted();
  return IsDefaulted;
}
/// Matches functions that have a dynamic exception specification.
///
/// Given:
/// \code
/// void f();
/// void g() noexcept;
/// void h() noexcept(true);
/// void i() noexcept(false);
/// void j() throw();
/// void k() throw(int);
/// void l() throw(...);
/// \endcode
/// functionDecl(hasDynamicExceptionSpec()) and
/// functionProtoType(hasDynamicExceptionSpec())
/// match the declarations of j, k, and l, but not f, g, h, or i.
AST_POLYMORPHIC_MATCHER(hasDynamicExceptionSpec,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                        FunctionProtoType)) {
  // Without a prototype there is no exception specification to inspect.
  const FunctionProtoType *Proto = internal::getFunctionProtoType(Node);
  return Proto != nullptr && Proto->hasDynamicExceptionSpec();
}
/// Matches functions that have a non-throwing exception specification.
///
/// Given:
/// \code
/// void f();
/// void g() noexcept;
/// void h() throw();
/// void i() throw(int);
/// void j() noexcept(false);
/// \endcode
/// functionDecl(isNoThrow()) and functionProtoType(isNoThrow())
/// match the declarations of g, and h, but not f, i or j.
AST_POLYMORPHIC_MATCHER(isNoThrow,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                        FunctionProtoType)) {
  const FunctionProtoType *FnTy = internal::getFunctionProtoType(Node);
  // If the function does not have a prototype, then it is assumed to be a
  // throwing function (as it would if the function did not have any exception
  // specification).
  if (!FnTy)
    return false;
  // Assume the best for any unresolved exception specification.
  if (isUnresolvedExceptionSpec(FnTy->getExceptionSpecType()))
    return true;
  // Otherwise the resolved exception specification decides.
  return FnTy->isNothrow();
}
/// Matches constexpr variable and function declarations,
/// and if constexpr.
///
/// Given:
/// \code
/// constexpr int foo = 42;
/// constexpr int bar();
/// void baz() { if constexpr(1 > 0) {} }
/// \endcode
/// varDecl(isConstexpr())
/// matches the declaration of foo.
/// functionDecl(isConstexpr())
/// matches the declaration of bar.
/// ifStmt(isConstexpr())
/// matches the if statement in baz.
AST_POLYMORPHIC_MATCHER(isConstexpr,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(VarDecl,
                                                        FunctionDecl,
                                                        IfStmt)) {
  // All three supported node kinds expose an isConstexpr() accessor.
  const bool IsConstexpr = Node.isConstexpr();
  return IsConstexpr;
}
/// Matches selection statements with initializer.
///
/// Given:
/// \code
/// void foo() {
/// if (int i = foobar(); i > 0) {}
/// switch (int i = foobar(); i) {}
/// for (auto& a = get_range(); auto& x : a) {}
/// }
/// void bar() {
/// if (foobar() > 0) {}
/// switch (foobar()) {}
/// for (auto& x : get_range()) {}
/// }
/// \endcode
/// ifStmt(hasInitStatement(anything()))
/// matches the if statement in foo but not in bar.
/// switchStmt(hasInitStatement(anything()))
/// matches the switch statement in foo but not in bar.
/// cxxForRangeStmt(hasInitStatement(anything()))
/// matches the range for statement in foo but not in bar.
AST_POLYMORPHIC_MATCHER_P(hasInitStatement,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(IfStmt, SwitchStmt,
                                                          CXXForRangeStmt),
                          internal::Matcher<Stmt>, InnerMatcher) {
  // Match only when an init-statement exists and satisfies InnerMatcher.
  if (const Stmt *InitStmt = Node.getInit())
    return InnerMatcher.matches(*InitStmt, Finder, Builder);
  return false;
}
/// Matches the condition expression of an if statement, for loop,
/// switch statement or conditional operator.
///
/// Example matches true (matcher = hasCondition(cxxBoolLiteral(equals(true))))
/// \code
/// if (true) {}
/// \endcode
AST_POLYMORPHIC_MATCHER_P(
    hasCondition,
    AST_POLYMORPHIC_SUPPORTED_TYPES(IfStmt, ForStmt, WhileStmt, DoStmt,
                                    SwitchStmt, AbstractConditionalOperator),
    internal::Matcher<Expr>, InnerMatcher) {
  // Match only when a condition expression exists and satisfies InnerMatcher
  // (a plain 'for (;;)' has none).
  if (const Expr *Condition = Node.getCond())
    return InnerMatcher.matches(*Condition, Finder, Builder);
  return false;
}
/// Matches the then-statement of an if statement.
///
/// Examples matches the if statement
/// (matcher = ifStmt(hasThen(cxxBoolLiteral(equals(true)))))
/// \code
/// if (false) true; else false;
/// \endcode
AST_MATCHER_P(IfStmt, hasThen, internal::Matcher<Stmt>, InnerMatcher) {
  // Delegate to InnerMatcher on the then-branch, if one exists.
  if (const Stmt *ThenBranch = Node.getThen())
    return InnerMatcher.matches(*ThenBranch, Finder, Builder);
  return false;
}
/// Matches the else-statement of an if statement.
///
/// Examples matches the if statement
/// (matcher = ifStmt(hasElse(cxxBoolLiteral(equals(true)))))
/// \code
/// if (false) false; else true;
/// \endcode
AST_MATCHER_P(IfStmt, hasElse, internal::Matcher<Stmt>, InnerMatcher) {
  // Delegate to InnerMatcher on the else-branch, if one exists.
  if (const Stmt *ElseBranch = Node.getElse())
    return InnerMatcher.matches(*ElseBranch, Finder, Builder);
  return false;
}
/// Matches if a node equals a previously bound node.
///
/// Matches a node if it equals the node previously bound to \p ID.
///
/// Given
/// \code
/// class X { int a; int b; };
/// \endcode
/// cxxRecordDecl(
/// has(fieldDecl(hasName("a"), hasType(type().bind("t")))),
/// has(fieldDecl(hasName("b"), hasType(type(equalsBoundNode("t"))))))
/// matches the class \c X, as \c a and \c b have the same type.
///
/// Note that when multiple matches are involved via \c forEach* matchers,
/// \c equalsBoundNodes acts as a filter.
/// For example:
/// compoundStmt(
/// forEachDescendant(varDecl().bind("d")),
/// forEachDescendant(declRefExpr(to(decl(equalsBoundNode("d"))))))
/// will trigger a match for each combination of variable declaration
/// and reference to that variable declaration within a compound statement.
AST_POLYMORPHIC_MATCHER_P(equalsBoundNode,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(Stmt, Decl, Type,
                                                          QualType),
                          std::string, ID) {
  // FIXME: Figure out whether it makes sense to allow this
  // on any other node types.
  // For *Loc it probably does not make sense, as those seem
  // unique. For NestedNameSpecifier it might make sense, as
  // those also have pointer identity, but I'm not sure whether
  // they're ever reused.
  // Drop every binding of ID that is not equal to the current node.
  // NOTE(review): removeBindings presumably reports whether any bindings
  // survive the filter — confirm against BoundNodesTreeBuilder.
  internal::NotEqualsBoundNodePredicate Predicate;
  Predicate.ID = ID;
  Predicate.Node = ast_type_traits::DynTypedNode::create(Node);
  return Builder->removeBindings(Predicate);
}
/// Matches the condition variable statement in an if statement.
///
/// Given
/// \code
/// if (A* a = GetAPointer()) {}
/// \endcode
/// hasConditionVariableStatement(...)
/// matches 'A* a = GetAPointer()'.
AST_MATCHER_P(IfStmt, hasConditionVariableStatement,
              internal::Matcher<DeclStmt>, InnerMatcher) {
  // Matches only when the if-condition declares a variable.
  if (const DeclStmt *CondVarStmt = Node.getConditionVariableDeclStmt())
    return InnerMatcher.matches(*CondVarStmt, Finder, Builder);
  return false;
}
/// Matches the index expression of an array subscript expression.
///
/// Given
/// \code
/// int i[5];
/// void f() { i[1] = 42; }
/// \endcode
/// arraySubscriptExpr(hasIndex(integerLiteral()))
/// matches \c i[1] with the \c integerLiteral() matching \c 1
AST_MATCHER_P(ArraySubscriptExpr, hasIndex,
              internal::Matcher<Expr>, InnerMatcher) {
  // Apply InnerMatcher to the subscript's index expression, if present.
  const Expr *Index = Node.getIdx();
  return Index != nullptr && InnerMatcher.matches(*Index, Finder, Builder);
}
/// Matches the base expression of an array subscript expression.
///
/// Given
/// \code
/// int i[5];
/// void f() { i[1] = 42; }
/// \endcode
/// arraySubscriptExpr(hasBase(implicitCastExpr(
/// hasSourceExpression(declRefExpr()))))
/// matches \c i[1] with the \c declRefExpr() matching \c i
AST_MATCHER_P(ArraySubscriptExpr, hasBase,
              internal::Matcher<Expr>, InnerMatcher) {
  // Apply InnerMatcher to the subscript's base expression, if present.
  const Expr *Base = Node.getBase();
  return Base != nullptr && InnerMatcher.matches(*Base, Finder, Builder);
}
/// Matches a 'for', 'while', 'do while' statement or a function
/// definition that has a given body.
///
/// Given
/// \code
/// for (;;) {}
/// \endcode
/// hasBody(compoundStmt())
/// matches 'for (;;) {}'
/// with compoundStmt()
/// matching '{}'
AST_POLYMORPHIC_MATCHER_P(hasBody,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(DoStmt, ForStmt,
                                                          WhileStmt,
                                                          CXXForRangeStmt,
                                                          FunctionDecl),
                          internal::Matcher<Stmt>, InnerMatcher) {
  // GetBodyMatcher abstracts over how each node kind exposes its body.
  if (const Stmt *Body = internal::GetBodyMatcher<NodeType>::get(Node))
    return InnerMatcher.matches(*Body, Finder, Builder);
  return false;
}
/// Matches compound statements where at least one substatement matches
/// a given matcher. Also matches StmtExprs that have CompoundStmt as children.
///
/// Given
/// \code
/// { {}; 1+2; }
/// \endcode
/// hasAnySubstatement(compoundStmt())
/// matches '{ {}; 1+2; }'
/// with compoundStmt()
/// matching '{}'
AST_POLYMORPHIC_MATCHER_P(hasAnySubstatement,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(CompoundStmt,
                                                          StmtExpr),
                          internal::Matcher<Stmt>, InnerMatcher) {
  // StmtExprs are adapted to their underlying CompoundStmt; match the first
  // substatement that satisfies InnerMatcher.
  const CompoundStmt *Compound = CompoundStmtMatcher<NodeType>::get(Node);
  if (!Compound)
    return false;
  return matchesFirstInPointerRange(InnerMatcher, Compound->body_begin(),
                                    Compound->body_end(), Finder, Builder);
}
/// Checks that a compound statement contains a specific number of
/// child statements.
///
/// Example: Given
/// \code
/// { for (;;) {} }
/// \endcode
/// compoundStmt(statementCountIs(0))
/// matches '{}'
/// but does not match the outer compound statement.
AST_MATCHER_P(CompoundStmt, statementCountIs, unsigned, N) {
  // Counts the compound statement's direct children only.
  const unsigned ChildCount = Node.size();
  return ChildCount == N;
}
/// Matches literals that are equal to the given value of type ValueT.
///
/// Given
/// \code
/// f('\0', false, 3.14, 42);
/// \endcode
/// characterLiteral(equals(0))
/// matches '\0'
/// cxxBoolLiteral(equals(false)) and cxxBoolLiteral(equals(0))
/// match false
/// floatLiteral(equals(3.14)) and floatLiteral(equals(314e-2))
/// match 3.14
/// integerLiteral(equals(42))
/// matches 42
///
/// Note that you cannot directly match a negative numeric literal because the
/// minus sign is not part of the literal: It is a unary operator whose operand
/// is the positive numeric literal. Instead, you must use a unaryOperator()
/// matcher to match the minus sign:
///
/// unaryOperator(hasOperatorName("-"),
/// hasUnaryOperand(integerLiteral(equals(13))))
///
/// Usable as: Matcher<CharacterLiteral>, Matcher<CXXBoolLiteralExpr>,
/// Matcher<FloatingLiteral>, Matcher<IntegerLiteral>
template <typename ValueT>
internal::PolymorphicMatcherWithParam1<internal::ValueEqualsMatcher, ValueT>
equals(const ValueT &Value) {
  // Wrap the value in a polymorphic matcher applicable to any literal kind.
  typedef internal::PolymorphicMatcherWithParam1<internal::ValueEqualsMatcher,
                                                 ValueT>
      ResultT;
  return ResultT(Value);
}
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals,
                                   AST_POLYMORPHIC_SUPPORTED_TYPES(CharacterLiteral,
                                                                   CXXBoolLiteralExpr,
                                                                   IntegerLiteral),
                                   bool, Value, 0) {
  // Compare the literal's value against the given bool.
  internal::ValueEqualsMatcher<NodeType, ParamT> ValueMatcher(Value);
  return ValueMatcher.matchesNode(Node);
}
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals,
                                   AST_POLYMORPHIC_SUPPORTED_TYPES(CharacterLiteral,
                                                                   CXXBoolLiteralExpr,
                                                                   IntegerLiteral),
                                   unsigned, Value, 1) {
  // Compare the literal's value against the given unsigned integer.
  internal::ValueEqualsMatcher<NodeType, ParamT> ValueMatcher(Value);
  return ValueMatcher.matchesNode(Node);
}
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals,
                                   AST_POLYMORPHIC_SUPPORTED_TYPES(CharacterLiteral,
                                                                   CXXBoolLiteralExpr,
                                                                   FloatingLiteral,
                                                                   IntegerLiteral),
                                   double, Value, 2) {
  // Compare the literal's value against the given double.
  internal::ValueEqualsMatcher<NodeType, ParamT> ValueMatcher(Value);
  return ValueMatcher.matchesNode(Node);
}
/// Matches the operator Name of operator expressions (binary or
/// unary).
///
/// Example matches a || b (matcher = binaryOperator(hasOperatorName("||")))
/// \code
/// !(a || b)
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasOperatorName,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator,
                                                          UnaryOperator),
                          std::string, Name) {
  // Compare against the spelled form of the operator, e.g. "+" or "!".
  return Node.getOpcodeStr(Node.getOpcode()) == Name;
}
/// Matches all kinds of assignment operators.
///
/// Example 1: matches a += b (matcher = binaryOperator(isAssignmentOperator()))
/// \code
/// if (a == b)
/// a += b;
/// \endcode
///
/// Example 2: matches s1 = s2
/// (matcher = cxxOperatorCallExpr(isAssignmentOperator()))
/// \code
/// struct S { S& operator=(const S&); };
/// void x() { S s1, s2; s1 = s2; })
/// \endcode
AST_POLYMORPHIC_MATCHER(isAssignmentOperator,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator,
                                                        CXXOperatorCallExpr)) {
  // Covers plain and compound assignment forms via isAssignmentOp().
  const bool IsAssignment = Node.isAssignmentOp();
  return IsAssignment;
}
/// Matches the left hand side of binary operator expressions.
///
/// Example matches a (matcher = binaryOperator(hasLHS()))
/// \code
/// a || b
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasLHS,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator,
                                                          ArraySubscriptExpr),
                          internal::Matcher<Expr>, InnerMatcher) {
  // Apply InnerMatcher to the left-hand operand, if present.
  if (const Expr *LeftHandSide = Node.getLHS())
    return InnerMatcher.matches(*LeftHandSide, Finder, Builder);
  return false;
}
/// Matches the right hand side of binary operator expressions.
///
/// Example matches b (matcher = binaryOperator(hasRHS()))
/// \code
/// a || b
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasRHS,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator,
                                                          ArraySubscriptExpr),
                          internal::Matcher<Expr>, InnerMatcher) {
  // Apply InnerMatcher to the right-hand operand, if present.
  if (const Expr *RightHandSide = Node.getRHS())
    return InnerMatcher.matches(*RightHandSide, Finder, Builder);
  return false;
}
/// Matches if either the left hand side or the right hand side of a
/// binary operator matches.
inline internal::Matcher<BinaryOperator> hasEitherOperand(
    const internal::Matcher<Expr> &InnerMatcher) {
  // Convenience wrapper: succeeds if InnerMatcher matches the LHS or the RHS.
  return anyOf(hasLHS(InnerMatcher), hasRHS(InnerMatcher));
}
/// Matches if the operand of a unary operator matches.
///
/// Example matches true (matcher = hasUnaryOperand(
/// cxxBoolLiteral(equals(true))))
/// \code
/// !true
/// \endcode
AST_MATCHER_P(UnaryOperator, hasUnaryOperand,
              internal::Matcher<Expr>, InnerMatcher) {
  // Apply InnerMatcher to the operator's single operand.
  if (const Expr *Operand = Node.getSubExpr())
    return InnerMatcher.matches(*Operand, Finder, Builder);
  return false;
}
/// Matches if the cast's source expression
/// or opaque value's source expression matches the given matcher.
///
/// Example 1: matches "a string"
/// (matcher = castExpr(hasSourceExpression(cxxConstructExpr())))
/// \code
/// class URL { URL(string); };
/// URL url = "a string";
/// \endcode
///
/// Example 2: matches 'b' (matcher =
/// opaqueValueExpr(hasSourceExpression(implicitCastExpr(declRefExpr())))
/// \code
/// int a = b ?: 1;
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasSourceExpression,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(CastExpr,
                                                          OpaqueValueExpr),
                          internal::Matcher<Expr>, InnerMatcher) {
  // GetSourceExpressionMatcher abstracts over how each node kind exposes
  // its source expression.
  if (const Expr *Source =
          internal::GetSourceExpressionMatcher<NodeType>::get(Node))
    return InnerMatcher.matches(*Source, Finder, Builder);
  return false;
}
/// Matches casts that has a given cast kind.
///
/// Example: matches the implicit cast around \c 0
/// (matcher = castExpr(hasCastKind(CK_NullToPointer)))
/// \code
/// int *p = 0;
/// \endcode
///
/// If the matcher is use from clang-query, CastKind parameter
/// should be passed as a quoted string. e.g., ofKind("CK_NullToPointer").
AST_MATCHER_P(CastExpr, hasCastKind, CastKind, Kind) {
  // Exact comparison of the cast's kind enumerator.
  return Kind == Node.getCastKind();
}
/// Matches casts whose destination type matches a given matcher.
///
/// (Note: Clang's AST refers to other conversions as "casts" too, and calls
/// actual casts "explicit" casts.)
AST_MATCHER_P(ExplicitCastExpr, hasDestinationType,
              internal::Matcher<QualType>, InnerMatcher) {
  // Use the type as written in the cast, not the semantic result type.
  return InnerMatcher.matches(Node.getTypeAsWritten(), Finder, Builder);
}
/// Matches implicit casts whose destination type matches a given
/// matcher.
///
/// FIXME: Unit test this matcher
AST_MATCHER_P(ImplicitCastExpr, hasImplicitDestinationType,
              internal::Matcher<QualType>, InnerMatcher) {
  // The implicit cast's own type is the conversion's destination type.
  const QualType Destination = Node.getType();
  return InnerMatcher.matches(Destination, Finder, Builder);
}
/// Matches RecordDecl object that are spelled with "struct."
///
/// Example matches S, but not C or U.
/// \code
/// struct S {};
/// class C {};
/// union U {};
/// \endcode
AST_MATCHER(RecordDecl, isStruct) {
  // True for records declared with the 'struct' keyword.
  const bool IsStruct = Node.isStruct();
  return IsStruct;
}
/// Matches RecordDecl object that are spelled with "union."
///
/// Example matches U, but not C or S.
/// \code
/// struct S {};
/// class C {};
/// union U {};
/// \endcode
AST_MATCHER(RecordDecl, isUnion) {
  // True for records declared with the 'union' keyword.
  const bool IsUnion = Node.isUnion();
  return IsUnion;
}
/// Matches RecordDecl object that are spelled with "class."
///
/// Example matches C, but not S or U.
/// \code
/// struct S {};
/// class C {};
/// union U {};
/// \endcode
AST_MATCHER(RecordDecl, isClass) {
  // True for records declared with the 'class' keyword.
  const bool IsClass = Node.isClass();
  return IsClass;
}
/// Matches the true branch expression of a conditional operator.
///
/// Example 1 (conditional ternary operator): matches a
/// \code
/// condition ? a : b
/// \endcode
///
/// Example 2 (conditional binary operator): matches opaqueValueExpr(condition)
/// \code
/// condition ?: b
/// \endcode
AST_MATCHER_P(AbstractConditionalOperator, hasTrueExpression,
              internal::Matcher<Expr>, InnerMatcher) {
  // Apply InnerMatcher to the true branch; for the binary form (x ?: y) this
  // is the opaque value wrapping the condition.
  if (const Expr *TrueBranch = Node.getTrueExpr())
    return InnerMatcher.matches(*TrueBranch, Finder, Builder);
  return false;
}
/// Matches the false branch expression of a conditional operator
/// (binary or ternary).
///
/// Example matches b
/// \code
/// condition ? a : b
/// condition ?: b
/// \endcode
AST_MATCHER_P(AbstractConditionalOperator, hasFalseExpression,
              internal::Matcher<Expr>, InnerMatcher) {
  // Apply InnerMatcher to the false branch of the conditional operator.
  if (const Expr *FalseBranch = Node.getFalseExpr())
    return InnerMatcher.matches(*FalseBranch, Finder, Builder);
  return false;
}
/// Matches if a declaration has a body attached.
///
/// Example matches A, va, fa
/// \code
/// class A {};
/// class B; // Doesn't match, as it has no body.
/// int va;
/// extern int vb; // Doesn't match, as it doesn't define the variable.
/// void fa() {}
/// void fb(); // Doesn't match, as it has no body.
/// @interface X
/// - (void)ma; // Doesn't match, interface is declaration.
/// @end
/// @implementation X
/// - (void)ma {}
/// @end
/// \endcode
///
/// Usable as: Matcher<TagDecl>, Matcher<VarDecl>, Matcher<FunctionDecl>,
/// Matcher<ObjCMethodDecl>
AST_POLYMORPHIC_MATCHER(isDefinition,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(TagDecl, VarDecl,
                                                        ObjCMethodDecl,
                                                        FunctionDecl)) {
  // True when this particular declaration is also the definition.
  const bool IsDefinition = Node.isThisDeclarationADefinition();
  return IsDefinition;
}
/// Matches if a function declaration is variadic.
///
/// Example matches f, but not g or h. The function i will not match, even when
/// compiled in C mode.
/// \code
/// void f(...);
/// void g(int);
/// template <typename... Ts> void h(Ts...);
/// void i();
/// \endcode
AST_MATCHER(FunctionDecl, isVariadic) {
  // True for C-style variadic functions (declared with '...').
  const bool IsVariadic = Node.isVariadic();
  return IsVariadic;
}
/// Matches the class declaration that the given method declaration
/// belongs to.
///
/// FIXME: Generalize this for other kinds of declarations.
/// FIXME: What other kind of declarations would we need to generalize
/// this to?
///
/// Example matches A() in the last line
/// (matcher = cxxConstructExpr(hasDeclaration(cxxMethodDecl(
/// ofClass(hasName("A"))))))
/// \code
/// class A {
/// public:
/// A();
/// };
/// A a = A();
/// \endcode
AST_MATCHER_P(CXXMethodDecl, ofClass,
              internal::Matcher<CXXRecordDecl>, InnerMatcher) {
  // Apply InnerMatcher to the class this method is declared in.
  if (const CXXRecordDecl *Class = Node.getParent())
    return InnerMatcher.matches(*Class, Finder, Builder);
  return false;
}
/// Matches each method overridden by the given method. This matcher may
/// produce multiple matches.
///
/// Given
/// \code
/// class A { virtual void f(); };
/// class B : public A { void f(); };
/// class C : public B { void f(); };
/// \endcode
/// cxxMethodDecl(ofClass(hasName("C")),
/// forEachOverridden(cxxMethodDecl().bind("b"))).bind("d")
/// matches once, with "b" binding "A::f" and "d" binding "C::f" (Note
/// that B::f is not overridden by C::f).
///
/// The check can produce multiple matches in case of multiple inheritance, e.g.
/// \code
/// class A1 { virtual void f(); };
/// class A2 { virtual void f(); };
/// class C : public A1, public A2 { void f(); };
/// \endcode
/// cxxMethodDecl(ofClass(hasName("C")),
/// forEachOverridden(cxxMethodDecl().bind("b"))).bind("d")
/// matches twice, once with "b" binding "A1::f" and "d" binding "C::f", and
/// once with "b" binding "A2::f" and "d" binding "C::f".
AST_MATCHER_P(CXXMethodDecl, forEachOverridden,
              internal::Matcher<CXXMethodDecl>, InnerMatcher) {
  // Collect one binding set per directly overridden method that satisfies
  // InnerMatcher; all sets are committed to the builder at the end.
  BoundNodesTreeBuilder Result;
  bool Matched = false;
  for (const auto *Overridden : Node.overridden_methods()) {
    BoundNodesTreeBuilder OverriddenBuilder(*Builder);
    const bool OverriddenMatched =
        InnerMatcher.matches(*Overridden, Finder, &OverriddenBuilder);
    if (OverriddenMatched) {
      Matched = true;
      Result.addMatch(OverriddenBuilder);
    }
  }
  // Replace the caller's bindings with the accumulated matches.
  *Builder = std::move(Result);
  return Matched;
}
/// Matches if the given method declaration is virtual.
///
/// Given
/// \code
/// class A {
/// public:
/// virtual void x();
/// };
/// \endcode
/// matches A::x
AST_MATCHER(CXXMethodDecl, isVirtual) {
  // True for virtual methods, whether declared virtual or inherited so.
  const bool IsVirtual = Node.isVirtual();
  return IsVirtual;
}
/// Matches if the given method declaration has an explicit "virtual".
///
/// Given
/// \code
/// class A {
/// public:
/// virtual void x();
/// };
/// class B : public A {
/// public:
/// void x();
/// };
/// \endcode
/// matches A::x but not B::x
AST_MATCHER(CXXMethodDecl, isVirtualAsWritten) {
  // True only when the 'virtual' keyword is spelled on this declaration.
  const bool ExplicitlyVirtual = Node.isVirtualAsWritten();
  return ExplicitlyVirtual;
}
/// Matches if the given method or class declaration is final.
///
/// Given:
/// \code
/// class A final {};
///
/// struct B {
/// virtual void f();
/// };
///
/// struct C : B {
/// void f() final;
/// };
/// \endcode
/// matches A and C::f, but not B, C, or B::f
AST_POLYMORPHIC_MATCHER(isFinal,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl,
                                                        CXXMethodDecl)) {
  // 'Node' has a dependent type inside this polymorphic matcher, so the
  // 'template' keyword is required for hasAttr<> to parse as a member
  // template. 'final' is represented as a FinalAttr on the declaration.
  return Node.template hasAttr<FinalAttr>();
}
/// Matches if the given method declaration is pure.
///
/// Given
/// \code
/// class A {
/// public:
/// virtual void x() = 0;
/// };
/// \endcode
/// matches A::x
AST_MATCHER(CXXMethodDecl, isPure) {
  // True for pure virtual methods (declared '= 0').
  const bool IsPure = Node.isPure();
  return IsPure;
}
/// Matches if the given method declaration is const.
///
/// Given
/// \code
/// struct A {
/// void foo() const;
/// void bar();
/// };
/// \endcode
///
/// cxxMethodDecl(isConst()) matches A::foo() but not A::bar()
AST_MATCHER(CXXMethodDecl, isConst) {
  // True for const-qualified member functions.
  const bool IsConstMethod = Node.isConst();
  return IsConstMethod;
}
/// Matches if the given method declaration declares a copy assignment
/// operator.
///
/// Given
/// \code
/// struct A {
/// A &operator=(const A &);
/// A &operator=(A &&);
/// };
/// \endcode
///
/// cxxMethodDecl(isCopyAssignmentOperator()) matches the first method but not
/// the second one.
AST_MATCHER(CXXMethodDecl, isCopyAssignmentOperator) {
  // True for operator= taking the class by value or (const) lvalue reference.
  const bool IsCopyAssign = Node.isCopyAssignmentOperator();
  return IsCopyAssign;
}
/// Matches if the given method declaration declares a move assignment
/// operator.
///
/// Given
/// \code
/// struct A {
/// A &operator=(const A &);
/// A &operator=(A &&);
/// };
/// \endcode
///
/// cxxMethodDecl(isMoveAssignmentOperator()) matches the second method but not
/// the first one.
AST_MATCHER(CXXMethodDecl, isMoveAssignmentOperator) {
  // True for operator= taking the class by rvalue reference.
  const bool IsMoveAssign = Node.isMoveAssignmentOperator();
  return IsMoveAssign;
}
/// Matches if the given method declaration overrides another method.
///
/// Given
/// \code
/// class A {
/// public:
/// virtual void x();
/// };
/// class B : public A {
/// public:
/// virtual void x();
/// };
/// \endcode
/// matches B::x
AST_MATCHER(CXXMethodDecl, isOverride) {
  // Either the method actually overrides a base method, or it carries an
  // explicit 'override' attribute.
  const bool OverridesAny = Node.size_overridden_methods() > 0;
  return OverridesAny || Node.hasAttr<OverrideAttr>();
}
/// Matches method declarations that are user-provided.
///
/// Given
/// \code
/// struct S {
/// S(); // #1
/// S(const S &) = default; // #2
/// S(S &&) = delete; // #3
/// };
/// \endcode
/// cxxConstructorDecl(isUserProvided()) will match #1, but not #2 or #3.
AST_MATCHER(CXXMethodDecl, isUserProvided) {
  // Excludes defaulted and deleted declarations; only bodies the user wrote.
  const bool UserProvided = Node.isUserProvided();
  return UserProvided;
}
/// Matches member expressions that are called with '->' as opposed
/// to '.'.
///
/// Member calls on the implicit this pointer match as called with '->'.
///
/// Given
/// \code
/// class Y {
/// void x() { this->x(); x(); Y y; y.x(); a; this->b; Y::b; }
/// template <class T> void f() { this->f<T>(); f<T>(); }
/// int a;
/// static int b;
/// };
/// template <class T>
/// class Z {
/// void x() { this->m; }
/// };
/// \endcode
/// memberExpr(isArrow())
/// matches this->x, x, y.x, a, this->b
/// cxxDependentScopeMemberExpr(isArrow())
/// matches this->m
/// unresolvedMemberExpr(isArrow())
/// matches this->f<T>, f<T>
AST_POLYMORPHIC_MATCHER(
    isArrow, AST_POLYMORPHIC_SUPPORTED_TYPES(MemberExpr, UnresolvedMemberExpr,
                                             CXXDependentScopeMemberExpr)) {
  // True for member accesses spelled with '->' (including implicit 'this->').
  const bool UsesArrow = Node.isArrow();
  return UsesArrow;
}
/// Matches QualType nodes that are of integer type.
///
/// Given
/// \code
/// void a(int);
/// void b(long);
/// void c(double);
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isInteger())))
/// matches "a(int)", "b(long)", but not "c(double)".
AST_MATCHER(QualType, isInteger) {
  // Queries the underlying Type through the QualType.
  const bool IsIntegerType = Node->isIntegerType();
  return IsIntegerType;
}
/// Matches QualType nodes that are of unsigned integer type.
///
/// Given
/// \code
/// void a(int);
/// void b(unsigned long);
/// void c(double);
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isUnsignedInteger())))
/// matches "b(unsigned long)", but not "a(int)" and "c(double)".
AST_MATCHER(QualType, isUnsignedInteger) {
  // Queries the underlying Type through the QualType.
  const bool IsUnsigned = Node->isUnsignedIntegerType();
  return IsUnsigned;
}
/// Matches QualType nodes that are of signed integer type.
///
/// Given
/// \code
/// void a(int);
/// void b(unsigned long);
/// void c(double);
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isSignedInteger())))
/// matches "a(int)", but not "b(unsigned long)" and "c(double)".
AST_MATCHER(QualType, isSignedInteger) {
  // QualType::operator-> forwards the query to the underlying Type.
  return Node->isSignedIntegerType();
}
/// Matches QualType nodes that are of character type.
///
/// Given
/// \code
/// void a(char);
/// void b(wchar_t);
/// void c(double);
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isAnyCharacter())))
/// matches "a(char)", "b(wchar_t)", but not "c(double)".
AST_MATCHER(QualType, isAnyCharacter) {
  // Covers all character flavors per the examples above (char, wchar_t, ...).
  return Node->isAnyCharacterType();
}
/// Matches QualType nodes that are of any pointer type; this includes
/// the Objective-C object pointer type, which is different despite being
/// syntactically similar.
///
/// Given
/// \code
/// int *i = nullptr;
///
/// @interface Foo
/// @end
/// Foo *f;
///
/// int j;
/// \endcode
/// varDecl(hasType(isAnyPointer()))
/// matches "int *i" and "Foo *f", but not "int j".
AST_MATCHER(QualType, isAnyPointer) {
  // "Any" pointer includes the Objective-C object pointer type, which a
  // plain pointerType() match would not cover (see doc above).
  return Node->isAnyPointerType();
}
/// Matches QualType nodes that are const-qualified, i.e., that
/// include "top-level" const.
///
/// Given
/// \code
/// void a(int);
/// void b(int const);
/// void c(const int);
/// void d(const int*);
/// void e(int const) {};
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isConstQualified())))
/// matches "void b(int const)", "void c(const int)" and
/// "void e(int const) {}". It does not match d as there
/// is no top-level const on the parameter type "const int *".
AST_MATCHER(QualType, isConstQualified) {
  // Checks the top-level qualifier only; const buried in a pointee
  // (e.g. 'const int *') does not match, as documented above.
  return Node.isConstQualified();
}
/// Matches QualType nodes that are volatile-qualified, i.e., that
/// include "top-level" volatile.
///
/// Given
/// \code
/// void a(int);
/// void b(int volatile);
/// void c(volatile int);
/// void d(volatile int*);
/// void e(int volatile) {};
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isVolatileQualified())))
/// matches "void b(int volatile)", "void c(volatile int)" and
/// "void e(int volatile) {}". It does not match d as there
/// is no top-level volatile on the parameter type "volatile int *".
AST_MATCHER(QualType, isVolatileQualified) {
  // Checks the top-level qualifier only; volatile buried in a pointee
  // (e.g. 'volatile int *') does not match, as documented above.
  return Node.isVolatileQualified();
}
/// Matches QualType nodes that have local CV-qualifiers attached to
/// the node, not hidden within a typedef.
///
/// Given
/// \code
/// typedef const int const_int;
/// const_int i;
/// int *const j;
/// int *volatile k;
/// int m;
/// \endcode
/// \c varDecl(hasType(hasLocalQualifiers())) matches only \c j and \c k.
/// \c i is const-qualified but the qualifier is not local.
AST_MATCHER(QualType, hasLocalQualifiers) {
  // "Local" means written directly on this type node, not inherited
  // through a typedef (see the const_int example above).
  return Node.hasLocalQualifiers();
}
/// Matches a member expression where the member is matched by a
/// given matcher.
///
/// Given
/// \code
/// struct { int first, second; } first, second;
/// int i(second.first);
/// int j(first.second);
/// \endcode
/// memberExpr(member(hasName("first")))
/// matches second.first
/// but not first.second (because the member name there is "second").
AST_MATCHER_P(MemberExpr, member,
              internal::Matcher<ValueDecl>, InnerMatcher) {
  // Match against the declaration the member expression refers to.
  return InnerMatcher.matches(*Node.getMemberDecl(), Finder, Builder);
}
/// Matches a member expression where the object expression is matched by a
/// given matcher. Implicit object expressions are included; that is, it matches
/// use of implicit `this`.
///
/// Given
/// \code
/// struct X {
/// int m;
/// int f(X x) { x.m; return m; }
/// };
/// \endcode
/// memberExpr(hasObjectExpression(hasType(cxxRecordDecl(hasName("X")))))
/// matches `x.m`, but not `m`; however,
/// memberExpr(hasObjectExpression(hasType(pointsTo(
///                                       cxxRecordDecl(hasName("X"))))))
/// matches `m` (aka. `this->m`), but not `x.m`.
AST_POLYMORPHIC_MATCHER_P(
    hasObjectExpression,
    AST_POLYMORPHIC_SUPPORTED_TYPES(MemberExpr, UnresolvedMemberExpr,
                                    CXXDependentScopeMemberExpr),
    internal::Matcher<Expr>, InnerMatcher) {
  // For the unresolved/dependent forms, an implicit access has no base
  // expression to hand to the inner matcher, so reject those up front.
  // (A plain MemberExpr with implicit 'this' still matches; see doc above.)
  if (const auto *E = dyn_cast<UnresolvedMemberExpr>(&Node))
    if (E->isImplicitAccess())
      return false;
  if (const auto *E = dyn_cast<CXXDependentScopeMemberExpr>(&Node))
    if (E->isImplicitAccess())
      return false;
  return InnerMatcher.matches(*Node.getBase(), Finder, Builder);
}
/// Matches any using shadow declaration.
///
/// Given
/// \code
/// namespace X { void b(); }
/// using X::b;
/// \endcode
/// usingDecl(hasAnyUsingShadowDecl(hasName("b"))))
/// matches \code using X::b \endcode
AST_MATCHER_P(UsingDecl, hasAnyUsingShadowDecl,
              internal::Matcher<UsingShadowDecl>, InnerMatcher) {
  // Succeed as soon as any shadow declaration of the using-decl matches.
  return matchesFirstInPointerRange(InnerMatcher, Node.shadow_begin(),
                                    Node.shadow_end(), Finder, Builder);
}
/// Matches a using shadow declaration where the target declaration is
/// matched by the given matcher.
///
/// Given
/// \code
/// namespace X { int a; void b(); }
/// using X::a;
/// using X::b;
/// \endcode
/// usingDecl(hasAnyUsingShadowDecl(hasTargetDecl(functionDecl())))
/// matches \code using X::b \endcode
/// but not \code using X::a \endcode
AST_MATCHER_P(UsingShadowDecl, hasTargetDecl,
              internal::Matcher<NamedDecl>, InnerMatcher) {
  // Match against the declaration the shadow declaration refers to.
  return InnerMatcher.matches(*Node.getTargetDecl(), Finder, Builder);
}
/// Matches template instantiations of function, class, or static
/// member variable template instantiations.
///
/// Given
/// \code
/// template <typename T> class X {}; class A {}; X<A> x;
/// \endcode
/// or
/// \code
/// template <typename T> class X {}; class A {}; template class X<A>;
/// \endcode
/// or
/// \code
/// template <typename T> class X {}; class A {}; extern template class X<A>;
/// \endcode
/// cxxRecordDecl(hasName("::X"), isTemplateInstantiation())
/// matches the template instantiation of X<A>.
///
/// But given
/// \code
/// template <typename T> class X {}; class A {};
/// template <> class X<A> {}; X<A> x;
/// \endcode
/// cxxRecordDecl(hasName("::X"), isTemplateInstantiation())
/// does not match, as X<A> is an explicit template specialization.
///
/// Usable as: Matcher<FunctionDecl>, Matcher<VarDecl>, Matcher<CXXRecordDecl>
AST_POLYMORPHIC_MATCHER(isTemplateInstantiation,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl,
                                                        CXXRecordDecl)) {
  // Query the specialization kind once instead of three times.  Any of the
  // instantiation kinds (implicit, or either explicit instantiation form)
  // qualifies; explicit specializations deliberately do not (see doc above).
  const TemplateSpecializationKind TSK = Node.getTemplateSpecializationKind();
  return TSK == TSK_ImplicitInstantiation ||
         TSK == TSK_ExplicitInstantiationDefinition ||
         TSK == TSK_ExplicitInstantiationDeclaration;
}
/// Matches declarations that are template instantiations or are inside
/// template instantiations.
///
/// Given
/// \code
/// template<typename T> void A(T t) { T i; }
/// A(0);
/// A(0U);
/// \endcode
/// functionDecl(isInstantiated())
/// matches 'A(int) {...};' and 'A(unsigned) {...}'.
AST_MATCHER_FUNCTION(internal::Matcher<Decl>, isInstantiated) {
  // A decl qualifies if it is itself an instantiated class or function
  // template, or if it has such an instantiation as an ancestor.
  auto IsInstantiation = decl(anyOf(cxxRecordDecl(isTemplateInstantiation()),
                                    functionDecl(isTemplateInstantiation())));
  return decl(anyOf(IsInstantiation, hasAncestor(IsInstantiation)));
}
/// Matches statements inside of a template instantiation.
///
/// Given
/// \code
/// int j;
/// template<typename T> void A(T t) { T i; j += 42;}
/// A(0);
/// A(0U);
/// \endcode
/// declStmt(isInTemplateInstantiation())
/// matches 'int i;' and 'unsigned i'.
/// unless(stmt(isInTemplateInstantiation()))
/// will NOT match j += 42; as it's shared between the template definition and
/// instantiation.
AST_MATCHER_FUNCTION(internal::Matcher<Stmt>, isInTemplateInstantiation) {
  // A statement qualifies if any ancestor decl is an instantiated class or
  // function template.
  return stmt(
      hasAncestor(decl(anyOf(cxxRecordDecl(isTemplateInstantiation()),
                             functionDecl(isTemplateInstantiation())))));
}
/// Matches explicit template specializations of function, class, or
/// static member variable template instantiations.
///
/// Given
/// \code
/// template<typename T> void A(T t) { }
/// template<> void A(int N) { }
/// \endcode
/// functionDecl(isExplicitTemplateSpecialization())
/// matches the specialization A<int>().
///
/// Usable as: Matcher<FunctionDecl>, Matcher<VarDecl>, Matcher<CXXRecordDecl>
AST_POLYMORPHIC_MATCHER(isExplicitTemplateSpecialization,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl,
                                                        CXXRecordDecl)) {
  // Complements isTemplateInstantiation(): only user-written explicit
  // specializations match here.
  return (Node.getTemplateSpecializationKind() == TSK_ExplicitSpecialization);
}
/// Matches \c TypeLocs for which the given inner
/// QualType-matcher matches.
AST_MATCHER_FUNCTION_P_OVERLOAD(internal::BindableMatcher<TypeLoc>, loc,
                                internal::Matcher<QualType>, InnerMatcher, 0) {
  // Adapt a QualType matcher so it can run against a TypeLoc's type.
  return internal::BindableMatcher<TypeLoc>(
      new internal::TypeLocTypeMatcher(InnerMatcher));
}
/// Matches type \c bool.
///
/// Given
/// \code
/// struct S { bool func(); };
/// \endcode
/// functionDecl(returns(booleanType()))
/// matches "bool func();"
AST_MATCHER(Type, booleanType) {
  // Thin wrapper over the Type predicate.
  return Node.isBooleanType();
}
/// Matches type \c void.
///
/// Given
/// \code
/// struct S { void func(); };
/// \endcode
/// functionDecl(returns(voidType()))
/// matches "void func();"
AST_MATCHER(Type, voidType) {
  // Thin wrapper over the Type predicate.
  return Node.isVoidType();
}
/// Shorthand for a variadic matcher that dyn_casts a \c Type down to
/// \c NodeType; used for all the type-node matchers declared below.
template <typename NodeType>
using AstTypeMatcher = internal::VariadicDynCastAllOfMatcher<Type, NodeType>;
/// Matches builtin Types.
///
/// Given
/// \code
/// struct A {};
/// A a;
/// int b;
/// float c;
/// bool d;
/// \endcode
/// builtinType()
/// matches "int b", "float c" and "bool d"
extern const AstTypeMatcher<BuiltinType> builtinType;
/// Matches all kinds of arrays.
///
/// Given
/// \code
/// int a[] = { 2, 3 };
/// int b[4];
/// void f() { int c[a[0]]; }
/// \endcode
/// arrayType()
/// matches "int a[]", "int b[4]" and "int c[a[0]]";
extern const AstTypeMatcher<ArrayType> arrayType;
/// Matches C99 complex types.
///
/// Given
/// \code
/// _Complex float f;
/// \endcode
/// complexType()
/// matches "_Complex float f"
extern const AstTypeMatcher<ComplexType> complexType;
/// Matches any real floating-point type (float, double, long double).
///
/// Given
/// \code
/// int i;
/// float f;
/// \endcode
/// realFloatingPointType()
/// matches "float f" but not "int i"
AST_MATCHER(Type, realFloatingPointType) {
  // "Real" excludes complex floating types (see complexType() above).
  return Node.isRealFloatingType();
}
/// Matches arrays and C99 complex types that have a specific element
/// type.
///
/// Given
/// \code
/// struct A {};
/// A a[7];
/// int b[7];
/// \endcode
/// arrayType(hasElementType(builtinType()))
/// matches "int b[7]"
///
/// Usable as: Matcher<ArrayType>, Matcher<ComplexType>
AST_TYPELOC_TRAVERSE_MATCHER_DECL(hasElementType, getElement,
AST_POLYMORPHIC_SUPPORTED_TYPES(ArrayType,
ComplexType));
/// Matches C arrays with a specified constant size.
///
/// Given
/// \code
/// void() {
/// int a[2];
/// int b[] = { 2, 3 };
/// int c[b[0]];
/// }
/// \endcode
/// constantArrayType()
/// matches "int a[2]"
extern const AstTypeMatcher<ConstantArrayType> constantArrayType;
/// Matches nodes that have the specified size.
///
/// Given
/// \code
/// int a[42];
/// int b[2 * 21];
/// int c[41], d[43];
/// char *s = "abcd";
/// wchar_t *ws = L"abcd";
/// char *w = "a";
/// \endcode
/// constantArrayType(hasSize(42))
/// matches "int a[42]" and "int b[2 * 21]"
/// stringLiteral(hasSize(4))
/// matches "abcd", L"abcd"
AST_POLYMORPHIC_MATCHER_P(hasSize,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(ConstantArrayType,
                                                          StringLiteral),
                          unsigned, N) {
  // HasSizeMatcher is specialized per node type (array extent vs. string
  // literal length), so dispatch through it.
  return internal::HasSizeMatcher<NodeType>::hasSize(Node, N);
}
/// Matches C++ arrays whose size is a value-dependent expression.
///
/// Given
/// \code
/// template<typename T, int Size>
/// class array {
/// T data[Size];
/// };
/// \endcode
/// dependentSizedArrayType
/// matches "T data[Size]"
extern const AstTypeMatcher<DependentSizedArrayType> dependentSizedArrayType;
/// Matches C arrays with unspecified size.
///
/// Given
/// \code
/// int a[] = { 2, 3 };
/// int b[42];
/// void f(int c[]) { int d[a[0]]; };
/// \endcode
/// incompleteArrayType()
/// matches "int a[]" and "int c[]"
extern const AstTypeMatcher<IncompleteArrayType> incompleteArrayType;
/// Matches C arrays with a specified size that is not an
/// integer-constant-expression.
///
/// Given
/// \code
/// void f() {
/// int a[] = { 2, 3 }
/// int b[42];
/// int c[a[0]];
/// }
/// \endcode
/// variableArrayType()
/// matches "int c[a[0]]"
extern const AstTypeMatcher<VariableArrayType> variableArrayType;
/// Matches \c VariableArrayType nodes that have a specific size
/// expression.
///
/// Given
/// \code
/// void f(int b) {
/// int a[b];
/// }
/// \endcode
/// variableArrayType(hasSizeExpr(ignoringImpCasts(declRefExpr(to(
/// varDecl(hasName("b")))))))
/// matches "int a[b]"
AST_MATCHER_P(VariableArrayType, hasSizeExpr,
              internal::Matcher<Expr>, InnerMatcher) {
  // Match against the (non-constant) size expression of the VLA.
  return InnerMatcher.matches(*Node.getSizeExpr(), Finder, Builder);
}
/// Matches atomic types.
///
/// Given
/// \code
/// _Atomic(int) i;
/// \endcode
/// atomicType()
/// matches "_Atomic(int) i"
extern const AstTypeMatcher<AtomicType> atomicType;
/// Matches atomic types with a specific value type.
///
/// Given
/// \code
/// _Atomic(int) i;
/// _Atomic(float) f;
/// \endcode
/// atomicType(hasValueType(isInteger()))
/// matches "_Atomic(int) i"
///
/// Usable as: Matcher<AtomicType>
AST_TYPELOC_TRAVERSE_MATCHER_DECL(hasValueType, getValue,
AST_POLYMORPHIC_SUPPORTED_TYPES(AtomicType));
/// Matches types nodes representing C++11 auto types.
///
/// Given:
/// \code
/// auto n = 4;
/// int v[] = { 2, 3 }
/// for (auto i : v) { }
/// \endcode
/// autoType()
/// matches "auto n" and "auto i"
extern const AstTypeMatcher<AutoType> autoType;
/// Matches types nodes representing C++11 decltype(<expr>) types.
///
/// Given:
/// \code
/// short i = 1;
/// int j = 42;
/// decltype(i + j) result = i + j;
/// \endcode
/// decltypeType()
/// matches "decltype(i + j)"
extern const AstTypeMatcher<DecltypeType> decltypeType;
/// Matches \c AutoType nodes where the deduced type is a specific type.
///
/// Note: There is no \c TypeLoc for the deduced type and thus no
/// \c getDeducedLoc() matcher.
///
/// Given
/// \code
/// auto a = 1;
/// auto b = 2.0;
/// \endcode
/// autoType(hasDeducedType(isInteger()))
/// matches "auto a"
///
/// Usable as: Matcher<AutoType>
AST_TYPE_TRAVERSE_MATCHER(hasDeducedType, getDeducedType,
AST_POLYMORPHIC_SUPPORTED_TYPES(AutoType));
/// Matches \c DecltypeType nodes to find out the underlying type.
///
/// Given
/// \code
/// decltype(1) a = 1;
/// decltype(2.0) b = 2.0;
/// \endcode
/// decltypeType(hasUnderlyingType(isInteger()))
/// matches the type of "a"
///
/// Usable as: Matcher<DecltypeType>
AST_TYPE_TRAVERSE_MATCHER(hasUnderlyingType, getUnderlyingType,
AST_POLYMORPHIC_SUPPORTED_TYPES(DecltypeType));
/// Matches \c FunctionType nodes.
///
/// Given
/// \code
/// int (*f)(int);
/// void g();
/// \endcode
/// functionType()
/// matches "int (*f)(int)" and the type of "g".
extern const AstTypeMatcher<FunctionType> functionType;
/// Matches \c FunctionProtoType nodes.
///
/// Given
/// \code
/// int (*f)(int);
/// void g();
/// \endcode
/// functionProtoType()
/// matches "int (*f)(int)" and the type of "g" in C++ mode.
/// In C mode, "g" is not matched because it does not contain a prototype.
extern const AstTypeMatcher<FunctionProtoType> functionProtoType;
/// Matches \c ParenType nodes.
///
/// Given
/// \code
/// int (*ptr_to_array)[4];
/// int *array_of_ptrs[4];
/// \endcode
///
/// \c varDecl(hasType(pointsTo(parenType()))) matches \c ptr_to_array but not
/// \c array_of_ptrs.
extern const AstTypeMatcher<ParenType> parenType;
/// Matches \c ParenType nodes where the inner type is a specific type.
///
/// Given
/// \code
/// int (*ptr_to_array)[4];
/// int (*ptr_to_func)(int);
/// \endcode
///
/// \c varDecl(hasType(pointsTo(parenType(innerType(functionType()))))) matches
/// \c ptr_to_func but not \c ptr_to_array.
///
/// Usable as: Matcher<ParenType>
AST_TYPE_TRAVERSE_MATCHER(innerType, getInnerType,
AST_POLYMORPHIC_SUPPORTED_TYPES(ParenType));
/// Matches block pointer types, i.e. types syntactically represented as
/// "void (^)(int)".
///
/// The \c pointee is always required to be a \c FunctionType.
extern const AstTypeMatcher<BlockPointerType> blockPointerType;
/// Matches member pointer types.
/// Given
/// \code
///   struct A { int i; };
///   int A::* ptr = &A::i;
/// \endcode
/// memberPointerType()
/// matches "A::* ptr"
extern const AstTypeMatcher<MemberPointerType> memberPointerType;
/// Matches pointer types, but does not match Objective-C object pointer
/// types.
///
/// Given
/// \code
/// int *a;
/// int &b = *a;
/// int c = 5;
///
/// @interface Foo
/// @end
/// Foo *f;
/// \endcode
/// pointerType()
/// matches "int *a", but does not match "Foo *f".
extern const AstTypeMatcher<PointerType> pointerType;
/// Matches an Objective-C object pointer type, which is different from
/// a pointer type, despite being syntactically similar.
///
/// Given
/// \code
/// int *a;
///
/// @interface Foo
/// @end
/// Foo *f;
/// \endcode
/// pointerType()
/// matches "Foo *f", but does not match "int *a".
extern const AstTypeMatcher<ObjCObjectPointerType> objcObjectPointerType;
/// Matches both lvalue and rvalue reference types.
///
/// Given
/// \code
/// int *a;
/// int &b = *a;
/// int &&c = 1;
/// auto &d = b;
/// auto &&e = c;
/// auto &&f = 2;
/// int g = 5;
/// \endcode
///
/// \c referenceType() matches the types of \c b, \c c, \c d, \c e, and \c f.
extern const AstTypeMatcher<ReferenceType> referenceType;
/// Matches lvalue reference types.
///
/// Given:
/// \code
/// int *a;
/// int &b = *a;
/// int &&c = 1;
/// auto &d = b;
/// auto &&e = c;
/// auto &&f = 2;
/// int g = 5;
/// \endcode
///
/// \c lValueReferenceType() matches the types of \c b, \c d, and \c e. \c e is
/// matched since the type is deduced as int& by reference collapsing rules.
extern const AstTypeMatcher<LValueReferenceType> lValueReferenceType;
/// Matches rvalue reference types.
///
/// Given:
/// \code
/// int *a;
/// int &b = *a;
/// int &&c = 1;
/// auto &d = b;
/// auto &&e = c;
/// auto &&f = 2;
/// int g = 5;
/// \endcode
///
/// \c rValueReferenceType() matches the types of \c c and \c f. \c e is not
/// matched as it is deduced to int& by reference collapsing rules.
extern const AstTypeMatcher<RValueReferenceType> rValueReferenceType;
/// Narrows PointerType (and similar) matchers to those where the
/// \c pointee matches a given matcher.
///
/// Given
/// \code
/// int *a;
/// int const *b;
/// float const *f;
/// \endcode
/// pointerType(pointee(isConstQualified(), isInteger()))
/// matches "int const *b"
///
/// Usable as: Matcher<BlockPointerType>, Matcher<MemberPointerType>,
/// Matcher<PointerType>, Matcher<ReferenceType>
AST_TYPELOC_TRAVERSE_MATCHER_DECL(
pointee, getPointee,
AST_POLYMORPHIC_SUPPORTED_TYPES(BlockPointerType, MemberPointerType,
PointerType, ReferenceType));
/// Matches typedef types.
///
/// Given
/// \code
/// typedef int X;
/// \endcode
/// typedefType()
/// matches "typedef int X"
extern const AstTypeMatcher<TypedefType> typedefType;
/// Matches enum types.
///
/// Given
/// \code
/// enum C { Green };
/// enum class S { Red };
///
/// C c;
/// S s;
/// \endcode
///
/// \c enumType() matches the type of the variable declarations of both \c c and
/// \c s.
extern const AstTypeMatcher<EnumType> enumType;
/// Matches template specialization types.
///
/// Given
/// \code
/// template <typename T>
/// class C { };
///
/// template class C<int>; // A
/// C<char> var; // B
/// \endcode
///
/// \c templateSpecializationType() matches the type of the explicit
/// instantiation in \c A and the type of the variable declaration in \c B.
extern const AstTypeMatcher<TemplateSpecializationType>
templateSpecializationType;
/// Matches types nodes representing unary type transformations.
///
/// Given:
/// \code
/// typedef __underlying_type(T) type;
/// \endcode
/// unaryTransformType()
/// matches "__underlying_type(T)"
extern const AstTypeMatcher<UnaryTransformType> unaryTransformType;
/// Matches record types (e.g. structs, classes).
///
/// Given
/// \code
/// class C {};
/// struct S {};
///
/// C c;
/// S s;
/// \endcode
///
/// \c recordType() matches the type of the variable declarations of both \c c
/// and \c s.
extern const AstTypeMatcher<RecordType> recordType;
/// Matches tag types (record and enum types).
///
/// Given
/// \code
/// enum E {};
/// class C {};
///
/// E e;
/// C c;
/// \endcode
///
/// \c tagType() matches the type of the variable declarations of both \c e
/// and \c c.
extern const AstTypeMatcher<TagType> tagType;
/// Matches types specified with an elaborated type keyword or with a
/// qualified name.
///
/// Given
/// \code
/// namespace N {
/// namespace M {
/// class D {};
/// }
/// }
/// class C {};
///
/// class C c;
/// N::M::D d;
/// \endcode
///
/// \c elaboratedType() matches the type of the variable declarations of both
/// \c c and \c d.
extern const AstTypeMatcher<ElaboratedType> elaboratedType;
/// Matches ElaboratedTypes whose qualifier, a NestedNameSpecifier,
/// matches \c InnerMatcher if the qualifier exists.
///
/// Given
/// \code
/// namespace N {
/// namespace M {
/// class D {};
/// }
/// }
/// N::M::D d;
/// \endcode
///
/// \c elaboratedType(hasQualifier(hasPrefix(specifiesNamespace(hasName("N"))))
/// matches the type of the variable declaration of \c d.
AST_MATCHER_P(ElaboratedType, hasQualifier,
              internal::Matcher<NestedNameSpecifier>, InnerMatcher) {
  // Only types that actually carry a qualifier can match.
  if (const NestedNameSpecifier *Qualifier = Node.getQualifier())
    return InnerMatcher.matches(*Qualifier, Finder, Builder);
  return false;
}
/// Matches ElaboratedTypes whose named type matches \c InnerMatcher.
///
/// Given
/// \code
/// namespace N {
/// namespace M {
/// class D {};
/// }
/// }
/// N::M::D d;
/// \endcode
///
/// \c elaboratedType(namesType(recordType(
/// hasDeclaration(namedDecl(hasName("D")))))) matches the type of the variable
/// declaration of \c d.
AST_MATCHER_P(ElaboratedType, namesType, internal::Matcher<QualType>,
              InnerMatcher) {
  // Match against the underlying type the elaborated type names.
  return InnerMatcher.matches(Node.getNamedType(), Finder, Builder);
}
/// Matches types that represent the result of substituting a type for a
/// template type parameter.
///
/// Given
/// \code
/// template <typename T>
/// void F(T t) {
/// int i = 1 + t;
/// }
/// \endcode
///
/// \c substTemplateTypeParmType() matches the type of 't' but not '1'
extern const AstTypeMatcher<SubstTemplateTypeParmType>
substTemplateTypeParmType;
/// Matches template type parameter substitutions that have a replacement
/// type that matches the provided matcher.
///
/// Given
/// \code
/// template <typename T>
/// double F(T t);
/// int i;
/// double j = F(i);
/// \endcode
///
/// \c substTemplateTypeParmType(hasReplacementType(type())) matches int
AST_TYPE_TRAVERSE_MATCHER(
hasReplacementType, getReplacementType,
AST_POLYMORPHIC_SUPPORTED_TYPES(SubstTemplateTypeParmType));
/// Matches template type parameter types.
///
/// Example matches T, but not int.
/// (matcher = templateTypeParmType())
/// \code
/// template <typename T> void f(int i);
/// \endcode
extern const AstTypeMatcher<TemplateTypeParmType> templateTypeParmType;
/// Matches injected class name types.
///
/// Example matches S s, but not S<T> s.
/// (matcher = parmVarDecl(hasType(injectedClassNameType())))
/// \code
/// template <typename T> struct S {
/// void f(S s);
/// void g(S<T> s);
/// };
/// \endcode
extern const AstTypeMatcher<InjectedClassNameType> injectedClassNameType;
/// Matches decayed type
/// Example matches i[] in declaration of f.
/// (matcher = valueDecl(hasType(decayedType(hasDecayedType(pointerType())))))
/// Example matches i[1].
/// (matcher = expr(hasType(decayedType(hasDecayedType(pointerType())))))
/// \code
/// void f(int i[]) {
/// i[1] = 0;
/// }
/// \endcode
extern const AstTypeMatcher<DecayedType> decayedType;
/// Matches the decayed type, whose decayed type matches \c InnerMatcher
AST_MATCHER_P(DecayedType, hasDecayedType, internal::Matcher<QualType>,
              InnerType) {
  // Match against the pointer type the original array/function decayed to.
  return InnerType.matches(Node.getDecayedType(), Finder, Builder);
}
/// Matches declarations whose declaration context, interpreted as a
/// Decl, matches \c InnerMatcher.
///
/// Given
/// \code
/// namespace N {
/// namespace M {
/// class D {};
/// }
/// }
/// \endcode
///
/// \c cxxRecordDecl(hasDeclContext(namedDecl(hasName("M")))) matches the
/// declaration of \c class \c D.
AST_MATCHER_P(Decl, hasDeclContext, internal::Matcher<Decl>, InnerMatcher) {
  // Reinterpret the declaration context as a Decl and match against it;
  // declarations without a context cannot match.
  if (const DeclContext *Ctx = Node.getDeclContext())
    return InnerMatcher.matches(*Decl::castFromDeclContext(Ctx), Finder,
                                Builder);
  return false;
}
/// Matches nested name specifiers.
///
/// Given
/// \code
/// namespace ns {
/// struct A { static void f(); };
/// void A::f() {}
/// void g() { A::f(); }
/// }
/// ns::A a;
/// \endcode
/// nestedNameSpecifier()
/// matches "ns::" and both "A::"
extern const internal::VariadicAllOfMatcher<NestedNameSpecifier>
nestedNameSpecifier;
/// Same as \c nestedNameSpecifier but matches \c NestedNameSpecifierLoc.
extern const internal::VariadicAllOfMatcher<NestedNameSpecifierLoc>
nestedNameSpecifierLoc;
/// Matches \c NestedNameSpecifierLocs for which the given inner
/// NestedNameSpecifier-matcher matches.
AST_MATCHER_FUNCTION_P_OVERLOAD(
    internal::BindableMatcher<NestedNameSpecifierLoc>, loc,
    internal::Matcher<NestedNameSpecifier>, InnerMatcher, 1) {
  // Adapt a NestedNameSpecifier matcher so it can run against the
  // corresponding source-location node.
  return internal::BindableMatcher<NestedNameSpecifierLoc>(
      new internal::LocMatcher<NestedNameSpecifierLoc, NestedNameSpecifier>(
          InnerMatcher));
}
/// Matches nested name specifiers that specify a type matching the
/// given \c QualType matcher without qualifiers.
///
/// Given
/// \code
/// struct A { struct B { struct C {}; }; };
/// A::B::C c;
/// \endcode
/// nestedNameSpecifier(specifiesType(
/// hasDeclaration(cxxRecordDecl(hasName("A")))
/// ))
/// matches "A::"
AST_MATCHER_P(NestedNameSpecifier, specifiesType,
              internal::Matcher<QualType>, InnerMatcher) {
  // Fetch the type once instead of twice; a specifier that does not name a
  // type (e.g. a namespace) cannot match.  The inner matcher sees the type
  // without qualifiers (hence qualifier bits 0).
  if (const Type *T = Node.getAsType())
    return InnerMatcher.matches(QualType(T, 0), Finder, Builder);
  return false;
}
/// Matches nested name specifier locs that specify a type matching the
/// given \c TypeLoc.
///
/// Given
/// \code
/// struct A { struct B { struct C {}; }; };
/// A::B::C c;
/// \endcode
/// nestedNameSpecifierLoc(specifiesTypeLoc(loc(type(
/// hasDeclaration(cxxRecordDecl(hasName("A")))))))
/// matches "A::"
AST_MATCHER_P(NestedNameSpecifierLoc, specifiesTypeLoc,
              internal::Matcher<TypeLoc>, InnerMatcher) {
  // Require a valid loc whose specifier names a type before delegating to
  // the inner TypeLoc matcher.
  return Node && Node.getNestedNameSpecifier()->getAsType() &&
         InnerMatcher.matches(Node.getTypeLoc(), Finder, Builder);
}
/// Matches on the prefix of a \c NestedNameSpecifier.
///
/// Given
/// \code
/// struct A { struct B { struct C {}; }; };
/// A::B::C c;
/// \endcode
/// nestedNameSpecifier(hasPrefix(specifiesType(asString("struct A")))) and
/// matches "A::"
AST_MATCHER_P_OVERLOAD(NestedNameSpecifier, hasPrefix,
                       internal::Matcher<NestedNameSpecifier>, InnerMatcher,
                       0) {
  // The outermost specifier has no prefix and therefore cannot match.
  const NestedNameSpecifier *NextNode = Node.getPrefix();
  if (!NextNode)
    return false;
  return InnerMatcher.matches(*NextNode, Finder, Builder);
}
/// Matches on the prefix of a \c NestedNameSpecifierLoc.
///
/// Given
/// \code
/// struct A { struct B { struct C {}; }; };
/// A::B::C c;
/// \endcode
/// nestedNameSpecifierLoc(hasPrefix(loc(specifiesType(asString("struct A")))))
/// matches "A::"
AST_MATCHER_P_OVERLOAD(NestedNameSpecifierLoc, hasPrefix,
                       internal::Matcher<NestedNameSpecifierLoc>, InnerMatcher,
                       1) {
  // Loc variant of hasPrefix; an invalid (absent) prefix loc cannot match.
  NestedNameSpecifierLoc NextNode = Node.getPrefix();
  if (!NextNode)
    return false;
  return InnerMatcher.matches(NextNode, Finder, Builder);
}
/// Matches nested name specifiers that specify a namespace matching the
/// given namespace matcher.
///
/// Given
/// \code
/// namespace ns { struct A {}; }
/// ns::A a;
/// \endcode
/// nestedNameSpecifier(specifiesNamespace(hasName("ns")))
/// matches "ns::"
AST_MATCHER_P(NestedNameSpecifier, specifiesNamespace,
              internal::Matcher<NamespaceDecl>, InnerMatcher) {
  // Resolve the namespace once instead of twice; specifiers that do not
  // name a namespace (e.g. types) cannot match.
  if (const NamespaceDecl *Namespace = Node.getAsNamespace())
    return InnerMatcher.matches(*Namespace, Finder, Builder);
  return false;
}
/// Overloads for the \c equalsNode matcher.
/// FIXME: Implement for other node types.
/// @{
/// Matches if a node equals another node.
///
/// \c Decl has pointer identity in the AST.
AST_MATCHER_P_OVERLOAD(Decl, equalsNode, const Decl*, Other, 0) {
  // Address comparison suffices: Decls have pointer identity in the AST.
  return &Node == Other;
}
/// Matches if a node equals another node.
///
/// \c Stmt has pointer identity in the AST.
AST_MATCHER_P_OVERLOAD(Stmt, equalsNode, const Stmt*, Other, 1) {
  // Address comparison suffices: Stmts have pointer identity in the AST.
  return &Node == Other;
}
/// Matches if a node equals another node.
///
/// \c Type has pointer identity in the AST.
AST_MATCHER_P_OVERLOAD(Type, equalsNode, const Type*, Other, 2) {
  // Address comparison suffices: Types have pointer identity in the AST.
  return &Node == Other;
}
/// @}
/// Matches each case or default statement belonging to the given switch
/// statement. This matcher may produce multiple matches.
///
/// Given
/// \code
/// switch (1) { case 1: case 2: default: switch (2) { case 3: case 4: ; } }
/// \endcode
/// switchStmt(forEachSwitchCase(caseStmt().bind("c"))).bind("s")
/// matches four times, with "c" binding each of "case 1:", "case 2:",
/// "case 3:" and "case 4:", and "s" respectively binding "switch (1)",
/// "switch (1)", "switch (2)" and "switch (2)".
AST_MATCHER_P(SwitchStmt, forEachSwitchCase, internal::Matcher<SwitchCase>,
              InnerMatcher) {
  // Walk the switch's case list, collecting a separate binding set for each
  // case that matches, so the caller observes one match per case.
  // FIXME: getSwitchCaseList() does not necessarily guarantee a stable
  // iteration order. We should use the more general iterating matchers once
  // they are capable of expressing this matcher (for example, it should ignore
  // case statements belonging to nested switch statements).
  BoundNodesTreeBuilder Matches;
  bool AnyCaseMatched = false;
  for (const SwitchCase *Case = Node.getSwitchCaseList(); Case;
       Case = Case->getNextSwitchCase()) {
    BoundNodesTreeBuilder CaseBindings(*Builder);
    if (InnerMatcher.matches(*Case, Finder, &CaseBindings)) {
      AnyCaseMatched = true;
      Matches.addMatch(CaseBindings);
    }
  }
  *Builder = std::move(Matches);
  return AnyCaseMatched;
}
/// Matches each constructor initializer in a constructor definition.
///
/// Given
/// \code
/// class A { A() : i(42), j(42) {} int i; int j; };
/// \endcode
/// cxxConstructorDecl(forEachConstructorInitializer(
/// forField(decl().bind("x"))
/// ))
/// will trigger two matches, binding for 'i' and 'j' respectively.
AST_MATCHER_P(CXXConstructorDecl, forEachConstructorInitializer,
              internal::Matcher<CXXCtorInitializer>, InnerMatcher) {
  // Collect a separate binding set for each initializer that matches, so the
  // caller observes one match per initializer.
  BoundNodesTreeBuilder Matches;
  bool AnyInitMatched = false;
  for (const auto *Init : Node.inits()) {
    BoundNodesTreeBuilder InitBindings(*Builder);
    if (InnerMatcher.matches(*Init, Finder, &InitBindings)) {
      AnyInitMatched = true;
      Matches.addMatch(InitBindings);
    }
  }
  *Builder = std::move(Matches);
  return AnyInitMatched;
}
/// Matches constructor declarations that are copy constructors.
///
/// Given
/// \code
/// struct S {
/// S(); // #1
/// S(const S &); // #2
/// S(S &&); // #3
/// };
/// \endcode
/// cxxConstructorDecl(isCopyConstructor()) will match #2, but not #1 or #3.
AST_MATCHER(CXXConstructorDecl, isCopyConstructor) {
  // Thin wrapper over the declaration's own classification.
  return Node.isCopyConstructor();
}
/// Matches constructor declarations that are move constructors.
///
/// Given
/// \code
/// struct S {
/// S(); // #1
/// S(const S &); // #2
/// S(S &&); // #3
/// };
/// \endcode
/// cxxConstructorDecl(isMoveConstructor()) will match #3, but not #1 or #2.
AST_MATCHER(CXXConstructorDecl, isMoveConstructor) {
  // Thin wrapper over the declaration's own classification.
  return Node.isMoveConstructor();
}
/// Matches constructor declarations that are default constructors.
///
/// Given
/// \code
/// struct S {
/// S(); // #1
/// S(const S &); // #2
/// S(S &&); // #3
/// };
/// \endcode
/// cxxConstructorDecl(isDefaultConstructor()) will match #1, but not #2 or #3.
AST_MATCHER(CXXConstructorDecl, isDefaultConstructor) {
  // Thin wrapper over the declaration's own classification.
  return Node.isDefaultConstructor();
}
/// Matches constructors that delegate to another constructor.
///
/// Given
/// \code
/// struct S {
/// S(); // #1
/// S(int) {} // #2
/// S(S &&) : S() {} // #3
/// };
/// S::S() : S(0) {} // #4
/// \endcode
/// cxxConstructorDecl(isDelegatingConstructor()) will match #3 and #4, but not
/// #1 or #2.
AST_MATCHER(CXXConstructorDecl, isDelegatingConstructor) {
  // Thin wrapper over the declaration's own classification.
  return Node.isDelegatingConstructor();
}
/// Matches constructor, conversion function, and deduction guide declarations
/// that have an explicit specifier if this explicit specifier is resolved to
/// true.
///
/// Given
/// \code
/// template<bool b>
/// struct S {
/// S(int); // #1
/// explicit S(double); // #2
/// operator int(); // #3
/// explicit operator bool(); // #4
/// explicit(false) S(bool) // # 7
/// explicit(true) S(char) // # 8
/// explicit(b) S(S) // # 9
/// };
/// S(int) -> S<true> // #5
/// explicit S(double) -> S<false> // #6
/// \endcode
/// cxxConstructorDecl(isExplicit()) will match #2 and #8, but not #1, #7 or #9.
/// cxxConversionDecl(isExplicit()) will match #4, but not #3.
/// cxxDeductionGuideDecl(isExplicit()) will match #6, but not #5.
AST_POLYMORPHIC_MATCHER(isExplicit, AST_POLYMORPHIC_SUPPORTED_TYPES(
CXXConstructorDecl, CXXConversionDecl,
CXXDeductionGuideDecl)) {
return Node.isExplicit();
}
/// Matches the expression in an explicit specifier if present in the given
/// declaration.
///
/// Given
/// \code
///   template<bool b>
///   struct S {
///     S(int); // #1
///     explicit S(double); // #2
///     operator int(); // #3
///     explicit operator bool(); // #4
///     explicit(false) S(bool) // # 7
///     explicit(true) S(char) // # 8
///     explicit(b) S(S) // # 9
///   };
///   S(int) -> S<true> // #5
///   explicit S(double) -> S<false> // #6
/// \endcode
/// cxxConstructorDecl(hasExplicitSpecifier(constantExpr())) will match #7, #8 and #9, but not #1 or #2.
/// cxxConversionDecl(hasExplicitSpecifier(constantExpr())) will not match #3 or #4.
/// cxxDeductionGuideDecl(hasExplicitSpecifier(constantExpr())) will not match #5 or #6.
AST_MATCHER_P(FunctionDecl, hasExplicitSpecifier, internal::Matcher<Expr>,
              InnerMatcher) {
  ExplicitSpecifier ES = ExplicitSpecifier::getFromDecl(&Node);
  // Declarations with no explicit(expr) -- including a plain `explicit`,
  // which carries no expression (#2 above) -- never match.
  if (!ES.getExpr())
    return false;
  return InnerMatcher.matches(*ES.getExpr(), Finder, Builder);
}
/// Matches function and namespace declarations that are marked with
/// the inline keyword.
///
/// Given
/// \code
///   inline void f();
///   void g();
///   namespace n {
///   inline namespace m {}
///   }
/// \endcode
/// functionDecl(isInline()) will match ::f().
/// namespaceDecl(isInline()) will match n::m.
AST_POLYMORPHIC_MATCHER(isInline,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(NamespaceDecl,
                                                        FunctionDecl)) {
  // The spelling of the "is inline" query differs between the two supported
  // polymorphic types, so dispatch on the dynamic type of the node.
  if (const auto *AsFunction = dyn_cast<FunctionDecl>(&Node))
    return AsFunction->isInlineSpecified();
  if (const auto *AsNamespace = dyn_cast<NamespaceDecl>(&Node))
    return AsNamespace->isInline();
  llvm_unreachable("Not a valid polymorphic type");
}
/// Matches anonymous namespace declarations.
///
/// Given
/// \code
///   namespace n {
///   namespace {} // #1
///   }
/// \endcode
/// namespaceDecl(isAnonymous()) will match #1 but not ::n.
AST_MATCHER(NamespaceDecl, isAnonymous) {
  return Node.isAnonymousNamespace();
}

/// Matches declarations in the namespace `std`, but not in nested namespaces.
///
/// Given
/// \code
///   class vector {};
///   namespace foo {
///     class vector {};
///     namespace std {
///       class vector {};
///     }
///   }
///   namespace std {
///     inline namespace __1 {
///       class vector {}; // #1
///       namespace experimental {
///         class vector {};
///       }
///     }
///   }
/// \endcode
/// cxxRecordDecl(hasName("vector"), isInStdNamespace()) will match only #1.
AST_MATCHER(Decl, isInStdNamespace) { return Node.isInStdNamespace(); }

/// If the given case statement does not use the GNU case range
/// extension, matches the constant given in the statement.
///
/// Given
/// \code
///   switch (1) { case 1: case 1+1: case 3 ... 4: ; }
/// \endcode
/// caseStmt(hasCaseConstant(integerLiteral()))
///   matches "case 1:"
AST_MATCHER_P(CaseStmt, hasCaseConstant, internal::Matcher<Expr>,
              InnerMatcher) {
  // A non-null RHS means this is a GNU "case low ... high:" range, which is
  // deliberately never matched (see the doc above).
  if (Node.getRHS())
    return false;
  return InnerMatcher.matches(*Node.getLHS(), Finder, Builder);
}
/// Matches declaration that has a given attribute.
///
/// Given
/// \code
///   __attribute__((device)) void f() { ... }
/// \endcode
/// decl(hasAttr(clang::attr::CUDADevice)) matches the function declaration of
/// f. If the matcher is used from clang-query, attr::Kind parameter should be
/// passed as a quoted string. e.g., hasAttr("attr::CUDADevice").
AST_MATCHER_P(Decl, hasAttr, attr::Kind, AttrKind) {
  // Scan the declaration's attribute list for one of the requested kind.
  for (const auto *A : Node.attrs())
    if (A->getKind() == AttrKind)
      return true;
  return false;
}
/// Matches the return value expression of a return statement
///
/// Given
/// \code
///   return a + b;
/// \endcode
/// hasReturnValue(binaryOperator())
///   matches 'return a + b'
///     with binaryOperator()
///       matching 'a + b'
AST_MATCHER_P(ReturnStmt, hasReturnValue, internal::Matcher<Expr>,
              InnerMatcher) {
  // A bare `return;` carries no value expression and can never match.
  const auto *RetValue = Node.getRetValue();
  return RetValue != nullptr &&
         InnerMatcher.matches(*RetValue, Finder, Builder);
}
/// Matches CUDA kernel call expression.
///
/// Example matches,
/// \code
///   kernel<<<i,j>>>();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CUDAKernelCallExpr>
    cudaKernelCallExpr;

/// Matches expressions that resolve to a null pointer constant, such as
/// GNU's __null, C++11's nullptr, or C's NULL macro.
///
/// Given:
/// \code
///   void *v1 = NULL;
///   void *v2 = nullptr;
///   void *v3 = __null; // GNU extension
///   char *cp = (char *)0;
///   int *ip = 0;
///   int i = 0;
/// \endcode
/// expr(nullPointerConstant())
///   matches the initializer for v1, v2, v3, cp, and ip. Does not match the
///   initializer for i.
AST_MATCHER(Expr, nullPointerConstant) {
  // NPC_ValueDependentIsNull: value-dependent expressions (e.g. in templates)
  // are treated as potential null pointer constants.
  return Node.isNullPointerConstant(Finder->getASTContext(),
                                    Expr::NPC_ValueDependentIsNull);
}
/// Matches declaration of the function the statement belongs to
///
/// Given:
/// \code
///   F& operator=(const F& o) {
///     std::copy_if(o.begin(), o.end(), begin(), [](V v) { return v > 0; });
///     return *this;
///   }
/// \endcode
/// returnStmt(forFunction(hasName("operator=")))
///   matches 'return *this'
///   but does not match 'return v > 0'
AST_MATCHER_P(Stmt, forFunction, internal::Matcher<FunctionDecl>,
              InnerMatcher) {
  // Walk the parent chain (depth-first) until a FunctionDecl or a LambdaExpr
  // enclosing the statement is found.
  const auto &Parents = Finder->getASTContext().getParents(Node);
  llvm::SmallVector<ast_type_traits::DynTypedNode, 8> Stack(Parents.begin(),
                                                            Parents.end());
  while (!Stack.empty()) {
    // Copy the node by value: pop_back() destroys the element in the vector,
    // so holding a reference to Stack.back() across the pop (as the previous
    // code did) leaves a dangling reference whose slot is reused by the
    // push_back calls below.
    ast_type_traits::DynTypedNode CurNode = Stack.back();
    Stack.pop_back();
    if (const auto *FuncDeclNode = CurNode.get<FunctionDecl>()) {
      if (InnerMatcher.matches(*FuncDeclNode, Finder, Builder))
        return true;
    } else if (const auto *LambdaExprNode = CurNode.get<LambdaExpr>()) {
      // A lambda body belongs to the closure type's call operator, not to the
      // surrounding function, so match against the call operator.
      if (InnerMatcher.matches(*LambdaExprNode->getCallOperator(), Finder,
                               Builder))
        return true;
    } else {
      // Neither a function nor a lambda: keep walking upwards.
      for (const auto &Parent : Finder->getASTContext().getParents(CurNode))
        Stack.push_back(Parent);
    }
  }
  return false;
}
/// Matches a declaration that has external formal linkage.
///
/// Example matches only z (matcher = varDecl(hasExternalFormalLinkage()))
/// \code
/// void f() {
///   int x;
///   static int y;
/// }
/// int z;
/// \endcode
///
/// Example matches f() because it has external formal linkage despite being
/// unique to the translation unit as though it has internal linkage
/// (matcher = functionDecl(hasExternalFormalLinkage()))
///
/// \code
/// namespace {
/// void f() {}
/// }
/// \endcode
AST_MATCHER(NamedDecl, hasExternalFormalLinkage) {
  return Node.hasExternalFormalLinkage();
}

/// Matches a declaration that has default arguments.
///
/// Example matches y (matcher = parmVarDecl(hasDefaultArgument()))
/// \code
/// void x(int val) {}
/// void y(int val = 0) {}
/// \endcode
///
/// Deprecated. Use hasInitializer() instead to be able to
/// match on the contents of the default argument.  For example:
///
/// \code
/// void x(int val = 7) {}
/// void y(int val = 42) {}
/// \endcode
/// parmVarDecl(hasInitializer(integerLiteral(equals(42))))
///   matches the parameter of y
///
/// A matcher such as
///   parmVarDecl(hasInitializer(anything()))
/// is equivalent to parmVarDecl(hasDefaultArgument()).
AST_MATCHER(ParmVarDecl, hasDefaultArgument) {
  return Node.hasDefaultArg();
}
/// Matches array new expressions.
///
/// Given:
/// \code
///   MyClass *p1 = new MyClass[10];
/// \endcode
/// cxxNewExpr(isArray())
///   matches the expression 'new MyClass[10]'.
AST_MATCHER(CXXNewExpr, isArray) {
  return Node.isArray();
}

/// Matches array new expressions with a given array size.
///
/// Given:
/// \code
///   MyClass *p1 = new MyClass[10];
/// \endcode
/// cxxNewExpr(hasArraySize(integerLiteral(equals(10))))
///   matches the expression 'new MyClass[10]'.
AST_MATCHER_P(CXXNewExpr, hasArraySize, internal::Matcher<Expr>, InnerMatcher) {
  // getArraySize() yields an optional expression pointer; the size can be
  // absent for an array new (NOTE(review): presumably in dependent contexts
  // -- confirm), so check it before dereferencing twice.
  return Node.isArray() && *Node.getArraySize() &&
         InnerMatcher.matches(**Node.getArraySize(), Finder, Builder);
}

/// Matches a class declaration that is defined.
///
/// Example matches x (matcher = cxxRecordDecl(hasDefinition()))
/// \code
/// class x {};
/// class y;
/// \endcode
AST_MATCHER(CXXRecordDecl, hasDefinition) {
  return Node.hasDefinition();
}

/// Matches C++11 scoped enum declaration.
///
/// Example matches Y (matcher = enumDecl(isScoped()))
/// \code
/// enum X {};
/// enum class Y {};
/// \endcode
AST_MATCHER(EnumDecl, isScoped) {
  return Node.isScoped();
}

/// Matches a function declared with a trailing return type.
///
/// Example matches Y (matcher = functionDecl(hasTrailingReturn()))
/// \code
/// int X() {}
/// auto Y() -> int {}
/// \endcode
AST_MATCHER(FunctionDecl, hasTrailingReturn) {
  // Only prototyped function types carry trailing-return information; any
  // other function type never matches.
  if (const auto *F = Node.getType()->getAs<FunctionProtoType>())
    return F->hasTrailingReturn();
  return false;
}
/// Matches expressions that match InnerMatcher that are possibly wrapped in an
/// elidable constructor and other corresponding bookkeeping nodes.
///
/// In C++17, elidable copy constructors are no longer being generated in the
/// AST as it is not permitted by the standard. They are, however, part of the
/// AST in C++14 and earlier. So, a matcher must abstract over these differences
/// to work in all language modes. This matcher skips elidable constructor-call
/// AST nodes, `ExprWithCleanups` nodes wrapping elidable constructor-calls and
/// various implicit nodes inside the constructor calls, all of which will not
/// appear in the C++17 AST.
///
/// Given
///
/// \code
/// struct H {};
/// H G();
/// void f() {
///   H D = G();
/// }
/// \endcode
///
/// ``varDecl(hasInitializer(ignoringElidableConstructorCall(callExpr())))``
/// matches ``H D = G()`` in C++11 through C++17 (and beyond).
AST_MATCHER_P(Expr, ignoringElidableConstructorCall,
              ast_matchers::internal::Matcher<Expr>, InnerMatcher) {
  // E tracks the node that we are examining.
  const Expr *E = &Node;
  // If present, remove an outer `ExprWithCleanups` corresponding to the
  // underlying `CXXConstructExpr`. This check won't cover all cases of added
  // `ExprWithCleanups` corresponding to `CXXConstructExpr` nodes (because the
  // EWC is placed on the outermost node of the expression, which this may not
  // be), but, it still improves the coverage of this matcher.
  if (const auto *CleanupsExpr = dyn_cast<ExprWithCleanups>(&Node))
    E = CleanupsExpr->getSubExpr();
  if (const auto *CtorExpr = dyn_cast<CXXConstructExpr>(E)) {
    if (CtorExpr->isElidable()) {
      // An elidable copy/move takes the materialized temporary as its first
      // argument; run the inner matcher on the expression underneath it.
      if (const auto *MaterializeTemp =
              dyn_cast<MaterializeTemporaryExpr>(CtorExpr->getArg(0))) {
        return InnerMatcher.matches(*MaterializeTemp->getSubExpr(), Finder,
                                    Builder);
      }
    }
  }
  // Not an elidable constructor call: match the expression as-is.
  return InnerMatcher.matches(Node, Finder, Builder);
}
//----------------------------------------------------------------------------//
// OpenMP handling.
//----------------------------------------------------------------------------//

/// Matches any ``#pragma omp`` executable directive.
///
/// Given
///
/// \code
///   #pragma omp parallel
///   #pragma omp parallel default(none)
///   #pragma omp taskyield
/// \endcode
///
/// ``ompExecutableDirective()`` matches ``omp parallel``,
/// ``omp parallel default(none)`` and ``omp taskyield``.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, OMPExecutableDirective>
    ompExecutableDirective;

/// Matches standalone OpenMP directives,
/// i.e., directives that can't have a structured block.
///
/// Given
///
/// \code
///   #pragma omp parallel
///   {}
///   #pragma omp taskyield
/// \endcode
///
/// ``ompExecutableDirective(isStandaloneDirective()))`` matches
/// ``omp taskyield``.
AST_MATCHER(OMPExecutableDirective, isStandaloneDirective) {
  return Node.isStandaloneDirective();
}

/// Matches the Stmt AST node that is marked as being the structured-block
/// of an OpenMP executable directive.
///
/// Given
///
/// \code
///    #pragma omp parallel
///    {}
/// \endcode
///
/// ``stmt(isOMPStructuredBlock()))`` matches ``{}``.
AST_MATCHER(Stmt, isOMPStructuredBlock) { return Node.isOMPStructuredBlock(); }
/// Matches the structured-block of the OpenMP executable directive
///
/// Prerequisite: the executable directive must not be standalone directive.
/// If it is, it will never match.
///
/// Given
///
/// \code
///    #pragma omp parallel
///    ;
///    #pragma omp parallel
///    {}
/// \endcode
///
/// ``ompExecutableDirective(hasStructuredBlock(nullStmt()))`` will match ``;``
AST_MATCHER_P(OMPExecutableDirective, hasStructuredBlock,
              internal::Matcher<Stmt>, InnerMatcher) {
  if (Node.isStandaloneDirective())
    return false; // Standalone directives have no structured blocks.
  // Once standalone directives are excluded, getStructuredBlock() is assumed
  // to be non-null (it is dereferenced unconditionally here).
  return InnerMatcher.matches(*Node.getStructuredBlock(), Finder, Builder);
}
/// Matches any clause in an OpenMP directive.
///
/// Given
///
/// \code
///   #pragma omp parallel
///   #pragma omp parallel default(none)
/// \endcode
///
/// ``ompExecutableDirective(hasAnyClause(anything()))`` matches
/// ``omp parallel default(none)``.
AST_MATCHER_P(OMPExecutableDirective, hasAnyClause,
              internal::Matcher<OMPClause>, InnerMatcher) {
  ArrayRef<OMPClause *> Clauses = Node.clauses();
  // Per its name, matchesFirstInPointerRange succeeds as soon as any clause
  // in the range is accepted by InnerMatcher.
  return matchesFirstInPointerRange(InnerMatcher, Clauses.begin(),
                                    Clauses.end(), Finder, Builder);
}

/// Matches OpenMP ``default`` clause.
///
/// Given
///
/// \code
///   #pragma omp parallel default(none)
///   #pragma omp parallel default(shared)
///   #pragma omp parallel
/// \endcode
///
/// ``ompDefaultClause()`` matches ``default(none)`` and ``default(shared)``.
extern const internal::VariadicDynCastAllOfMatcher<OMPClause, OMPDefaultClause>
    ompDefaultClause;

/// Matches if the OpenMP ``default`` clause has ``none`` kind specified.
///
/// Given
///
/// \code
///   #pragma omp parallel
///   #pragma omp parallel default(none)
///   #pragma omp parallel default(shared)
/// \endcode
///
/// ``ompDefaultClause(isNoneKind())`` matches only ``default(none)``.
AST_MATCHER(OMPDefaultClause, isNoneKind) {
  return Node.getDefaultKind() == OMPC_DEFAULT_none;
}

/// Matches if the OpenMP ``default`` clause has ``shared`` kind specified.
///
/// Given
///
/// \code
///   #pragma omp parallel
///   #pragma omp parallel default(none)
///   #pragma omp parallel default(shared)
/// \endcode
///
/// ``ompDefaultClause(isSharedKind())`` matches only ``default(shared)``.
AST_MATCHER(OMPDefaultClause, isSharedKind) {
  return Node.getDefaultKind() == OMPC_DEFAULT_shared;
}
/// Matches if the OpenMP directive is allowed to contain the specified OpenMP
/// clause kind.
///
/// Given
///
/// \code
///   #pragma omp parallel
///   #pragma omp parallel for
///   #pragma omp for
/// \endcode
///
/// `ompExecutableDirective(isAllowedToContainClause(OMPC_default))`` matches
/// ``omp parallel`` and ``omp parallel for``.
///
/// If the matcher is used from clang-query, ``OpenMPClauseKind`` parameter
/// should be passed as a quoted string. e.g.,
/// ``isAllowedToContainClauseKind("OMPC_default").``
AST_MATCHER_P(OMPExecutableDirective, isAllowedToContainClauseKind,
              OpenMPClauseKind, CKind) {
  // The active OpenMP version (taken from the LangOptions) influences which
  // clauses a directive may legally carry.
  return isAllowedClauseForDirective(
      Node.getDirectiveKind(), CKind,
      Finder->getASTContext().getLangOpts().OpenMP);
}
//----------------------------------------------------------------------------//
// End OpenMP handling.
//----------------------------------------------------------------------------//
} // namespace ast_matchers
} // namespace clang
#endif // LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H
|
declare-variant-6.c | double f1 (int, long, float);
/* Verify that reusing a function as an "omp declare variant" with
   'construct' selector sets incompatible with an earlier use of the same
   variant is diagnosed.  The dg-error comments mark the diagnostics the
   compiler is expected to emit; declarations without such a comment must
   compile cleanly.  */
double f2 (int, long, float);
#pragma omp declare variant (f1) match (user={condition(0)},construct={parallel})
double f3 (int, long, float);
#pragma omp declare variant (f1) match (construct={parallel},user={condition(score(1):1)})
double f4 (int, long, float);
double f5 (int, long, float);
#pragma omp declare variant (f5) match (user={condition(0)})
double f6 (int, long, float);
#pragma omp declare variant (f5) match (construct={parallel},user={condition(score(1):1)}) /* { dg-error "'\[^'\n\r]*f5\[^'\n\r]*' used as a variant with incompatible 'construct' selector sets" } */
double f7 (int, long, float);
double f8 (int, long, float);
#pragma omp declare variant (f8) match (user={condition(0)},construct={for})
double f9 (int, long, float);
#pragma omp declare variant (f8) match (user={condition(1)}) /* { dg-error "'\[^'\n\r]*f8\[^'\n\r]*' used as a variant with incompatible 'construct' selector sets" } */
double f10 (int, long, float);
double f11 (int, long, float);
#pragma omp declare variant (f11) match (construct={target,teams,parallel,for})
double f12 (int, long, float);
#pragma omp declare variant (f11) match (user={condition(score(1):1)},construct={target,teams,parallel,for})
double f13 (int, long, float);
#pragma omp declare variant (f11) match (implementation={vendor(gnu)},construct={target,teams,parallel}) /* { dg-error "'\[^'\n\r]*f11\[^'\n\r]*' used as a variant with incompatible 'construct' selector sets" } */
double f14 (int, long, float);
#pragma omp declare variant (f11) match (device={kind(any)},construct={teams,parallel}) /* { dg-error "'\[^'\n\r]*f11\[^'\n\r]*' used as a variant with incompatible 'construct' selector sets" } */
double f15 (int, long, float);
double f16 (int, long, float);
#pragma omp declare variant (f16) match (construct={teams,parallel})
double f17 (int, long, float);
#pragma omp declare variant (f16) match(construct={teams,parallel,for}) /* { dg-error "'\[^'\n\r]*f16\[^'\n\r]*' used as a variant with incompatible 'construct' selector sets" } */
double f18 (int, long, float);
double f19 (int, long, float);
#pragma omp declare variant (f19) match (construct={parallel})
double f20 (int, long, float);
#pragma omp declare variant (f19) match (construct={for},implementation={vendor(gnu,llvm)}) /* { dg-error "'\[^'\n\r]*f19\[^'\n\r]*' used as a variant with incompatible 'construct' selector sets" } */
double f21 (int, long, float);
|
GB_binop__ge_int64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__ge_int64)
// A.*B function (eWiseMult): GB (_AemultB_08__ge_int64)
// A.*B function (eWiseMult): GB (_AemultB_02__ge_int64)
// A.*B function (eWiseMult): GB (_AemultB_04__ge_int64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__ge_int64)
// A*D function (colscale): GB (_AxD__ge_int64)
// D*A function (rowscale): GB (_DxB__ge_int64)
// C+=B function (dense accum): GB (_Cdense_accumB__ge_int64)
// C+=b function (dense accum): GB (_Cdense_accumb__ge_int64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__ge_int64)
// C=scalar+B GB (_bind1st__ge_int64)
// C=scalar+B' GB (_bind1st_tran__ge_int64)
// C=A+scalar GB (_bind2nd__ge_int64)
// C=A'+scalar GB (_bind2nd_tran__ge_int64)
// C type: bool
// A type: int64_t
// A pattern? 0
// B type: int64_t
// B pattern? 0
// BinaryOp: cij = (aij >= bij)
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int64_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int64_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x >= y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_GE || GxB_NO_INT64 || GxB_NO_GE_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// GE is none of these, so the generator emitted "(none)" and disabled this
// accumulating kernel for this operator.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__ge_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    // the template computes Cx [p] = (Ax [p] >= Bx [p]) via GB_BINOP above
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__ge_int64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the kernel body was disabled by the code generator (#if 0) -- presumably
    // because a bool-valued comparator is not used as an accumulator; the stub
    // is kept so the generated interface stays uniform, and reports success
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__ge_int64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // disabled by the generator, as above
    #if 0
    {
        // get the scalar b for C += b, of type int64_t
        int64_t bwork = (*((int64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__ge_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Cx aliases the output value array C->x; it is referenced only by the
    // included template
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__ge_int64)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Cx aliases the output value array C->x; it is referenced only by the
    // included template
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__ge_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // slicing workspaces; released by GB_FREE_WORKSPACE below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // alpha/beta are initialized only for eWiseUnion; NOTE(review): presumably
    // GB_add_template.c never reads them when is_eWiseUnion is false -- confirm
    int64_t alpha_scalar ;
    int64_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((int64_t *) alpha_scalar_in)) ;
        beta_scalar = (*((int64_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__ge_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // all numerical work happens inside the included meta template
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__ge_int64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    // (GB_BINOP_FLIP is 0 for GE, so only this branch is compiled.)
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__ge_int64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // all numerical work happens inside the included template
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__ge_int64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__ge_int64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    int64_t x = (*((int64_t *) x_input)) ;
    int64_t *Bx = (int64_t *) Bx_input ;
    // loop index is declared outside the loop so it can serve as the
    // OpenMP parallel-for index
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present according to the bitmap Bb
        if (!GBB (Bb, p)) continue ;
        int64_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x >= bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__ge_int64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    int64_t *Ax = (int64_t *) Ax_input ;
    int64_t y = (*((int64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present according to the bitmap Ab
        if (!GBB (Ab, p)) continue ;
        int64_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij >= y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)               \
{                                       \
    int64_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (x >= aij) ;              \
}

GrB_Info GB (_bind1st_tran__ge_int64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t x = (*((const int64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows (here it is re-defined to
    // the same type, so this is a no-op kept by the generator)
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int64_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)               \
{                                       \
    int64_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (aij >= y) ;              \
}

GrB_Info GB (_bind2nd_tran__ge_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t y = (*((const int64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
|
omp_pi_num_integration.c | #include <stdio.h>
#include <omp.h>
int num_steps = 400000000;
/* Approximates pi with the midpoint rule applied to the integral of
   4/(1+x^2) over [0,1], parallelized with OpenMP. */
int main()
{
    int i;
    double x, step, sum = 0.0;
    step = 1.0 / (double)num_steps;   /* width of each sub-interval */
    /* Four threads share the iteration space.  x must be private (each
       thread writes it); sum is combined with a '+' reduction, so there is
       no data race.  The loop index i of an 'omp for' is predetermined
       private by the OpenMP data-sharing rules. */
#pragma omp parallel num_threads(4)
#pragma omp for private(x) reduction(+:sum)
    for (i = 0; i < num_steps; i++)
    {
        x = (i + 0.5) * step;          /* midpoint of sub-interval i */
        sum += 4.0 / (1.0 + x * x);    /* integrand at the midpoint */
    }
    /* pi is the Riemann sum scaled by the interval width. */
    printf("pi = %.8f (sum = %.8f)\n", step * sum, sum);
    return 0;
}
|
nbody.c |
#include <stdlib.h>
#include <stdio.h>
#include <assert.h>
#include <math.h>
#include <string.h>
#include <unistd.h>
#include <sys/time.h>
#include <composer.h>
#include <mkl.h>
#include <omp.h>
#include <vec.h>
#include "nbody.h"
#include "nbody_mkl.h"
#include "nbody_composer.h"
/* Execution strategies selectable on the command line. */
typedef enum {
  UNKNOWN = 0,
  FUSED,
  MKL,
  MKL_COMPOSER,
} exec_mode_t;
// Piece size for pipelined execution.
long piece_size = 4096;
// Number of threads.
long threads = 1;
// Data size as a matrix dimension.
size_t data_size = 4096;
// Number of iterations to run for.
int iterations = 1;
// Mode to use
exec_mode_t mode;
/* Map a mode name from the command line to its exec_mode_t value.
 * Returns UNKNOWN for any unrecognized string. */
exec_mode_t get_mode(char *s) {
  static const struct { const char *name; exec_mode_t value; } modes[] = {
    { "fused",       FUSED        },
    { "mkl",         MKL          },
    { "mklcomposer", MKL_COMPOSER },
  };
  for (size_t k = 0; k < sizeof modes / sizeof modes[0]; k++) {
    if (strcmp(modes[k].name, s) == 0) {
      return modes[k].value;
    }
  }
  return UNKNOWN;
}
/* Print an n-by-n row-major matrix, one bracketed row per line,
 * entries in scientific notation. */
void print_matrix(int n, const double *v) {
  printf("-------------------\n");
  for (int row = 0; row < n; row++) {
    printf("[ ");
    for (int col = 0; col < n; col++)
      printf("%.8e ", v[row * n + col]);
    printf("]\n");
  }
}
/* Print an n-element vector as a single bracketed row. */
void print_vector(int n, const double *v) {
  printf("[ ");
  int idx = 0;
  while (idx < n) {
    printf("%.8e ", v[idx]);
    idx++;
  }
  printf("]\n");
}
/* Build the initial galaxy state for an n-body run: masses ramp linearly
 * upward from ~1 solar-mass unit, positions ramp across +/-0.5% of a light
 * year on each axis, and velocities start at zero.  When `lazy` is nonzero
 * the initialized vectors are marked lazy for composer pipelining. */
galaxy_t inputs(long n, int lazy) {
  vec_t mass = vvals(n, 1.0, 0);
  vec_t posx = vvals(n, 1.0, 0);
  vec_t posy = vvals(n, 1.0, 0);
  vec_t posz = vvals(n, 1.0, 0);
  double step = 1.0 / (double)n;
  for (int i = 0; i < n; i++) {
    double t = (double)i * step;
    mass.data[i] = (t + 10.0) * m_sol / 10.0;
    posx.data[i] = (t - 0.5) * r_ly / 100;
    posy.data[i] = (t - 0.5) * r_ly / 100;
    posz.data[i] = (t - 0.5) * r_ly / 100;
  }
  if (lazy) {
    composer_tolazy(mass.data);
    composer_tolazy(posx.data);
    composer_tolazy(posy.data);
    composer_tolazy(posz.data);
  }
  // Velocities are zero-initialized; vvals handles lazy marking itself.
  vec_t velx = vvals(n, 0.0, lazy);
  vec_t vely = vvals(n, 0.0, lazy);
  vec_t velz = vvals(n, 0.0, lazy);
  galaxy_t out;
  out.n = n;
  out.m = mass.data;
  out.x = posx.data;
  out.y = posy.data;
  out.z = posz.data;
  out.vx = velx.data;
  out.vy = vely.data;
  out.vz = velz.data;
  return out;
}
/** Performs a NumPy style x - x.T subtraction over a vector. */
/** Performs a NumPy style x - x.T subtraction over a vector:
 *  out[i][j] = x[j] - x[i], rows computed in parallel. */
void set_delta(MKL_INT n, const double *x, double *out) {
#pragma omp parallel for
  for (int row = 0; row < n; row++) {
    double base = x[row];
    for (int col = 0; col < n; col++)
      out[row * n + col] = x[col] - base;
  }
}
/** Performs a NumPy style x * x.T multiplication over a vector. */
/** Performs a NumPy style x * x.T multiplication over a vector:
 *  out[i][j] = x[j] * x[i], rows computed in parallel. */
void set_pm(MKL_INT n, const double *x, double *out) {
#pragma omp parallel for
  for (int row = 0; row < n; row++) {
    double factor = x[row];
    for (int col = 0; col < n; col++)
      out[row * n + col] = x[col] * factor;
  }
}
/* Return 1 if x is a positive power of two, 0 otherwise. */
int power_of_two(long x) {
  if (x == 0)
    return 0;
  return (x & (x - 1)) == 0;
}
/* Print invocation help and the list of supported -m modes to stderr. */
void print_usage(char **argv) {
  fprintf(stderr,
      "%s -m <mode> [-t <threads> -p <piece size> -s <log2 elements> -h]\n",
      argv[0]);
  fprintf(stderr, "Available modes:\n");
  fprintf(stderr, "\tfused\n");
  fprintf(stderr, "\tmkl\n");
  fprintf(stderr, "\tmklcomposer\n");
}
/* Parse command-line flags into the global configuration:
 * -m mode, -t threads, -p piece size, -s data size, -i iterations, -h help.
 * Exits with EXIT_FAILURE on invalid input. */
void parse_args(int argc, char **argv) {
  int opt;
  // FIX: optstring was "m:t:p:s:h:i:" — the colon after 'h' made -h consume
  // the following word as an argument; -h takes no argument.
  while ((opt = getopt(argc, argv, "m:t:p:s:hi:")) != -1) {
    switch (opt) {
      case 'm':
        mode = get_mode(optarg);
        if (mode == UNKNOWN) {
          print_usage(argv);
          exit(EXIT_FAILURE);
        }
        break;
      case 'p':
        piece_size = atol(optarg);
        break;
      case 't':
        threads = atol(optarg);
        if (!power_of_two(threads) || threads > 40) {
          // FIX: message said "< 16" but the check allows up to 40.
          fprintf(stderr, "threads must be power-of-2 and <= 40\n");
          exit(EXIT_FAILURE);
        }
        break;
      case 'i':
        iterations = atol(optarg);
        break;
      case 's':
        data_size = atol(optarg);
        break;
      case 'h': /* fallthrough */
      default:
        print_usage(argv);
        exit(EXIT_FAILURE);
    }
  }
}
/* Driver: parse flags, configure the chosen backend (MKL or MKL+composer),
 * generate inputs, run the n-body kernel, and report a checksum plus the
 * wall-clock runtime.  The checksum should match the reference Python
 * solution. */
int main(int argc, char **argv) {
parse_args(argc, argv);
if (mode == UNKNOWN) {
print_usage(argv);
exit(EXIT_FAILURE);
}
if (iterations <= 0) {
fprintf(stderr, "iterations must be greater than 0.\n");
exit(EXIT_FAILURE);
}
// Need to call this before any of the other library functions.
if (mode == MKL_COMPOSER) {
composer_init(threads, piece_size);
omp_set_num_threads(threads);
mkl_set_num_threads(1); // composer handles parallelism; keep MKL serial
} else if (mode == MKL) {
mkl_set_num_threads(threads);
omp_set_num_threads(threads);
}
// FIX: data_size is size_t — use %zu (the old %ld is UB on LLP64 targets).
printf("Data Size: %zu Iterations: %d, Piece Size: %ld Threads: %ld Mode: %d\n",
data_size, iterations, piece_size, threads, mode);
// Generate inputs.
fprintf(stderr, "Initializing...");
fflush(stderr); // FIX: was fflush(stdout), but the message went to stderr
int lazy = (mode == MKL_COMPOSER);
// Create inputs.
galaxy_t inp = inputs(data_size, lazy);
fprintf(stderr, "done.\n");
fflush(stderr); // FIX: flush the stream actually written to
// FIX: the product is size_t — print with %zu.
fprintf(stderr, "Total working set bytes: %zu\n", data_size*data_size * sizeof(double) * 10);
fprintf(stderr, "--------------------\n");
struct timeval start, end, diff;
gettimeofday(&start, NULL);
// Run function
switch (mode) {
case FUSED:
fprintf(stderr, "unimplemented\n");
exit(1);
break;
case MKL:
run_mkl(iterations, inp.n, inp.m, inp.x, inp.y, inp.z, inp.vx, inp.vy, inp.vz);
break;
case MKL_COMPOSER:
run_mkl_composer(iterations, inp.n, inp.m, inp.x, inp.y, inp.z, inp.vx, inp.vy, inp.vz);
break;
case UNKNOWN:
default:
fprintf(stderr, "unsupported case");
exit(EXIT_FAILURE);
}
fprintf(stderr, "Evaluating lazy calls...\n");
fflush(stderr);
gettimeofday(&end, NULL);
timersub(&end, &start, &diff);
double runtime = (double)diff.tv_sec + ((double)diff.tv_usec / 1000000.0);
// This should match the reference Python solution.
double result = 0;
for (int i = 0; i < inp.n; i++) {
result += inp.x[i] + inp.y[i] + inp.z[i];
}
printf("Result: %.11e\n", result);
fprintf(stderr, "\n");
printf("%f seconds\n", runtime);
fflush(stderr);
fflush(stdout);
return 0;
}
|
pi.c | /*
This program will numerically compute the integral of
4/(1+x*x)
from 0 to 1. The value of this integral is pi -- which
is great since it gives us an easy way to check the answer.
The is the original sequential program. It uses the timer
from the OpenMP runtime library
History: Written by Tim Mattson, 11/99.
$gcc -fopenmp
*/
#include <stdio.h>
#include <omp.h>
static long num_steps = 100000000;
double step;
/*
 * Numerically compute the integral of 4/(1+x*x) from 0 to 1 (= pi) using
 * an SPMD-style OpenMP decomposition: each thread accumulates a private
 * partial sum over a strided index range, then adds it atomically into
 * the shared total.
 */
int main ()
{
// FIX: sum was declared `int` but accumulates double partial sums — the
// atomic add truncated every thread's contribution.  It must be double.
double sum = 0.0;
double pi;
double start_time, run_time;
step = 1.0/(double) num_steps;
start_time = omp_get_wtime();
// (removed the unused ID variable and its private() clause)
#pragma omp parallel num_threads(8)
{
int id = omp_get_thread_num();
int numThreads = omp_get_num_threads();
double sumHilo = 0.0; // per-thread private partial sum
for (int i=id; i<= num_steps; i+=numThreads ){
// FIX: x was a shared variable written by every thread — a data race that
// corrupted the result.  Declaring it inside the loop makes it private.
double x = (i-0.5)*step;
sumHilo = sumHilo + 4.0/ (1.0+x*x);
}
// One atomic update per thread combines the private partial sums safely.
#pragma omp atomic
sum += sumHilo;
#pragma omp single // executed by exactly one thread
printf("Número de hilos %d\n", numThreads );
}
pi = sum * step;
run_time = omp_get_wtime() - start_time;
printf("\n pi with %ld steps is %lf in %lf seconds\n ",num_steps,pi,run_time);
}
|
GB_unop__identity_uint8_fc64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_uint8_fc64)
// op(A') function: GB (_unop_tran__identity_uint8_fc64)
// C type: uint8_t
// A type: GxB_FC64_t
// cast: uint8_t cij = GB_cast_to_uint8_t (creal (aij))
// unaryop: cij = aij
#define GB_ATYPE \
GxB_FC64_t
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint8_t z = GB_cast_to_uint8_t (creal (aij)) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint8_t z = GB_cast_to_uint8_t (creal (aij)) ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT8 || GxB_NO_FC64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): cast each entry of A from double complex (GxB_FC64_t)
// to uint8_t by taking the real part, writing the result into Cx.
// Handles both the full/sparse layout (Ab == NULL: all anz entries present)
// and the bitmap layout (Ab marks which positions hold entries).
GrB_Info GB (_unop_apply__identity_uint8_fc64)
(
uint8_t *Cx, // Cx and Ax may be aliased
const GxB_FC64_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// full/sparse case: every position 0..anz-1 holds an entry
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC64_t aij = Ax [p] ;
uint8_t z = GB_cast_to_uint8_t (creal (aij)) ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
GxB_FC64_t aij = Ax [p] ;
uint8_t z = GB_cast_to_uint8_t (creal (aij)) ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, casting each double-complex entry to
// uint8_t (real part) via the GB_CAST_OP macro defined above.  The transpose
// loop itself is supplied by the GB_unop_transpose.c template, which uses
// Workspaces/A_slice to partition the work across nthreads.
GrB_Info GB (_unop_tran__identity_uint8_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
omp_smithW.c | /*********************************************************************************
* Smith–Waterman algorithm
* Purpose: Local alignment of nucleotide or protein sequences
* Authors: Daniel Holanda, Hanoch Griner, Taynara Pinheiro
*
* Compilation: gcc omp_smithW.c -o omp_smithW -fopenmp -DDEBUG // debugging mode
* gcc omp_smithW.c -O3 -o omp_smithW -fopenmp // production run
* Execution: ./omp_smithW <number_of_col> <number_of_rows>
*
* Updated by C. Liao, Jan 2nd, 2019
*********************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
#include <time.h>
#include <assert.h>
#include "parameters.h"
/*--------------------------------------------------------------------
* Text Tweaks
*/
#define RESET "\033[0m"
#define BOLDRED "\033[1m\033[31m" /* Bold Red */
/* End of text tweaks */
/*--------------------------------------------------------------------
* Constants
*/
#define PATH -1
#define NONE 0
#define UP 1
#define LEFT 2
#define DIAGONAL 3
/* End of constants */
/*--------------------------------------------------------------------
* Helpers
*/
#define min(x, y) (((x) < (y)) ? (x) : (y))
#define max(a,b) ((a) > (b) ? a : b)
// #define DEBUG
/* End of Helpers */
/*--------------------------------------------------------------------
* Functions Prototypes
*/
void similarityScore(long long int i, long long int j, int* H, int* P, long long int* maxPos);
int matchMissmatchScore(long long int i, long long int j);
void backtrack(int* P, long long int maxPos);
void printMatrix(int* matrix);
void printPredecessorMatrix(int* matrix);
void generate(void);
long long int nElement(long long int i);
void calcFirstDiagElement(long long int i, long long int *si, long long int *sj);
/* End of prototypes */
/*--------------------------------------------------------------------
* Global Variables
*/
bool useBuiltInData=true;
//Defines size of strings to be compared
long long int m = 8 ; //Columns - Size of string a
long long int n = 9; //Lines - Size of string b
// the generated scoring matrix's size is m++ and n++ later to have the first row/column as 0s.
//Defines scores
int matchScore = 3;
int missmatchScore = -3;
int gapScore = -2;
//Strings over the Alphabet Sigma
char *a, *b;
/* End of global variables */
/*--------------------------------------------------------------------
* Function: main
*/
/* Driver for the Smith-Waterman local alignment: allocates the scoring (H)
 * and predecessor (P) matrices, fills them along anti-diagonal wavefronts
 * (parallelizing within each diagonal), then backtracks from the best score.
 * Usage: ./omp_smithW <number_of_col> <number_of_rows>; with no arguments a
 * built-in test sequence (the Wikipedia example) is used and verified. */
int main(int argc, char* argv[]) {
// thread_count is no longer used
int thread_count;
if (argc==3)
{
m = strtoll(argv[1], NULL, 10);
n = strtoll(argv[2], NULL, 10);
useBuiltInData = false;
}
//#ifdef DEBUG
if (useBuiltInData)
printf ("Using built-in data for testing ..\n");
printf("Problem size: Matrix[%lld][%lld], FACTOR=%d CUTOFF=%d\n", n, m, FACTOR, CUTOFF);
//#endif
//Allocates a and b
a = (char*) malloc(m * sizeof(char));
b = (char*) malloc(n * sizeof(char));
//Because now we have zeros
m++;
n++;
//Allocates similarity matrix H
int *H;
H = (int *) calloc(m * n, sizeof(int));
//Allocates predecessor matrix P
int *P;
P = (int *)calloc(m * n, sizeof(int));
if (useBuiltInData)
{
//Uncomment this to test the sequence available at
//http://vlab.amrita.edu/?sub=3&brch=274&sim=1433&cnt=1
// OBS: m=11 n=7
// a[0] = 'C';
// a[1] = 'G';
// a[2] = 'T';
// a[3] = 'G';
// a[4] = 'A';
// a[5] = 'A';
// a[6] = 'T';
// a[7] = 'T';
// a[8] = 'C';
// a[9] = 'A';
// a[10] = 'T';
// b[0] = 'G';
// b[1] = 'A';
// b[2] = 'C';
// b[3] = 'T';
// b[4] = 'T';
// b[5] = 'A';
// b[6] = 'C';
// https://en.wikipedia.org/wiki/Smith%E2%80%93Waterman_algorithm#Example
// Using the wiki example to verify the results
b[0] = 'G';
b[1] = 'G';
b[2] = 'T';
b[3] = 'T';
b[4] = 'G';
b[5] = 'A';
b[6] = 'C';
b[7] = 'T';
b[8] = 'A';
a[0] = 'T';
a[1] = 'G';
a[2] = 'T';
a[3] = 'T';
a[4] = 'A';
a[5] = 'C';
a[6] = 'G';
a[7] = 'G';
}
else
{
//Gen random arrays a and b
generate();
}
//Start position for backtrack
long long int maxPos = 0;
//Calculates the similarity matrix
long long int i, j;
// The way to generate all wavefront is to go through the top edge elements
// starting from the left top of the matrix, go to the bottom top -> down, then left->right
// total top edge element count = dim1_size + dim2_size -1
//Because now we have zeros ((m-1) + (n-1) - 1)
long long int nDiag = m + n - 3;
#ifdef DEBUG
// NOTE(review): nDiag is long long — %d here should be %lld; confirm before
// enabling DEBUG builds.
printf("nDiag=%d\n", nDiag);
printf("Number of wavefront lines and their first element positions:\n");
#endif
#pragma omp parallel
{
#pragma omp master
{
thread_count = omp_get_num_threads();
printf ("Using %d out of max %d threads...", thread_count, omp_get_max_threads());
}
}
//Gets Initial time
double initialTime = omp_get_wtime();
// #pragma omp parallel default(none) shared(H, P, maxPos, nDiag, j) private(i)
{
// Outer loop walks the anti-diagonals sequentially (each depends on the
// previous two); only the elements WITHIN a diagonal run in parallel.
for (i = 1; i <= nDiag; ++i) // start from 1 since 0 is the boundary padding
{
long long int nEle, si, sj;
nEle = nElement(i);
calcFirstDiagElement(i, &si, &sj);
// Parallelize only when the diagonal is long enough to pay off (CUTOFF).
#pragma omp parallel for private(j) shared (nEle, si, sj, H, P, maxPos) if (nEle>=CUTOFF)
for (j = 0; j < nEle; ++j)
{ // going upwards : anti-diagnol direction
long long int ai = si - j ; // going up vertically
long long int aj = sj + j; // going right in horizontal
similarityScore(ai, aj, H, P, &maxPos); // a critical section is used inside
}
} // for end nDiag
} // end omp parallel
double finalTime = omp_get_wtime();
printf("\nElapsed time for scoring matrix computation: %f\n", finalTime - initialTime);
initialTime = omp_get_wtime();
backtrack(P, maxPos);
finalTime = omp_get_wtime();
//Gets backtrack time
finalTime = omp_get_wtime();
printf("Elapsed time for backtracking: %f\n", finalTime - initialTime);
if (useBuiltInData)
{
// For the built-in Wikipedia example the bottom-right score must be 7.
printf ("Verifying results using the builtinIn data: %s\n", (H[n*m-1]==7)?"true":"false");
assert (H[n*m-1]==7);
}
#ifdef DEBUG
printf("\nSimilarity Matrix:\n");
printMatrix(H);
printf("\nPredecessor Matrix:\n");
printPredecessorMatrix(P);
#endif
//Frees similarity matrixes
free(H);
free(P);
//Frees input arrays
free(a);
free(b);
return 0;
} /* End of main */
/*--------------------------------------------------------------------
* Function: nElement
* Purpose: Calculate the number of i-diagonal's elements
* i value range 1 to nDiag. we inclulde the upper bound value. 0 is for the padded wavefront, which is ignored.
*/
/*--------------------------------------------------------------------
 * Function: nElement
 * Purpose: Calculate the number of elements on anti-diagonal i.
 * Valid i range is 1..nDiag (diagonal 0 is the boundary padding).
 * The count grows while i is below both dimensions, plateaus at
 * min(m,n)-1 while below the larger one, then shrinks.
 */
long long int nElement(long long int i) {
if (i < m && i < n) { // smaller than both directions
//Number of elements in the diagonal is increasing
return i;
}
else if (i < max(m, n)) { // smaller than only one direction
//Number of elements in the diagonal is stable
// FIX: use long long — the old `long int` truncated on LLP64 targets.
long long int shorter = min(m, n); // the longer direction has the edge elements
return shorter - 1;
}
else {
//Number of elements in the diagonal is decreasing
long long int shorter = min(m, n);
// FIX: abs() takes int and silently truncated the long long difference
// m - n for very large matrices; compute the absolute value explicitly.
long long int diff = (m > n) ? (m - n) : (n - m);
return 2 * shorter - i + diff - 2;
}
}
/*--------------------------------------------------------------------
* Function: calcElement: expect valid i value is from 1 to nDiag. since the first one is 0 padding
* Purpose: Calculate the position of (si, sj)-element
* n rows, m columns: we sweep the matrix on the left edge then bottom edge to get the wavefront
*/
/*--------------------------------------------------------------------
 * Function: calcFirstDiagElement
 * Purpose: Compute (si, sj), the position of the first (left-most)
 * element of anti-diagonal i.  Valid i is 1..nDiag; the sweep runs
 * down the left edge (column 1) and then right along the bottom row.
 */
void calcFirstDiagElement(long long int i, long long int *si, long long int *sj) {
  int onLeftEdge = (i < n);
  // Left edge: row i, column 1 (column 0 is padding).
  // Bottom row: row n-1 fixed, column advances with i.
  *si = onLeftEdge ? i : (n - 1);
  *sj = onLeftEdge ? 1 : (i - n + 2);
}
/*
// understanding the calculation by an example
n =6 // row
m =2 // col
padded scoring matrix
n=7
m=3
0 1 2
-------
0 x x x
1 x x x
2 x x x
3 x x x
4 x x x
5 x x x
6 x x x
We should peel off top row and left column since they are the padding
the remaining 6x2 sub matrix is what is interesting for us
Now find the number of wavefront lines and their first element's position in the scoring matrix
total diagnol frontwave = (n-1) + (m-1) -1 // submatrix row+column -1
We use the left most element in each wavefront line as its first element.
Then we have the first elements like
(1,1),
(2,1)
(3,1)
..
(6,1) (6,2)
*/
/*--------------------------------------------------------------------
* Function: SimilarityScore
* Purpose: Calculate value of scoring matrix element H(i,j) : the maximum Similarity-Score H(i,j)
* int *P; the predecessor array,storing which of the three elements is picked with max value
*/
/*--------------------------------------------------------------------
 * Function: similarityScore
 * Purpose: Compute scoring-matrix element H(i,j) as the maximum of the
 * up/left/diagonal moves (or 0), record the chosen move in P(i,j), and
 * update the global best position *maxPos for the backtrack seed.
 * Called concurrently for all elements of one anti-diagonal.
 */
void similarityScore(long long int i, long long int j, int* H, int* P, long long int* maxPos) {
int up, left, diag;
//Stores index of element
long long int index = m * i + j;
//Get element above
up = H[index - m] + gapScore;
//Get element on the left
left = H[index - 1] + gapScore;
//Get element on the diagonal
diag = H[index - m - 1] + matchMissmatchScore(i, j);
//Calculates the maximum; NONE (0) floors the score for local alignment
int max = NONE;
int pred = NONE;
if (diag > max) { //same letter ↖
max = diag;
pred = DIAGONAL;
}
if (up > max) { //remove letter ↑
max = up;
pred = UP;
}
if (left > max) { //insert letter ←
max = left;
pred = LEFT;
}
//Inserts the value in the similarity and predecessor matrixes
H[index] = max;
P[index] = pred;
//Updates maximum score to be used as seed on backtrack
// FIX: the comparison against H[*maxPos] must be INSIDE the critical
// section.  Previously only the assignment was protected, so two threads
// could both pass the check and the smaller score could overwrite the
// larger one (check-then-act data race).
#pragma omp critical
{
if (max > H[*maxPos]) {
*maxPos = index;
}
}
} /* End of similarityScore */
/*--------------------------------------------------------------------
* Function: matchMissmatchScore
* Purpose: Similarity function on the alphabet for match/missmatch
*/
/*--------------------------------------------------------------------
 * Function: matchMissmatchScore
 * Purpose: Score a single pair of characters — matchScore when
 * a[j-1] equals b[i-1], missmatchScore otherwise (indices are offset
 * by one because row/column 0 of the matrix are padding).
 */
int matchMissmatchScore(long long int i, long long int j) {
  return (a[j - 1] == b[i - 1]) ? matchScore : missmatchScore;
} /* End of matchMissmatchScore */
/*--------------------------------------------------------------------
* Function: backtrack
* Purpose: Modify matrix to print, path change from value to PATH
*/
/*--------------------------------------------------------------------
 * Function: backtrack
 * Purpose: Walk the predecessor matrix from maxPos back toward the
 * start, flipping each visited cell's sign (multiply by PATH) so the
 * printers can highlight the alignment path.
 */
void backtrack(int* P, long long int maxPos) {
//hold the predecessor position of the current cell
long long int predPos;
//backtrack from maxPos until a NONE cell is reached
do {
switch (P[maxPos]) {
case DIAGONAL:
predPos = maxPos - m - 1;
break;
case UP:
predPos = maxPos - m;
break;
case LEFT:
predPos = maxPos - 1;
break;
default:
// FIX: previously predPos was left uninitialized (undefined behavior)
// when P[maxPos] held no direction; stay in place so the loop's
// P[maxPos] != NONE test terminates cleanly.
predPos = maxPos;
break;
}
P[maxPos] *= PATH;
maxPos = predPos;
} while (P[maxPos] != NONE);
} /* End of backtrack */
/*--------------------------------------------------------------------
* Function: printMatrix
* Purpose: Print Matrix
*/
/*--------------------------------------------------------------------
 * Function: printMatrix
 * Purpose: Print the scoring matrix with sequence a across the header
 * row and sequence b down the first column (row/col 0 are padding).
 */
void printMatrix(int* matrix) {
  long long int row, col;
  printf("-\t-\t");
  for (col = 0; col < m-1; col++) {
    printf("%c\t", a[col]);
  }
  printf("\n-\t");
  for (row = 0; row < n; row++) { //Lines
    for (col = 0; col < m; col++) {
      if (col==0 && row>0) printf("%c\t", b[row-1]);
      printf("%d\t", matrix[m * row + col]);
    }
    printf("\n");
  }
} /* End of printMatrix */
/*--------------------------------------------------------------------
* Function: printPredecessorMatrix
* Purpose: Print predecessor matrix
*/
void printPredecessorMatrix(int* matrix) {
long long int i, j, index;
printf(" ");
for (j = 0; j < m-1; j++) {
printf("%c ", a[j]);
}
printf("\n ");
for (i = 0; i < n; i++) { //Lines
for (j = 0; j < m; j++) {
if (j==0 && i>0) printf("%c ", b[i-1]);
index = m * i + j;
if (matrix[index] < 0) {
printf(BOLDRED);
if (matrix[index] == -UP)
printf("↑ ");
else if (matrix[index] == -LEFT)
printf("← ");
else if (matrix[index] == -DIAGONAL)
printf("↖ ");
else
printf("- ");
printf(RESET);
} else {
if (matrix[index] == UP)
printf("↑ ");
else if (matrix[index] == LEFT)
printf("← ");
else if (matrix[index] == DIAGONAL)
printf("↖ ");
else
printf("- ");
}
}
printf("\n");
}
} /* End of printPredecessorMatrix */
/*--------------------------------------------------------------------
* Function: generate
* Purpose: Generate arrays a and b
*/
/*--------------------------------------------------------------------
 * Function: generate
 * Purpose: Fill arrays a (length m) and b (length n) with uniformly
 * random nucleotides.  rand()%4 maps 0->A, 1->T, 2->C, 3->G exactly as
 * the original if/else chain did, so the generated sequences are
 * identical for a given seed.
 */
void generate() {
  //Random seed
  srand(time(NULL));
  static const char nucleotides[4] = { 'A', 'T', 'C', 'G' };
  long long int i;
  //Generates the values of a
  for (i = 0; i < m; i++) {
    a[i] = nucleotides[rand() % 4];
  }
  //Generates the values of b
  for (i = 0; i < n; i++) {
    b[i] = nucleotides[rand() % 4];
  }
} /* End of generate */
/*--------------------------------------------------------------------
* External References:
* http://vlab.amrita.edu/?sub=3&brch=274&sim=1433&cnt=1
* http://pt.slideshare.net/avrilcoghlan/the-smith-waterman-algorithm
* http://baba.sourceforge.net/
*/
|
dds.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD DDDD SSSSS %
% D D D D SS %
% D D D D SSS %
% D D D D SS %
% DDDD DDDD SSSSS %
% %
% %
% Read/Write Microsoft Direct Draw Surface Image Format %
% %
% Software Design %
% Bianca van Schaik %
% March 2008 %
% Dirk Lemstra %
% September 2013 %
% %
% %
% Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/profile.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/static.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/module.h"
#include "MagickCore/transform.h"
/*
Definitions
*/
#define DDSD_CAPS 0x00000001
#define DDSD_HEIGHT 0x00000002
#define DDSD_WIDTH 0x00000004
#define DDSD_PITCH 0x00000008
#define DDSD_PIXELFORMAT 0x00001000
#define DDSD_MIPMAPCOUNT 0x00020000
#define DDSD_LINEARSIZE 0x00080000
#define DDSD_DEPTH 0x00800000
#define DDPF_ALPHAPIXELS 0x00000001
#define DDPF_FOURCC 0x00000004
#define DDPF_RGB 0x00000040
#define DDPF_LUMINANCE 0x00020000
#define FOURCC_DXT1 0x31545844
#define FOURCC_DXT3 0x33545844
#define FOURCC_DXT5 0x35545844
#define DDSCAPS_COMPLEX 0x00000008
#define DDSCAPS_TEXTURE 0x00001000
#define DDSCAPS_MIPMAP 0x00400000
#define DDSCAPS2_CUBEMAP 0x00000200
#define DDSCAPS2_CUBEMAP_POSITIVEX 0x00000400
#define DDSCAPS2_CUBEMAP_NEGATIVEX 0x00000800
#define DDSCAPS2_CUBEMAP_POSITIVEY 0x00001000
#define DDSCAPS2_CUBEMAP_NEGATIVEY 0x00002000
#define DDSCAPS2_CUBEMAP_POSITIVEZ 0x00004000
#define DDSCAPS2_CUBEMAP_NEGATIVEZ 0x00008000
#define DDSCAPS2_VOLUME 0x00200000
#ifndef SIZE_MAX
#define SIZE_MAX ((size_t) -1)
#endif
/*
Structure declarations.
*/
// DDS_PIXELFORMAT: pixel-format section of the DDS header (flags select
// which of the fourcc / RGB-mask fields below are meaningful).
typedef struct _DDSPixelFormat
{
size_t
flags,
fourcc,
rgb_bitcount,
r_bitmask,
g_bitmask,
b_bitmask,
alpha_bitmask;
} DDSPixelFormat;
// DDS_HEADER fields read from the file: dimensions, pitch or linear size
// (which one depends on flags), volume depth, mipmap count, and caps bits.
typedef struct _DDSInfo
{
size_t
flags,
height,
width,
pitchOrLinearSize,
depth,
mipmapcount,
ddscaps1,
ddscaps2;
DDSPixelFormat
pixelformat;
} DDSInfo;
// The four RGBA colors of a decoded DXT block palette.
typedef struct _DDSColors
{
unsigned char
r[4],
g[4],
b[4],
a[4];
} DDSColors;
// 4-component float vector used by the DXT color-fit math.
typedef struct _DDSVector4
{
float
x,
y,
z,
w;
} DDSVector4;
// 3-component float vector (RGB) used by the DXT color-fit math.
typedef struct _DDSVector3
{
float
x,
y,
z;
} DDSVector3;
// One candidate (start, end, error) triple in the single-color lookup tables.
typedef struct _DDSSourceBlock
{
unsigned char
start,
end,
error;
} DDSSourceBlock;
// Best 3-color and 4-color source blocks for a given channel value.
typedef struct _DDSSingleColourLookup
{
DDSSourceBlock sources[2];
} DDSSingleColourLookup;
// Decoder entry point for one compressed DDS variant (DXT1/DXT3/DXT5/...).
typedef MagickBooleanType
DDSDecoder(const ImageInfo *,Image *,DDSInfo *,const MagickBooleanType,
ExceptionInfo *);
// Per-pixel decode routine used by the uncompressed paths.
typedef MagickBooleanType
DDSPixelDecoder(Image *,DDSInfo *,ExceptionInfo *);
static const DDSSingleColourLookup DDSLookup_5_4[] =
{
{ { { 0, 0, 0 }, { 0, 0, 0 } } },
{ { { 0, 0, 1 }, { 0, 1, 1 } } },
{ { { 0, 0, 2 }, { 0, 1, 0 } } },
{ { { 0, 0, 3 }, { 0, 1, 1 } } },
{ { { 0, 0, 4 }, { 0, 2, 1 } } },
{ { { 1, 0, 3 }, { 0, 2, 0 } } },
{ { { 1, 0, 2 }, { 0, 2, 1 } } },
{ { { 1, 0, 1 }, { 0, 3, 1 } } },
{ { { 1, 0, 0 }, { 0, 3, 0 } } },
{ { { 1, 0, 1 }, { 1, 2, 1 } } },
{ { { 1, 0, 2 }, { 1, 2, 0 } } },
{ { { 1, 0, 3 }, { 0, 4, 0 } } },
{ { { 1, 0, 4 }, { 0, 5, 1 } } },
{ { { 2, 0, 3 }, { 0, 5, 0 } } },
{ { { 2, 0, 2 }, { 0, 5, 1 } } },
{ { { 2, 0, 1 }, { 0, 6, 1 } } },
{ { { 2, 0, 0 }, { 0, 6, 0 } } },
{ { { 2, 0, 1 }, { 2, 3, 1 } } },
{ { { 2, 0, 2 }, { 2, 3, 0 } } },
{ { { 2, 0, 3 }, { 0, 7, 0 } } },
{ { { 2, 0, 4 }, { 1, 6, 1 } } },
{ { { 3, 0, 3 }, { 1, 6, 0 } } },
{ { { 3, 0, 2 }, { 0, 8, 0 } } },
{ { { 3, 0, 1 }, { 0, 9, 1 } } },
{ { { 3, 0, 0 }, { 0, 9, 0 } } },
{ { { 3, 0, 1 }, { 0, 9, 1 } } },
{ { { 3, 0, 2 }, { 0, 10, 1 } } },
{ { { 3, 0, 3 }, { 0, 10, 0 } } },
{ { { 3, 0, 4 }, { 2, 7, 1 } } },
{ { { 4, 0, 4 }, { 2, 7, 0 } } },
{ { { 4, 0, 3 }, { 0, 11, 0 } } },
{ { { 4, 0, 2 }, { 1, 10, 1 } } },
{ { { 4, 0, 1 }, { 1, 10, 0 } } },
{ { { 4, 0, 0 }, { 0, 12, 0 } } },
{ { { 4, 0, 1 }, { 0, 13, 1 } } },
{ { { 4, 0, 2 }, { 0, 13, 0 } } },
{ { { 4, 0, 3 }, { 0, 13, 1 } } },
{ { { 4, 0, 4 }, { 0, 14, 1 } } },
{ { { 5, 0, 3 }, { 0, 14, 0 } } },
{ { { 5, 0, 2 }, { 2, 11, 1 } } },
{ { { 5, 0, 1 }, { 2, 11, 0 } } },
{ { { 5, 0, 0 }, { 0, 15, 0 } } },
{ { { 5, 0, 1 }, { 1, 14, 1 } } },
{ { { 5, 0, 2 }, { 1, 14, 0 } } },
{ { { 5, 0, 3 }, { 0, 16, 0 } } },
{ { { 5, 0, 4 }, { 0, 17, 1 } } },
{ { { 6, 0, 3 }, { 0, 17, 0 } } },
{ { { 6, 0, 2 }, { 0, 17, 1 } } },
{ { { 6, 0, 1 }, { 0, 18, 1 } } },
{ { { 6, 0, 0 }, { 0, 18, 0 } } },
{ { { 6, 0, 1 }, { 2, 15, 1 } } },
{ { { 6, 0, 2 }, { 2, 15, 0 } } },
{ { { 6, 0, 3 }, { 0, 19, 0 } } },
{ { { 6, 0, 4 }, { 1, 18, 1 } } },
{ { { 7, 0, 3 }, { 1, 18, 0 } } },
{ { { 7, 0, 2 }, { 0, 20, 0 } } },
{ { { 7, 0, 1 }, { 0, 21, 1 } } },
{ { { 7, 0, 0 }, { 0, 21, 0 } } },
{ { { 7, 0, 1 }, { 0, 21, 1 } } },
{ { { 7, 0, 2 }, { 0, 22, 1 } } },
{ { { 7, 0, 3 }, { 0, 22, 0 } } },
{ { { 7, 0, 4 }, { 2, 19, 1 } } },
{ { { 8, 0, 4 }, { 2, 19, 0 } } },
{ { { 8, 0, 3 }, { 0, 23, 0 } } },
{ { { 8, 0, 2 }, { 1, 22, 1 } } },
{ { { 8, 0, 1 }, { 1, 22, 0 } } },
{ { { 8, 0, 0 }, { 0, 24, 0 } } },
{ { { 8, 0, 1 }, { 0, 25, 1 } } },
{ { { 8, 0, 2 }, { 0, 25, 0 } } },
{ { { 8, 0, 3 }, { 0, 25, 1 } } },
{ { { 8, 0, 4 }, { 0, 26, 1 } } },
{ { { 9, 0, 3 }, { 0, 26, 0 } } },
{ { { 9, 0, 2 }, { 2, 23, 1 } } },
{ { { 9, 0, 1 }, { 2, 23, 0 } } },
{ { { 9, 0, 0 }, { 0, 27, 0 } } },
{ { { 9, 0, 1 }, { 1, 26, 1 } } },
{ { { 9, 0, 2 }, { 1, 26, 0 } } },
{ { { 9, 0, 3 }, { 0, 28, 0 } } },
{ { { 9, 0, 4 }, { 0, 29, 1 } } },
{ { { 10, 0, 3 }, { 0, 29, 0 } } },
{ { { 10, 0, 2 }, { 0, 29, 1 } } },
{ { { 10, 0, 1 }, { 0, 30, 1 } } },
{ { { 10, 0, 0 }, { 0, 30, 0 } } },
{ { { 10, 0, 1 }, { 2, 27, 1 } } },
{ { { 10, 0, 2 }, { 2, 27, 0 } } },
{ { { 10, 0, 3 }, { 0, 31, 0 } } },
{ { { 10, 0, 4 }, { 1, 30, 1 } } },
{ { { 11, 0, 3 }, { 1, 30, 0 } } },
{ { { 11, 0, 2 }, { 4, 24, 0 } } },
{ { { 11, 0, 1 }, { 1, 31, 1 } } },
{ { { 11, 0, 0 }, { 1, 31, 0 } } },
{ { { 11, 0, 1 }, { 1, 31, 1 } } },
{ { { 11, 0, 2 }, { 2, 30, 1 } } },
{ { { 11, 0, 3 }, { 2, 30, 0 } } },
{ { { 11, 0, 4 }, { 2, 31, 1 } } },
{ { { 12, 0, 4 }, { 2, 31, 0 } } },
{ { { 12, 0, 3 }, { 4, 27, 0 } } },
{ { { 12, 0, 2 }, { 3, 30, 1 } } },
{ { { 12, 0, 1 }, { 3, 30, 0 } } },
{ { { 12, 0, 0 }, { 4, 28, 0 } } },
{ { { 12, 0, 1 }, { 3, 31, 1 } } },
{ { { 12, 0, 2 }, { 3, 31, 0 } } },
{ { { 12, 0, 3 }, { 3, 31, 1 } } },
{ { { 12, 0, 4 }, { 4, 30, 1 } } },
{ { { 13, 0, 3 }, { 4, 30, 0 } } },
{ { { 13, 0, 2 }, { 6, 27, 1 } } },
{ { { 13, 0, 1 }, { 6, 27, 0 } } },
{ { { 13, 0, 0 }, { 4, 31, 0 } } },
{ { { 13, 0, 1 }, { 5, 30, 1 } } },
{ { { 13, 0, 2 }, { 5, 30, 0 } } },
{ { { 13, 0, 3 }, { 8, 24, 0 } } },
{ { { 13, 0, 4 }, { 5, 31, 1 } } },
{ { { 14, 0, 3 }, { 5, 31, 0 } } },
{ { { 14, 0, 2 }, { 5, 31, 1 } } },
{ { { 14, 0, 1 }, { 6, 30, 1 } } },
{ { { 14, 0, 0 }, { 6, 30, 0 } } },
{ { { 14, 0, 1 }, { 6, 31, 1 } } },
{ { { 14, 0, 2 }, { 6, 31, 0 } } },
{ { { 14, 0, 3 }, { 8, 27, 0 } } },
{ { { 14, 0, 4 }, { 7, 30, 1 } } },
{ { { 15, 0, 3 }, { 7, 30, 0 } } },
{ { { 15, 0, 2 }, { 8, 28, 0 } } },
{ { { 15, 0, 1 }, { 7, 31, 1 } } },
{ { { 15, 0, 0 }, { 7, 31, 0 } } },
{ { { 15, 0, 1 }, { 7, 31, 1 } } },
{ { { 15, 0, 2 }, { 8, 30, 1 } } },
{ { { 15, 0, 3 }, { 8, 30, 0 } } },
{ { { 15, 0, 4 }, { 10, 27, 1 } } },
{ { { 16, 0, 4 }, { 10, 27, 0 } } },
{ { { 16, 0, 3 }, { 8, 31, 0 } } },
{ { { 16, 0, 2 }, { 9, 30, 1 } } },
{ { { 16, 0, 1 }, { 9, 30, 0 } } },
{ { { 16, 0, 0 }, { 12, 24, 0 } } },
{ { { 16, 0, 1 }, { 9, 31, 1 } } },
{ { { 16, 0, 2 }, { 9, 31, 0 } } },
{ { { 16, 0, 3 }, { 9, 31, 1 } } },
{ { { 16, 0, 4 }, { 10, 30, 1 } } },
{ { { 17, 0, 3 }, { 10, 30, 0 } } },
{ { { 17, 0, 2 }, { 10, 31, 1 } } },
{ { { 17, 0, 1 }, { 10, 31, 0 } } },
{ { { 17, 0, 0 }, { 12, 27, 0 } } },
{ { { 17, 0, 1 }, { 11, 30, 1 } } },
{ { { 17, 0, 2 }, { 11, 30, 0 } } },
{ { { 17, 0, 3 }, { 12, 28, 0 } } },
{ { { 17, 0, 4 }, { 11, 31, 1 } } },
{ { { 18, 0, 3 }, { 11, 31, 0 } } },
{ { { 18, 0, 2 }, { 11, 31, 1 } } },
{ { { 18, 0, 1 }, { 12, 30, 1 } } },
{ { { 18, 0, 0 }, { 12, 30, 0 } } },
{ { { 18, 0, 1 }, { 14, 27, 1 } } },
{ { { 18, 0, 2 }, { 14, 27, 0 } } },
{ { { 18, 0, 3 }, { 12, 31, 0 } } },
{ { { 18, 0, 4 }, { 13, 30, 1 } } },
{ { { 19, 0, 3 }, { 13, 30, 0 } } },
{ { { 19, 0, 2 }, { 16, 24, 0 } } },
{ { { 19, 0, 1 }, { 13, 31, 1 } } },
{ { { 19, 0, 0 }, { 13, 31, 0 } } },
{ { { 19, 0, 1 }, { 13, 31, 1 } } },
{ { { 19, 0, 2 }, { 14, 30, 1 } } },
{ { { 19, 0, 3 }, { 14, 30, 0 } } },
{ { { 19, 0, 4 }, { 14, 31, 1 } } },
{ { { 20, 0, 4 }, { 14, 31, 0 } } },
{ { { 20, 0, 3 }, { 16, 27, 0 } } },
{ { { 20, 0, 2 }, { 15, 30, 1 } } },
{ { { 20, 0, 1 }, { 15, 30, 0 } } },
{ { { 20, 0, 0 }, { 16, 28, 0 } } },
{ { { 20, 0, 1 }, { 15, 31, 1 } } },
{ { { 20, 0, 2 }, { 15, 31, 0 } } },
{ { { 20, 0, 3 }, { 15, 31, 1 } } },
{ { { 20, 0, 4 }, { 16, 30, 1 } } },
{ { { 21, 0, 3 }, { 16, 30, 0 } } },
{ { { 21, 0, 2 }, { 18, 27, 1 } } },
{ { { 21, 0, 1 }, { 18, 27, 0 } } },
{ { { 21, 0, 0 }, { 16, 31, 0 } } },
{ { { 21, 0, 1 }, { 17, 30, 1 } } },
{ { { 21, 0, 2 }, { 17, 30, 0 } } },
{ { { 21, 0, 3 }, { 20, 24, 0 } } },
{ { { 21, 0, 4 }, { 17, 31, 1 } } },
{ { { 22, 0, 3 }, { 17, 31, 0 } } },
{ { { 22, 0, 2 }, { 17, 31, 1 } } },
{ { { 22, 0, 1 }, { 18, 30, 1 } } },
{ { { 22, 0, 0 }, { 18, 30, 0 } } },
{ { { 22, 0, 1 }, { 18, 31, 1 } } },
{ { { 22, 0, 2 }, { 18, 31, 0 } } },
{ { { 22, 0, 3 }, { 20, 27, 0 } } },
{ { { 22, 0, 4 }, { 19, 30, 1 } } },
{ { { 23, 0, 3 }, { 19, 30, 0 } } },
{ { { 23, 0, 2 }, { 20, 28, 0 } } },
{ { { 23, 0, 1 }, { 19, 31, 1 } } },
{ { { 23, 0, 0 }, { 19, 31, 0 } } },
{ { { 23, 0, 1 }, { 19, 31, 1 } } },
{ { { 23, 0, 2 }, { 20, 30, 1 } } },
{ { { 23, 0, 3 }, { 20, 30, 0 } } },
{ { { 23, 0, 4 }, { 22, 27, 1 } } },
{ { { 24, 0, 4 }, { 22, 27, 0 } } },
{ { { 24, 0, 3 }, { 20, 31, 0 } } },
{ { { 24, 0, 2 }, { 21, 30, 1 } } },
{ { { 24, 0, 1 }, { 21, 30, 0 } } },
{ { { 24, 0, 0 }, { 24, 24, 0 } } },
{ { { 24, 0, 1 }, { 21, 31, 1 } } },
{ { { 24, 0, 2 }, { 21, 31, 0 } } },
{ { { 24, 0, 3 }, { 21, 31, 1 } } },
{ { { 24, 0, 4 }, { 22, 30, 1 } } },
{ { { 25, 0, 3 }, { 22, 30, 0 } } },
{ { { 25, 0, 2 }, { 22, 31, 1 } } },
{ { { 25, 0, 1 }, { 22, 31, 0 } } },
{ { { 25, 0, 0 }, { 24, 27, 0 } } },
{ { { 25, 0, 1 }, { 23, 30, 1 } } },
{ { { 25, 0, 2 }, { 23, 30, 0 } } },
{ { { 25, 0, 3 }, { 24, 28, 0 } } },
{ { { 25, 0, 4 }, { 23, 31, 1 } } },
{ { { 26, 0, 3 }, { 23, 31, 0 } } },
{ { { 26, 0, 2 }, { 23, 31, 1 } } },
{ { { 26, 0, 1 }, { 24, 30, 1 } } },
{ { { 26, 0, 0 }, { 24, 30, 0 } } },
{ { { 26, 0, 1 }, { 26, 27, 1 } } },
{ { { 26, 0, 2 }, { 26, 27, 0 } } },
{ { { 26, 0, 3 }, { 24, 31, 0 } } },
{ { { 26, 0, 4 }, { 25, 30, 1 } } },
{ { { 27, 0, 3 }, { 25, 30, 0 } } },
{ { { 27, 0, 2 }, { 28, 24, 0 } } },
{ { { 27, 0, 1 }, { 25, 31, 1 } } },
{ { { 27, 0, 0 }, { 25, 31, 0 } } },
{ { { 27, 0, 1 }, { 25, 31, 1 } } },
{ { { 27, 0, 2 }, { 26, 30, 1 } } },
{ { { 27, 0, 3 }, { 26, 30, 0 } } },
{ { { 27, 0, 4 }, { 26, 31, 1 } } },
{ { { 28, 0, 4 }, { 26, 31, 0 } } },
{ { { 28, 0, 3 }, { 28, 27, 0 } } },
{ { { 28, 0, 2 }, { 27, 30, 1 } } },
{ { { 28, 0, 1 }, { 27, 30, 0 } } },
{ { { 28, 0, 0 }, { 28, 28, 0 } } },
{ { { 28, 0, 1 }, { 27, 31, 1 } } },
{ { { 28, 0, 2 }, { 27, 31, 0 } } },
{ { { 28, 0, 3 }, { 27, 31, 1 } } },
{ { { 28, 0, 4 }, { 28, 30, 1 } } },
{ { { 29, 0, 3 }, { 28, 30, 0 } } },
{ { { 29, 0, 2 }, { 30, 27, 1 } } },
{ { { 29, 0, 1 }, { 30, 27, 0 } } },
{ { { 29, 0, 0 }, { 28, 31, 0 } } },
{ { { 29, 0, 1 }, { 29, 30, 1 } } },
{ { { 29, 0, 2 }, { 29, 30, 0 } } },
{ { { 29, 0, 3 }, { 29, 30, 1 } } },
{ { { 29, 0, 4 }, { 29, 31, 1 } } },
{ { { 30, 0, 3 }, { 29, 31, 0 } } },
{ { { 30, 0, 2 }, { 29, 31, 1 } } },
{ { { 30, 0, 1 }, { 30, 30, 1 } } },
{ { { 30, 0, 0 }, { 30, 30, 0 } } },
{ { { 30, 0, 1 }, { 30, 31, 1 } } },
{ { { 30, 0, 2 }, { 30, 31, 0 } } },
{ { { 30, 0, 3 }, { 30, 31, 1 } } },
{ { { 30, 0, 4 }, { 31, 30, 1 } } },
{ { { 31, 0, 3 }, { 31, 30, 0 } } },
{ { { 31, 0, 2 }, { 31, 30, 1 } } },
{ { { 31, 0, 1 }, { 31, 31, 1 } } },
{ { { 31, 0, 0 }, { 31, 31, 0 } } }
};
/*
  Single-colour fit lookup table for the 6-bit (green) RGB565 channel:
  for each possible 8-bit component value, two candidate source blocks
  (start endpoint, end endpoint, residual error) — one reached with
  palette index 0, one with index 2.  Consumed via DDS_LOOKUP by
  ComputeEndPoints().  Generated table: do not edit by hand.
*/
static const DDSSingleColourLookup DDSLookup_6_4[] =
{
{ { { 0, 0, 0 }, { 0, 0, 0 } } },
{ { { 0, 0, 1 }, { 0, 1, 0 } } },
{ { { 0, 0, 2 }, { 0, 2, 0 } } },
{ { { 1, 0, 1 }, { 0, 3, 1 } } },
{ { { 1, 0, 0 }, { 0, 3, 0 } } },
{ { { 1, 0, 1 }, { 0, 4, 0 } } },
{ { { 1, 0, 2 }, { 0, 5, 0 } } },
{ { { 2, 0, 1 }, { 0, 6, 1 } } },
{ { { 2, 0, 0 }, { 0, 6, 0 } } },
{ { { 2, 0, 1 }, { 0, 7, 0 } } },
{ { { 2, 0, 2 }, { 0, 8, 0 } } },
{ { { 3, 0, 1 }, { 0, 9, 1 } } },
{ { { 3, 0, 0 }, { 0, 9, 0 } } },
{ { { 3, 0, 1 }, { 0, 10, 0 } } },
{ { { 3, 0, 2 }, { 0, 11, 0 } } },
{ { { 4, 0, 1 }, { 0, 12, 1 } } },
{ { { 4, 0, 0 }, { 0, 12, 0 } } },
{ { { 4, 0, 1 }, { 0, 13, 0 } } },
{ { { 4, 0, 2 }, { 0, 14, 0 } } },
{ { { 5, 0, 1 }, { 0, 15, 1 } } },
{ { { 5, 0, 0 }, { 0, 15, 0 } } },
{ { { 5, 0, 1 }, { 0, 16, 0 } } },
{ { { 5, 0, 2 }, { 1, 15, 0 } } },
{ { { 6, 0, 1 }, { 0, 17, 0 } } },
{ { { 6, 0, 0 }, { 0, 18, 0 } } },
{ { { 6, 0, 1 }, { 0, 19, 0 } } },
{ { { 6, 0, 2 }, { 3, 14, 0 } } },
{ { { 7, 0, 1 }, { 0, 20, 0 } } },
{ { { 7, 0, 0 }, { 0, 21, 0 } } },
{ { { 7, 0, 1 }, { 0, 22, 0 } } },
{ { { 7, 0, 2 }, { 4, 15, 0 } } },
{ { { 8, 0, 1 }, { 0, 23, 0 } } },
{ { { 8, 0, 0 }, { 0, 24, 0 } } },
{ { { 8, 0, 1 }, { 0, 25, 0 } } },
{ { { 8, 0, 2 }, { 6, 14, 0 } } },
{ { { 9, 0, 1 }, { 0, 26, 0 } } },
{ { { 9, 0, 0 }, { 0, 27, 0 } } },
{ { { 9, 0, 1 }, { 0, 28, 0 } } },
{ { { 9, 0, 2 }, { 7, 15, 0 } } },
{ { { 10, 0, 1 }, { 0, 29, 0 } } },
{ { { 10, 0, 0 }, { 0, 30, 0 } } },
{ { { 10, 0, 1 }, { 0, 31, 0 } } },
{ { { 10, 0, 2 }, { 9, 14, 0 } } },
{ { { 11, 0, 1 }, { 0, 32, 0 } } },
{ { { 11, 0, 0 }, { 0, 33, 0 } } },
{ { { 11, 0, 1 }, { 2, 30, 0 } } },
{ { { 11, 0, 2 }, { 0, 34, 0 } } },
{ { { 12, 0, 1 }, { 0, 35, 0 } } },
{ { { 12, 0, 0 }, { 0, 36, 0 } } },
{ { { 12, 0, 1 }, { 3, 31, 0 } } },
{ { { 12, 0, 2 }, { 0, 37, 0 } } },
{ { { 13, 0, 1 }, { 0, 38, 0 } } },
{ { { 13, 0, 0 }, { 0, 39, 0 } } },
{ { { 13, 0, 1 }, { 5, 30, 0 } } },
{ { { 13, 0, 2 }, { 0, 40, 0 } } },
{ { { 14, 0, 1 }, { 0, 41, 0 } } },
{ { { 14, 0, 0 }, { 0, 42, 0 } } },
{ { { 14, 0, 1 }, { 6, 31, 0 } } },
{ { { 14, 0, 2 }, { 0, 43, 0 } } },
{ { { 15, 0, 1 }, { 0, 44, 0 } } },
{ { { 15, 0, 0 }, { 0, 45, 0 } } },
{ { { 15, 0, 1 }, { 8, 30, 0 } } },
{ { { 15, 0, 2 }, { 0, 46, 0 } } },
{ { { 16, 0, 2 }, { 0, 47, 0 } } },
{ { { 16, 0, 1 }, { 1, 46, 0 } } },
{ { { 16, 0, 0 }, { 0, 48, 0 } } },
{ { { 16, 0, 1 }, { 0, 49, 0 } } },
{ { { 16, 0, 2 }, { 0, 50, 0 } } },
{ { { 17, 0, 1 }, { 2, 47, 0 } } },
{ { { 17, 0, 0 }, { 0, 51, 0 } } },
{ { { 17, 0, 1 }, { 0, 52, 0 } } },
{ { { 17, 0, 2 }, { 0, 53, 0 } } },
{ { { 18, 0, 1 }, { 4, 46, 0 } } },
{ { { 18, 0, 0 }, { 0, 54, 0 } } },
{ { { 18, 0, 1 }, { 0, 55, 0 } } },
{ { { 18, 0, 2 }, { 0, 56, 0 } } },
{ { { 19, 0, 1 }, { 5, 47, 0 } } },
{ { { 19, 0, 0 }, { 0, 57, 0 } } },
{ { { 19, 0, 1 }, { 0, 58, 0 } } },
{ { { 19, 0, 2 }, { 0, 59, 0 } } },
{ { { 20, 0, 1 }, { 7, 46, 0 } } },
{ { { 20, 0, 0 }, { 0, 60, 0 } } },
{ { { 20, 0, 1 }, { 0, 61, 0 } } },
{ { { 20, 0, 2 }, { 0, 62, 0 } } },
{ { { 21, 0, 1 }, { 8, 47, 0 } } },
{ { { 21, 0, 0 }, { 0, 63, 0 } } },
{ { { 21, 0, 1 }, { 1, 62, 0 } } },
{ { { 21, 0, 2 }, { 1, 63, 0 } } },
{ { { 22, 0, 1 }, { 10, 46, 0 } } },
{ { { 22, 0, 0 }, { 2, 62, 0 } } },
{ { { 22, 0, 1 }, { 2, 63, 0 } } },
{ { { 22, 0, 2 }, { 3, 62, 0 } } },
{ { { 23, 0, 1 }, { 11, 47, 0 } } },
{ { { 23, 0, 0 }, { 3, 63, 0 } } },
{ { { 23, 0, 1 }, { 4, 62, 0 } } },
{ { { 23, 0, 2 }, { 4, 63, 0 } } },
{ { { 24, 0, 1 }, { 13, 46, 0 } } },
{ { { 24, 0, 0 }, { 5, 62, 0 } } },
{ { { 24, 0, 1 }, { 5, 63, 0 } } },
{ { { 24, 0, 2 }, { 6, 62, 0 } } },
{ { { 25, 0, 1 }, { 14, 47, 0 } } },
{ { { 25, 0, 0 }, { 6, 63, 0 } } },
{ { { 25, 0, 1 }, { 7, 62, 0 } } },
{ { { 25, 0, 2 }, { 7, 63, 0 } } },
{ { { 26, 0, 1 }, { 16, 45, 0 } } },
{ { { 26, 0, 0 }, { 8, 62, 0 } } },
{ { { 26, 0, 1 }, { 8, 63, 0 } } },
{ { { 26, 0, 2 }, { 9, 62, 0 } } },
{ { { 27, 0, 1 }, { 16, 48, 0 } } },
{ { { 27, 0, 0 }, { 9, 63, 0 } } },
{ { { 27, 0, 1 }, { 10, 62, 0 } } },
{ { { 27, 0, 2 }, { 10, 63, 0 } } },
{ { { 28, 0, 1 }, { 16, 51, 0 } } },
{ { { 28, 0, 0 }, { 11, 62, 0 } } },
{ { { 28, 0, 1 }, { 11, 63, 0 } } },
{ { { 28, 0, 2 }, { 12, 62, 0 } } },
{ { { 29, 0, 1 }, { 16, 54, 0 } } },
{ { { 29, 0, 0 }, { 12, 63, 0 } } },
{ { { 29, 0, 1 }, { 13, 62, 0 } } },
{ { { 29, 0, 2 }, { 13, 63, 0 } } },
{ { { 30, 0, 1 }, { 16, 57, 0 } } },
{ { { 30, 0, 0 }, { 14, 62, 0 } } },
{ { { 30, 0, 1 }, { 14, 63, 0 } } },
{ { { 30, 0, 2 }, { 15, 62, 0 } } },
{ { { 31, 0, 1 }, { 16, 60, 0 } } },
{ { { 31, 0, 0 }, { 15, 63, 0 } } },
{ { { 31, 0, 1 }, { 24, 46, 0 } } },
{ { { 31, 0, 2 }, { 16, 62, 0 } } },
{ { { 32, 0, 2 }, { 16, 63, 0 } } },
{ { { 32, 0, 1 }, { 17, 62, 0 } } },
{ { { 32, 0, 0 }, { 25, 47, 0 } } },
{ { { 32, 0, 1 }, { 17, 63, 0 } } },
{ { { 32, 0, 2 }, { 18, 62, 0 } } },
{ { { 33, 0, 1 }, { 18, 63, 0 } } },
{ { { 33, 0, 0 }, { 27, 46, 0 } } },
{ { { 33, 0, 1 }, { 19, 62, 0 } } },
{ { { 33, 0, 2 }, { 19, 63, 0 } } },
{ { { 34, 0, 1 }, { 20, 62, 0 } } },
{ { { 34, 0, 0 }, { 28, 47, 0 } } },
{ { { 34, 0, 1 }, { 20, 63, 0 } } },
{ { { 34, 0, 2 }, { 21, 62, 0 } } },
{ { { 35, 0, 1 }, { 21, 63, 0 } } },
{ { { 35, 0, 0 }, { 30, 46, 0 } } },
{ { { 35, 0, 1 }, { 22, 62, 0 } } },
{ { { 35, 0, 2 }, { 22, 63, 0 } } },
{ { { 36, 0, 1 }, { 23, 62, 0 } } },
{ { { 36, 0, 0 }, { 31, 47, 0 } } },
{ { { 36, 0, 1 }, { 23, 63, 0 } } },
{ { { 36, 0, 2 }, { 24, 62, 0 } } },
{ { { 37, 0, 1 }, { 24, 63, 0 } } },
{ { { 37, 0, 0 }, { 32, 47, 0 } } },
{ { { 37, 0, 1 }, { 25, 62, 0 } } },
{ { { 37, 0, 2 }, { 25, 63, 0 } } },
{ { { 38, 0, 1 }, { 26, 62, 0 } } },
{ { { 38, 0, 0 }, { 32, 50, 0 } } },
{ { { 38, 0, 1 }, { 26, 63, 0 } } },
{ { { 38, 0, 2 }, { 27, 62, 0 } } },
{ { { 39, 0, 1 }, { 27, 63, 0 } } },
{ { { 39, 0, 0 }, { 32, 53, 0 } } },
{ { { 39, 0, 1 }, { 28, 62, 0 } } },
{ { { 39, 0, 2 }, { 28, 63, 0 } } },
{ { { 40, 0, 1 }, { 29, 62, 0 } } },
{ { { 40, 0, 0 }, { 32, 56, 0 } } },
{ { { 40, 0, 1 }, { 29, 63, 0 } } },
{ { { 40, 0, 2 }, { 30, 62, 0 } } },
{ { { 41, 0, 1 }, { 30, 63, 0 } } },
{ { { 41, 0, 0 }, { 32, 59, 0 } } },
{ { { 41, 0, 1 }, { 31, 62, 0 } } },
{ { { 41, 0, 2 }, { 31, 63, 0 } } },
{ { { 42, 0, 1 }, { 32, 61, 0 } } },
{ { { 42, 0, 0 }, { 32, 62, 0 } } },
{ { { 42, 0, 1 }, { 32, 63, 0 } } },
{ { { 42, 0, 2 }, { 41, 46, 0 } } },
{ { { 43, 0, 1 }, { 33, 62, 0 } } },
{ { { 43, 0, 0 }, { 33, 63, 0 } } },
{ { { 43, 0, 1 }, { 34, 62, 0 } } },
{ { { 43, 0, 2 }, { 42, 47, 0 } } },
{ { { 44, 0, 1 }, { 34, 63, 0 } } },
{ { { 44, 0, 0 }, { 35, 62, 0 } } },
{ { { 44, 0, 1 }, { 35, 63, 0 } } },
{ { { 44, 0, 2 }, { 44, 46, 0 } } },
{ { { 45, 0, 1 }, { 36, 62, 0 } } },
{ { { 45, 0, 0 }, { 36, 63, 0 } } },
{ { { 45, 0, 1 }, { 37, 62, 0 } } },
{ { { 45, 0, 2 }, { 45, 47, 0 } } },
{ { { 46, 0, 1 }, { 37, 63, 0 } } },
{ { { 46, 0, 0 }, { 38, 62, 0 } } },
{ { { 46, 0, 1 }, { 38, 63, 0 } } },
{ { { 46, 0, 2 }, { 47, 46, 0 } } },
{ { { 47, 0, 1 }, { 39, 62, 0 } } },
{ { { 47, 0, 0 }, { 39, 63, 0 } } },
{ { { 47, 0, 1 }, { 40, 62, 0 } } },
{ { { 47, 0, 2 }, { 48, 46, 0 } } },
{ { { 48, 0, 2 }, { 40, 63, 0 } } },
{ { { 48, 0, 1 }, { 41, 62, 0 } } },
{ { { 48, 0, 0 }, { 41, 63, 0 } } },
{ { { 48, 0, 1 }, { 48, 49, 0 } } },
{ { { 48, 0, 2 }, { 42, 62, 0 } } },
{ { { 49, 0, 1 }, { 42, 63, 0 } } },
{ { { 49, 0, 0 }, { 43, 62, 0 } } },
{ { { 49, 0, 1 }, { 48, 52, 0 } } },
{ { { 49, 0, 2 }, { 43, 63, 0 } } },
{ { { 50, 0, 1 }, { 44, 62, 0 } } },
{ { { 50, 0, 0 }, { 44, 63, 0 } } },
{ { { 50, 0, 1 }, { 48, 55, 0 } } },
{ { { 50, 0, 2 }, { 45, 62, 0 } } },
{ { { 51, 0, 1 }, { 45, 63, 0 } } },
{ { { 51, 0, 0 }, { 46, 62, 0 } } },
{ { { 51, 0, 1 }, { 48, 58, 0 } } },
{ { { 51, 0, 2 }, { 46, 63, 0 } } },
{ { { 52, 0, 1 }, { 47, 62, 0 } } },
{ { { 52, 0, 0 }, { 47, 63, 0 } } },
{ { { 52, 0, 1 }, { 48, 61, 0 } } },
{ { { 52, 0, 2 }, { 48, 62, 0 } } },
{ { { 53, 0, 1 }, { 56, 47, 0 } } },
{ { { 53, 0, 0 }, { 48, 63, 0 } } },
{ { { 53, 0, 1 }, { 49, 62, 0 } } },
{ { { 53, 0, 2 }, { 49, 63, 0 } } },
{ { { 54, 0, 1 }, { 58, 46, 0 } } },
{ { { 54, 0, 0 }, { 50, 62, 0 } } },
{ { { 54, 0, 1 }, { 50, 63, 0 } } },
{ { { 54, 0, 2 }, { 51, 62, 0 } } },
{ { { 55, 0, 1 }, { 59, 47, 0 } } },
{ { { 55, 0, 0 }, { 51, 63, 0 } } },
{ { { 55, 0, 1 }, { 52, 62, 0 } } },
{ { { 55, 0, 2 }, { 52, 63, 0 } } },
{ { { 56, 0, 1 }, { 61, 46, 0 } } },
{ { { 56, 0, 0 }, { 53, 62, 0 } } },
{ { { 56, 0, 1 }, { 53, 63, 0 } } },
{ { { 56, 0, 2 }, { 54, 62, 0 } } },
{ { { 57, 0, 1 }, { 62, 47, 0 } } },
{ { { 57, 0, 0 }, { 54, 63, 0 } } },
{ { { 57, 0, 1 }, { 55, 62, 0 } } },
{ { { 57, 0, 2 }, { 55, 63, 0 } } },
{ { { 58, 0, 1 }, { 56, 62, 1 } } },
{ { { 58, 0, 0 }, { 56, 62, 0 } } },
{ { { 58, 0, 1 }, { 56, 63, 0 } } },
{ { { 58, 0, 2 }, { 57, 62, 0 } } },
{ { { 59, 0, 1 }, { 57, 63, 1 } } },
{ { { 59, 0, 0 }, { 57, 63, 0 } } },
{ { { 59, 0, 1 }, { 58, 62, 0 } } },
{ { { 59, 0, 2 }, { 58, 63, 0 } } },
{ { { 60, 0, 1 }, { 59, 62, 1 } } },
{ { { 60, 0, 0 }, { 59, 62, 0 } } },
{ { { 60, 0, 1 }, { 59, 63, 0 } } },
{ { { 60, 0, 2 }, { 60, 62, 0 } } },
{ { { 61, 0, 1 }, { 60, 63, 1 } } },
{ { { 61, 0, 0 }, { 60, 63, 0 } } },
{ { { 61, 0, 1 }, { 61, 62, 0 } } },
{ { { 61, 0, 2 }, { 61, 63, 0 } } },
{ { { 62, 0, 1 }, { 62, 62, 1 } } },
{ { { 62, 0, 0 }, { 62, 62, 0 } } },
{ { { 62, 0, 1 }, { 62, 63, 0 } } },
{ { { 62, 0, 2 }, { 63, 62, 0 } } },
{ { { 63, 0, 1 }, { 63, 63, 1 } } },
{ { { 63, 0, 0 }, { 63, 63, 0 } } }
};
/*
  Per-channel single-colour lookup tables, indexed R, G, B: the 5-bit
  table for red and blue, the 6-bit table for green (RGB565 layout).
*/
static const DDSSingleColourLookup*
DDS_LOOKUP[] =
{
DDSLookup_5_4,
DDSLookup_6_4,
DDSLookup_5_4
};
/*
Macros
*/
/* Extract the raw 5-, 6-, and 5-bit channels from a packed RGB565 word. */
#define C565_r(x) (((x) & 0xF800) >> 11)
#define C565_g(x) (((x) & 0x07E0) >> 5)
#define C565_b(x) ((x) & 0x001F)
/* Expand a 5- or 6-bit channel to 8 bits by replicating its high bits. */
#define C565_red(x) ( (C565_r(x) << 3 | C565_r(x) >> 2))
#define C565_green(x) ( (C565_g(x) << 2 | C565_g(x) >> 4))
#define C565_blue(x) ( (C565_b(x) << 3 | C565_b(x) >> 2))
/* Halve a mipmap dimension, never shrinking below 1. */
#define DIV2(x) ((x) > 1 ? ((x) >> 1) : 1)
/*
  Clamp min/max so the [min,max] interval spans at least `steps' levels.
  Wrapped in do { } while(0) so the multi-statement macro behaves as a
  single statement inside unbraced if/else bodies (CERT PRE10-C); all
  existing call sites of the form `FixRange(a,b,c);' are unaffected.
*/
#define FixRange(min, max, steps) \
do \
{ \
  if (min > max) \
    min = max; \
  if ((ssize_t) max - min < steps) \
    max = MagickMin(min + steps, 255); \
  if ((ssize_t) max - min < steps) \
    min = MagickMax(0, (ssize_t) max - steps); \
} while(0)
/* 3-component dot product; fully parenthesized so it composes safely. */
#define Dot(left, right) ((left.x*right.x) + (left.y*right.y) + (left.z*right.z))
#define VectorInit(vector, value) vector.x = vector.y = vector.z = vector.w \
  = value
#define VectorInit3(vector, value) vector.x = vector.y = vector.z = value
#define IsBitMask(mask, r, g, b, a) (mask.r_bitmask == r && mask.g_bitmask == \
  g && mask.b_bitmask == b && mask.alpha_bitmask == a)
/*
Forward declarations
*/
static MagickBooleanType
ConstructOrdering(const size_t,const DDSVector4 *,const DDSVector3,
DDSVector4 *, DDSVector4 *, unsigned char *, size_t),
ReadDDSInfo(Image *,DDSInfo *),
ReadDXT1(const ImageInfo *,Image *,DDSInfo *,const MagickBooleanType,
ExceptionInfo *),
ReadDXT3(const ImageInfo *,Image *,DDSInfo *,const MagickBooleanType,
ExceptionInfo *),
ReadDXT5(const ImageInfo *,Image *,DDSInfo *,const MagickBooleanType,
ExceptionInfo *),
ReadUncompressedRGB(const ImageInfo *,Image *,DDSInfo *,
const MagickBooleanType,ExceptionInfo *),
ReadUncompressedRGBA(const ImageInfo *,Image *,DDSInfo *,
const MagickBooleanType,ExceptionInfo *),
SkipDXTMipmaps(Image *,DDSInfo *,int,ExceptionInfo *),
SkipRGBMipmaps(Image *,DDSInfo *,int,ExceptionInfo *),
WriteDDSImage(const ImageInfo *,Image *,ExceptionInfo *),
WriteMipmaps(Image *,const ImageInfo*,const size_t,const size_t,const size_t,
const MagickBooleanType,const MagickBooleanType,const MagickBooleanType,
ExceptionInfo *);
static void
RemapIndices(const ssize_t *,const unsigned char *,unsigned char *),
WriteDDSInfo(Image *,const size_t,const size_t,const size_t),
WriteFourCC(Image *,const size_t,const MagickBooleanType,
const MagickBooleanType,ExceptionInfo *),
WriteImageData(Image *,const size_t,const size_t,const MagickBooleanType,
const MagickBooleanType,ExceptionInfo *),
WriteIndices(Image *,const DDSVector3,const DDSVector3,unsigned char *),
WriteSingleColorFit(Image *,const DDSVector4 *,const ssize_t *),
WriteUncompressed(Image *,ExceptionInfo *);
/* Component-wise 4-vector sum: *destination = left + right. */
static inline void VectorAdd(const DDSVector4 left, const DDSVector4 right,
  DDSVector4 *destination)
{
  destination->w = left.w + right.w;
  destination->z = left.z + right.z;
  destination->y = left.y + right.y;
  destination->x = left.x + right.x;
}
/* Clamp every lane of a 4-vector into [0, 1] in place. */
static inline void VectorClamp(DDSVector4 *value)
{
  value->w = MagickMin(1.0f,MagickMax(0.0f,value->w));
  value->z = MagickMin(1.0f,MagickMax(0.0f,value->z));
  value->y = MagickMin(1.0f,MagickMax(0.0f,value->y));
  value->x = MagickMin(1.0f,MagickMax(0.0f,value->x));
}
/* Clamp every lane of a 3-vector into [0, 1] in place. */
static inline void VectorClamp3(DDSVector3 *value)
{
  value->z = MagickMin(1.0f,MagickMax(0.0f,value->z));
  value->y = MagickMin(1.0f,MagickMax(0.0f,value->y));
  value->x = MagickMin(1.0f,MagickMax(0.0f,value->x));
}
/* Copy the x/y/z lanes of a 4-vector into a 3-vector (w discarded). */
static inline void VectorCopy43(const DDSVector4 source,
  DDSVector3 *destination)
{
  destination->z = source.z;
  destination->y = source.y;
  destination->x = source.x;
}
/* Copy all four lanes of a 4-vector. */
static inline void VectorCopy44(const DDSVector4 source,
  DDSVector4 *destination)
{
  destination->w = source.w;
  destination->z = source.z;
  destination->y = source.y;
  destination->x = source.x;
}
/* Fused negative multiply-subtract: *destination = c - a*b, per lane. */
static inline void VectorNegativeMultiplySubtract(const DDSVector4 a,
  const DDSVector4 b, const DDSVector4 c, DDSVector4 *destination)
{
  destination->w = c.w - (a.w * b.w);
  destination->z = c.z - (a.z * b.z);
  destination->y = c.y - (a.y * b.y);
  destination->x = c.x - (a.x * b.x);
}
/* Component-wise 4-vector product: *destination = left * right. */
static inline void VectorMultiply(const DDSVector4 left,
  const DDSVector4 right, DDSVector4 *destination)
{
  destination->w = left.w * right.w;
  destination->z = left.z * right.z;
  destination->y = left.y * right.y;
  destination->x = left.x * right.x;
}
/* Component-wise 3-vector product: *destination = left * right. */
static inline void VectorMultiply3(const DDSVector3 left,
  const DDSVector3 right, DDSVector3 *destination)
{
  destination->z = left.z * right.z;
  destination->y = left.y * right.y;
  destination->x = left.x * right.x;
}
/* Fused multiply-add: *destination = a*b + c, per lane. */
static inline void VectorMultiplyAdd(const DDSVector4 a, const DDSVector4 b,
  const DDSVector4 c, DDSVector4 *destination)
{
  destination->w = (a.w * b.w) + c.w;
  destination->z = (a.z * b.z) + c.z;
  destination->y = (a.y * b.y) + c.y;
  destination->x = (a.x * b.x) + c.x;
}
/* Fused multiply-add on 3-vectors: *destination = a*b + c, per lane. */
static inline void VectorMultiplyAdd3(const DDSVector3 a, const DDSVector3 b,
  const DDSVector3 c, DDSVector3 *destination)
{
  destination->z = (a.z * b.z) + c.z;
  destination->y = (a.y * b.y) + c.y;
  destination->x = (a.x * b.x) + c.x;
}
/* Per-lane reciprocal: *destination = 1 / value (lanes of 0 yield inf). */
static inline void VectorReciprocal(const DDSVector4 value,
  DDSVector4 *destination)
{
  destination->w = 1.0f / value.w;
  destination->z = 1.0f / value.z;
  destination->y = 1.0f / value.y;
  destination->x = 1.0f / value.x;
}
/* Component-wise 4-vector difference: *destination = left - right. */
static inline void VectorSubtract(const DDSVector4 left,
  const DDSVector4 right, DDSVector4 *destination)
{
  destination->w = left.w - right.w;
  destination->z = left.z - right.z;
  destination->y = left.y - right.y;
  destination->x = left.x - right.x;
}
/* Component-wise 3-vector difference: *destination = left - right. */
static inline void VectorSubtract3(const DDSVector3 left,
  const DDSVector3 right, DDSVector3 *destination)
{
  destination->z = left.z - right.z;
  destination->y = left.y - right.y;
  destination->x = left.x - right.x;
}
/* Round every lane toward zero (floor for positive, ceil otherwise). */
static inline void VectorTruncate(DDSVector4 *value)
{
  value->w = value->w <= 0.0f ? ceil(value->w) : floor(value->w);
  value->z = value->z <= 0.0f ? ceil(value->z) : floor(value->z);
  value->y = value->y <= 0.0f ? ceil(value->y) : floor(value->y);
  value->x = value->x <= 0.0f ? ceil(value->x) : floor(value->x);
}
/* Round every lane of a 3-vector toward zero. */
static inline void VectorTruncate3(DDSVector3 *value)
{
  value->z = value->z <= 0.0f ? ceil(value->z) : floor(value->z);
  value->y = value->y <= 0.0f ? ceil(value->y) : floor(value->y);
  value->x = value->x <= 0.0f ? ceil(value->x) : floor(value->x);
}
/*
  Expand the two RGB565 block endpoints into the full four-entry DXT
  palette.  In four-colour mode (ignoreAlpha set, or c0 > c1) the middle
  entries are 1/3-2/3 blends of the endpoints; otherwise the
  three-colour-plus-transparent mode applies and entry 3 is transparent
  black (alpha 255 marks it transparent in this decoder's convention).
*/
static void CalculateColors(unsigned short c0, unsigned short c1,
  DDSColors *c, MagickBooleanType ignoreAlpha)
{
  ssize_t
    i;

  unsigned short
    endpoint[2];

  endpoint[0]=c0;
  endpoint[1]=c1;
  c->a[0] = c->a[1] = c->a[2] = c->a[3] = 0;
  for (i=0; i < 2; i++)
  {
    c->r[i] = (unsigned char) C565_red(endpoint[i]);
    c->g[i] = (unsigned char) C565_green(endpoint[i]);
    c->b[i] = (unsigned char) C565_blue(endpoint[i]);
  }
  if (ignoreAlpha != MagickFalse || c0 > c1)
    {
      /* Four-colour mode: two interpolated middle colours. */
      c->r[2] = (unsigned char) ((2 * c->r[0] + c->r[1]) / 3);
      c->g[2] = (unsigned char) ((2 * c->g[0] + c->g[1]) / 3);
      c->b[2] = (unsigned char) ((2 * c->b[0] + c->b[1]) / 3);
      c->r[3] = (unsigned char) ((c->r[0] + 2 * c->r[1]) / 3);
      c->g[3] = (unsigned char) ((c->g[0] + 2 * c->g[1]) / 3);
      c->b[3] = (unsigned char) ((c->b[0] + 2 * c->b[1]) / 3);
    }
  else
    {
      /* Three-colour mode: averaged middle colour, transparent last. */
      c->r[2] = (unsigned char) ((c->r[0] + c->r[1]) / 2);
      c->g[2] = (unsigned char) ((c->g[0] + c->g[1]) / 2);
      c->b[2] = (unsigned char) ((c->b[0] + c->b[1]) / 2);
      c->r[3] = c->g[3] = c->b[3] = 0;
      c->a[3] = 255;
    }
}
/*
  Quantize 16 texel alpha values against the DXT5 alpha code table built
  from the [min,max] endpoints with `steps' interpolation intervals,
  writing the chosen 3-bit code per texel into `indices' and returning
  the accumulated squared error.  Texels flagged -1 are outside the
  image and are forced to code 0.
*/
static size_t CompressAlpha(const size_t min, const size_t max,
  const size_t steps, const ssize_t *alphas, unsigned char* indices)
{
  size_t
    error,
    i,
    j;

  unsigned char
    codes[8];

  /*
    Code table: the two endpoints, steps-1 interpolants, and the
    implicit 0/255 codes of the 6-value mode in slots 6 and 7.  With
    steps == 7 the loop overwrites slots 6 and 7, which matches the
    8-value mode.
  */
  codes[0] = (unsigned char) min;
  codes[1] = (unsigned char) max;
  codes[6] = 0;
  codes[7] = 255;
  for (i=1; i < steps; i++)
    codes[i+1] = (unsigned char) (((steps-i)*min + i*max) / steps);
  error = 0;
  for (i=0; i < 16; i++)
  {
    size_t
      best,
      bestIndex,
      value;

    if (alphas[i] == -1)
      {
        indices[i] = 0;
        continue;
      }
    value = (size_t) alphas[i];
    best = SIZE_MAX;
    bestIndex = 0;
    for (j=0; j < 8; j++)
    {
      size_t
        dist;

      /* Squared distance; the unsigned wrap-around cancels on squaring. */
      dist = value - (size_t) codes[j];
      dist *= dist;
      if (dist < best)
        {
          best = dist;
          bestIndex = j;
        }
    }
    indices[i] = (unsigned char) bestIndex;
    error += best;
  }
  return error;
}
/*
  Cluster-fit DXT endpoint search (least-squares over all orderings):
  the block's colours are sorted along the principle axis, then every
  split of that ordering into three runs (weights 0, 1/3, 2/3, 1) is
  evaluated in closed form; the split with the smallest metric-weighted
  error determines the start/end endpoints and per-texel indices.
  Iterates up to 8 times, re-sorting along the best axis found so far.
*/
static void CompressClusterFit(const size_t count,
const DDSVector4 *points, const ssize_t *map, const DDSVector3 principle,
const DDSVector4 metric, DDSVector3 *start, DDSVector3* end,
unsigned char *indices)
{
DDSVector3
axis;
DDSVector4
grid,
gridrcp,
half,
onethird_onethird2,
pointsWeights[16],
two,
twonineths,
twothirds_twothirds2,
xSumwSum;
float
bestError = 1e+37f;
size_t
bestIteration = 0,
besti = 0,
bestj = 0,
bestk = 0,
iterationIndex;
ssize_t
i;
unsigned char
*o,
/* 8 iterations x 16 texels of ordering history */
order[128],
unordered[16];
/* Constant blend weights and the RGB565 quantization grid. */
VectorInit(half,0.5f);
VectorInit(two,2.0f);
VectorInit(onethird_onethird2,1.0f/3.0f);
onethird_onethird2.w = 1.0f/9.0f;
VectorInit(twothirds_twothirds2,2.0f/3.0f);
twothirds_twothirds2.w = 4.0f/9.0f;
VectorInit(twonineths,2.0f/9.0f);
grid.x = 31.0f;
grid.y = 63.0f;
grid.z = 31.0f;
grid.w = 0.0f;
gridrcp.x = 1.0f/31.0f;
gridrcp.y = 1.0f/63.0f;
gridrcp.z = 1.0f/31.0f;
gridrcp.w = 0.0f;
xSumwSum.x = 0.0f;
xSumwSum.y = 0.0f;
xSumwSum.z = 0.0f;
xSumwSum.w = 0.0f;
ConstructOrdering(count,points,principle,pointsWeights,&xSumwSum,order,0);
for (iterationIndex = 0;;)
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,1) \
num_threads(GetMagickResourceLimit(ThreadResource))
#endif
for (i=0; i < (ssize_t) count; i++)
{
DDSVector4
part0,
part1,
part2;
size_t
ii,
j,
k,
kmin;
/* part0/part1/part2 accumulate the weighted sums of the three runs
   [0,i), [i,j), [j,k) of the current ordering. */
VectorInit(part0,0.0f);
for(ii=0; ii < (size_t) i; ii++)
VectorAdd(pointsWeights[ii],part0,&part0);
VectorInit(part1,0.0f);
for (j=(size_t) i;;)
{
if (j == 0)
{
VectorCopy44(pointsWeights[0],&part2);
kmin = 1;
}
else
{
VectorInit(part2,0.0f);
kmin = j;
}
for (k=kmin;;)
{
DDSVector4
a,
alpha2_sum,
alphax_sum,
alphabeta_sum,
b,
beta2_sum,
betax_sum,
e1,
e2,
factor,
part3;
float
error;
/* Closed-form least-squares solve for endpoints a and b given this
   run split, followed by snapping both onto the RGB565 grid. */
VectorSubtract(xSumwSum,part2,&part3);
VectorSubtract(part3,part1,&part3);
VectorSubtract(part3,part0,&part3);
VectorMultiplyAdd(part1,twothirds_twothirds2,part0,&alphax_sum);
VectorMultiplyAdd(part2,onethird_onethird2,alphax_sum,&alphax_sum);
VectorInit(alpha2_sum,alphax_sum.w);
VectorMultiplyAdd(part2,twothirds_twothirds2,part3,&betax_sum);
VectorMultiplyAdd(part1,onethird_onethird2,betax_sum,&betax_sum);
VectorInit(beta2_sum,betax_sum.w);
VectorAdd(part1,part2,&alphabeta_sum);
VectorInit(alphabeta_sum,alphabeta_sum.w);
VectorMultiply(twonineths,alphabeta_sum,&alphabeta_sum);
VectorMultiply(alpha2_sum,beta2_sum,&factor);
VectorNegativeMultiplySubtract(alphabeta_sum,alphabeta_sum,factor,
&factor);
VectorReciprocal(factor,&factor);
VectorMultiply(alphax_sum,beta2_sum,&a);
VectorNegativeMultiplySubtract(betax_sum,alphabeta_sum,a,&a);
VectorMultiply(a,factor,&a);
VectorMultiply(betax_sum,alpha2_sum,&b);
VectorNegativeMultiplySubtract(alphax_sum,alphabeta_sum,b,&b);
VectorMultiply(b,factor,&b);
VectorClamp(&a);
VectorMultiplyAdd(grid,a,half,&a);
VectorTruncate(&a);
VectorMultiply(a,gridrcp,&a);
VectorClamp(&b);
VectorMultiplyAdd(grid,b,half,&b);
VectorTruncate(&b);
VectorMultiply(b,gridrcp,&b);
VectorMultiply(b,b,&e1);
VectorMultiply(e1,beta2_sum,&e1);
VectorMultiply(a,a,&e2);
VectorMultiplyAdd(e2,alpha2_sum,e1,&e1);
VectorMultiply(a,b,&e2);
VectorMultiply(e2,alphabeta_sum,&e2);
VectorNegativeMultiplySubtract(a,alphax_sum,e2,&e2);
VectorNegativeMultiplySubtract(b,betax_sum,e2,&e2);
VectorMultiplyAdd(two,e2,e1,&e2);
VectorMultiply(e2,metric,&e2);
error = e2.x + e2.y + e2.z;
/* Double-checked best-so-far update: the outer read of bestError is a
   cheap unsynchronized filter, the critical section re-checks before
   committing (shared state across OpenMP threads). */
if (error < bestError)
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (DDS_CompressClusterFit)
#endif
{
if (error < bestError)
{
VectorCopy43(a,start);
VectorCopy43(b,end);
bestError = error;
besti = i;
bestj = j;
bestk = k;
bestIteration = iterationIndex;
}
}
}
if (k == count)
break;
VectorAdd(pointsWeights[k],part2,&part2);
k++;
}
if (j == count)
break;
VectorAdd(pointsWeights[j],part1,&part1);
j++;
}
}
/* Stop when an iteration fails to improve, after 8 iterations, or when
   the new ordering repeats a previous one. */
if (bestIteration != iterationIndex)
break;
iterationIndex++;
if (iterationIndex == 8)
break;
VectorSubtract3(*end,*start,&axis);
if (ConstructOrdering(count,points,axis,pointsWeights,&xSumwSum,order,
iterationIndex) == MagickFalse)
break;
}
/* Translate the winning split back into per-texel palette indices. */
o = order + (16*bestIteration);
for (i=0; i < (ssize_t) besti; i++)
unordered[o[i]] = 0;
for (i=besti; i < (ssize_t) bestj; i++)
unordered[o[i]] = 2;
for (i=bestj; i < (ssize_t) bestk; i++)
unordered[o[i]] = 3;
for (i=bestk; i < (ssize_t) count; i++)
unordered[o[i]] = 1;
RemapIndices(map,unordered,indices);
}
/*
  Range-fit DXT compression: take the two colours extreme along the
  principle axis as endpoints, snap them to the RGB565 grid, then assign
  each input colour to the nearest of the four palette codes under the
  perceptual metric.  Faster but coarser than CompressClusterFit.
*/
static void CompressRangeFit(const size_t count,
const DDSVector4* points, const ssize_t *map, const DDSVector3 principle,
const DDSVector4 metric, DDSVector3 *start, DDSVector3 *end,
unsigned char *indices)
{
float
d,
bestDist,
max,
min,
val;
DDSVector3
codes[4],
grid,
gridrcp,
half,
dist;
register ssize_t
i;
size_t
bestj,
j;
unsigned char
closest[16];
/* RGB565 quantization grid (5/6/5 levels per channel). */
VectorInit3(half,0.5f);
grid.x = 31.0f;
grid.y = 63.0f;
grid.z = 31.0f;
gridrcp.x = 1.0f/31.0f;
gridrcp.y = 1.0f/63.0f;
gridrcp.z = 1.0f/31.0f;
/* Find the extreme points along the principle axis.
   NOTE(review): with count == 0, *start/*end keep whatever the caller
   passed in — presumably callers guarantee count > 0; confirm. */
if (count > 0)
{
VectorCopy43(points[0],start);
VectorCopy43(points[0],end);
min = max = Dot(points[0],principle);
for (i=1; i < (ssize_t) count; i++)
{
val = Dot(points[i],principle);
if (val < min)
{
VectorCopy43(points[i],start);
min = val;
}
else if (val > max)
{
VectorCopy43(points[i],end);
max = val;
}
}
}
/* Snap both endpoints onto the RGB565 grid (round to nearest level). */
VectorClamp3(start);
VectorMultiplyAdd3(grid,*start,half,start);
VectorTruncate3(start);
VectorMultiply3(*start,gridrcp,start);
VectorClamp3(end);
VectorMultiplyAdd3(grid,*end,half,end);
VectorTruncate3(end);
VectorMultiply3(*end,gridrcp,end);
/* Four-entry palette: endpoints plus the 1/3-2/3 interpolants. */
codes[0] = *start;
codes[1] = *end;
codes[2].x = (start->x * (2.0f/3.0f)) + (end->x * (1.0f/3.0f));
codes[2].y = (start->y * (2.0f/3.0f)) + (end->y * (1.0f/3.0f));
codes[2].z = (start->z * (2.0f/3.0f)) + (end->z * (1.0f/3.0f));
codes[3].x = (start->x * (1.0f/3.0f)) + (end->x * (2.0f/3.0f));
codes[3].y = (start->y * (1.0f/3.0f)) + (end->y * (2.0f/3.0f));
codes[3].z = (start->z * (1.0f/3.0f)) + (end->z * (2.0f/3.0f));
/* Assign each colour to the nearest code, weighted by the metric. */
for (i=0; i < (ssize_t) count; i++)
{
bestDist = 1e+37f;
bestj = 0;
for (j=0; j < 4; j++)
{
dist.x = (points[i].x - codes[j].x) * metric.x;
dist.y = (points[i].y - codes[j].y) * metric.y;
dist.z = (points[i].z - codes[j].z) * metric.z;
d = Dot(dist,dist);
if (d < bestDist)
{
bestDist = d;
bestj = j;
}
}
closest[i] = (unsigned char) bestj;
}
RemapIndices(map, closest, indices);
}
/*
  Single-colour fit: pick DXT endpoints for a block of one colour by
  table lookup.  Each channel's table offers two candidate source
  blocks (palette index 0 or 2); the candidate pair with the smaller
  total squared error wins, and *index records the palette index used.
*/
static void ComputeEndPoints(const DDSSingleColourLookup *lookup[],
const unsigned char *color, DDSVector3 *start, DDSVector3 *end,
unsigned char *index)
{
register ssize_t
i;
size_t
c,
/* SIZE_MAX sentinel: the first candidate always passes the filter. */
maxError = SIZE_MAX;
for (i=0; i < 2; i++)
{
const DDSSourceBlock*
sources[3];
size_t
error = 0;
/* Sum the squared per-channel residuals for candidate i. */
for (c=0; c < 3; c++)
{
sources[c] = &lookup[c][color[c]].sources[i];
error += ((size_t) sources[c]->error) * ((size_t) sources[c]->error);
}
if (error > maxError)
continue;
/* Normalize endpoints from 5/6/5-bit levels to [0,1]. */
start->x = (float) sources[0]->start / 31.0f;
start->y = (float) sources[1]->start / 63.0f;
start->z = (float) sources[2]->start / 31.0f;
end->x = (float) sources[0]->end / 31.0f;
end->y = (float) sources[1]->end / 63.0f;
end->z = (float) sources[2]->end / 31.0f;
*index = (unsigned char) (2*i);
maxError = error;
}
}
/*
  Estimate the principal eigenvector of the symmetric 3x3 covariance
  matrix (packed as 6 floats: xx,xy,xz,yy,yz,zz) by 8 rounds of power
  iteration, normalizing by the largest component each round.
*/
static void ComputePrincipleComponent(const float *covariance,
DDSVector3 *principle)
{
DDSVector4
row0,
row1,
row2,
v;
register ssize_t
i;
/* Expand the packed symmetric matrix into three rows (w unused). */
row0.x = covariance[0];
row0.y = covariance[1];
row0.z = covariance[2];
row0.w = 0.0f;
row1.x = covariance[1];
row1.y = covariance[3];
row1.z = covariance[4];
row1.w = 0.0f;
row2.x = covariance[2];
row2.y = covariance[4];
row2.z = covariance[5];
row2.w = 0.0f;
VectorInit(v,1.0f);
for (i=0; i < 8; i++)
{
DDSVector4
w;
float
a;
/* w = M * v (matrix-vector product, row by row). */
w.x = row0.x * v.x;
w.y = row0.y * v.x;
w.z = row0.z * v.x;
w.w = row0.w * v.x;
w.x = (row1.x * v.y) + w.x;
w.y = (row1.y * v.y) + w.y;
w.z = (row1.z * v.y) + w.z;
w.w = (row1.w * v.y) + w.w;
w.x = (row2.x * v.z) + w.x;
w.y = (row2.y * v.z) + w.y;
w.z = (row2.z * v.z) + w.z;
w.w = (row2.w * v.z) + w.w;
/* Scale by the largest of x/y/z to keep the iteration bounded. */
a = 1.0f / MagickMax(w.x,MagickMax(w.y,w.z));
v.x = w.x * a;
v.y = w.y * a;
v.z = w.z * a;
v.w = w.w * a;
}
VectorCopy43(v,principle);
}
/*
  Compute the weighted 3x3 covariance of the colour points (weight in
  each point's w lane), packed into covariance[6] as xx,xy,xz,yy,yz,zz.
*/
static void ComputeWeightedCovariance(const size_t count,
const DDSVector4 *points, float *covariance)
{
DDSVector3
centroid;
float
total;
size_t
i;
/* Weighted centroid of the point cloud. */
total = 0.0f;
VectorInit3(centroid,0.0f);
for (i=0; i < count; i++)
{
total += points[i].w;
centroid.x += (points[i].x * points[i].w);
centroid.y += (points[i].y * points[i].w);
centroid.z += (points[i].z * points[i].w);
}
/* Guard against division by a (near-)zero total weight (FLT_EPSILON). */
if( total > 1.192092896e-07F)
{
centroid.x /= total;
centroid.y /= total;
centroid.z /= total;
}
/* Accumulate the weighted outer products of the centred points. */
for (i=0; i < 6; i++)
covariance[i] = 0.0f;
for (i = 0; i < count; i++)
{
DDSVector3
a,
b;
a.x = points[i].x - centroid.x;
a.y = points[i].y - centroid.y;
a.z = points[i].z - centroid.z;
b.x = points[i].w * a.x;
b.y = points[i].w * a.y;
b.z = points[i].w * a.z;
covariance[0] += a.x*b.x;
covariance[1] += a.x*b.y;
covariance[2] += a.x*b.z;
covariance[3] += a.y*b.y;
covariance[4] += a.y*b.z;
covariance[5] += a.z*b.z;
}
/*
  Sort the block's points by their projection onto `axis' and record the
  resulting permutation in row `iteration' of `order' (16 bytes per
  row).  Returns MagickFalse if the new ordering duplicates any earlier
  row (so CompressClusterFit can stop iterating); otherwise fills
  pointsWeights/xSumwSum with the weighted points in sorted order and
  returns MagickTrue.
  NOTE(review): dps[] and each order row hold 16 entries — assumes
  count <= 16; confirm at call sites.
*/
static MagickBooleanType ConstructOrdering(const size_t count,
const DDSVector4 *points, const DDSVector3 axis, DDSVector4 *pointsWeights,
DDSVector4 *xSumwSum, unsigned char *order, size_t iteration)
{
float
dps[16],
f;
register ssize_t
i;
size_t
j;
unsigned char
c,
*o,
*p;
o = order + (16*iteration);
/* Project each point onto the axis; start from the identity order. */
for (i=0; i < (ssize_t) count; i++)
{
dps[i] = Dot(points[i],axis);
o[i] = (unsigned char)i;
}
/* Insertion sort of dps[] with o[] permuted in lockstep (stable). */
for (i=0; i < (ssize_t) count; i++)
{
for (j=i; j > 0 && dps[j] < dps[j - 1]; j--)
{
f = dps[j];
dps[j] = dps[j - 1];
dps[j - 1] = f;
c = o[j];
o[j] = o[j - 1];
o[j - 1] = c;
}
}
/* Reject the ordering if it matches any previous iteration's row. */
for (i=0; i < (ssize_t) iteration; i++)
{
MagickBooleanType
same;
p = order + (16*i);
same = MagickTrue;
for (j=0; j < count; j++)
{
if (o[j] != p[j])
{
same = MagickFalse;
break;
}
}
if (same != MagickFalse)
return MagickFalse;
}
/* Emit weighted points in sorted order and their running totals. */
xSumwSum->x = 0;
xSumwSum->y = 0;
xSumwSum->z = 0;
xSumwSum->w = 0;
for (i=0; i < (ssize_t) count; i++)
{
DDSVector4
v;
j = (size_t) o[i];
v.x = points[j].w * points[j].x;
v.y = points[j].w * points[j].y;
v.z = points[j].w * points[j].z;
v.w = points[j].w * 1.0f;
VectorCopy44(v,&pointsWeights[i]);
VectorAdd(*xSumwSum,v,xSumwSum);
}
return MagickTrue;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s D D S %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsDDS() returns MagickTrue if the image format type, identified by the
% magick string, is DDS.
%
% The format of the IsDDS method is:
%
% MagickBooleanType IsDDS(const unsigned char *magick,const size_t length)
%
% A description of each parameter follows:
%
% o magick: compare image format pattern against these bytes.
%
% o length: Specifies the length of the magick string.
%
*/
/* Return MagickTrue when the stream begins with the four-byte DDS magic
   (compared with LocaleNCompare, preserving the original's
   locale-insensitive matching). */
static MagickBooleanType IsDDS(const unsigned char *magick, const size_t length)
{
  if (length < 4)
    return(MagickFalse);
  return(LocaleNCompare((char *) magick,"DDS ",4) == 0 ? MagickTrue :
    MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d D D S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadDDSImage() reads a DirectDraw Surface image file and returns it. It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the ReadDDSImage method is:
%
% Image *ReadDDSImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: The image info.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Read a DDS file: parse the header, select the decoder matching the
  pixel format (uncompressed RGB/RGBA, luminance, or DXT1/3/5), then
  decode one frame per cubemap face or volume slice.
*/
static Image *ReadDDSImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
const char
*option;
CompressionType
compression;
DDSInfo
dds_info;
DDSDecoder
*decoder;
Image
*image;
MagickBooleanType
status,
cubemap,
volume,
read_mipmaps;
PixelTrait
alpha_trait;
size_t
n,
num_images;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
cubemap=MagickFalse,
volume=MagickFalse,
read_mipmaps=MagickFalse;
image=AcquireImage(image_info,exception);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Initialize image structure.
*/
if (ReadDDSInfo(image, &dds_info) != MagickTrue)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP)
cubemap = MagickTrue;
if (dds_info.ddscaps2 & DDSCAPS2_VOLUME && dds_info.depth > 0)
volume = MagickTrue;
/* Position after the 128-byte magic + header before the pixel data. */
(void) SeekBlob(image, 128, SEEK_SET);
/*
Determine pixel format
*/
if (dds_info.pixelformat.flags & DDPF_RGB)
{
compression = NoCompression;
if (dds_info.pixelformat.flags & DDPF_ALPHAPIXELS)
{
alpha_trait = BlendPixelTrait;
decoder = ReadUncompressedRGBA;
}
else
{
alpha_trait = UndefinedPixelTrait;
decoder = ReadUncompressedRGB;
}
}
else if (dds_info.pixelformat.flags & DDPF_LUMINANCE)
{
compression = NoCompression;
if (dds_info.pixelformat.flags & DDPF_ALPHAPIXELS)
{
/* Not sure how to handle this */
ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
}
else
{
alpha_trait = UndefinedPixelTrait;
decoder = ReadUncompressedRGB;
}
}
else if (dds_info.pixelformat.flags & DDPF_FOURCC)
{
switch (dds_info.pixelformat.fourcc)
{
case FOURCC_DXT1:
{
alpha_trait = UndefinedPixelTrait;
compression = DXT1Compression;
decoder = ReadDXT1;
break;
}
case FOURCC_DXT3:
{
alpha_trait = BlendPixelTrait;
compression = DXT3Compression;
decoder = ReadDXT3;
break;
}
case FOURCC_DXT5:
{
alpha_trait = BlendPixelTrait;
compression = DXT5Compression;
decoder = ReadDXT5;
break;
}
default:
{
/* Unknown FOURCC */
ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
}
}
}
else
{
/* Neither compressed nor uncompressed... thus unsupported */
ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
}
num_images = 1;
if (cubemap)
{
/*
Determine number of faces defined in the cubemap
*/
num_images = 0;
if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEX) num_images++;
if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEX) num_images++;
if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEY) num_images++;
if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEY) num_images++;
if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEZ) num_images++;
if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEZ) num_images++;
}
if (volume)
num_images = dds_info.depth;
if (num_images < 1)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
/* Mipmaps are skipped unless dds:skip-mipmaps is explicitly false. */
option=GetImageOption(image_info,"dds:skip-mipmaps");
if (IsStringFalse(option) != MagickFalse)
read_mipmaps=MagickTrue;
for (n = 0; n < num_images; n++)
{
if (n != 0)
{
/* Start a new image */
if (EOFBlob(image) != MagickFalse)
ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile");
AcquireNextImage(image_info,image,exception);
if (GetNextImageInList(image) == (Image *) NULL)
return(DestroyImageList(image));
image=SyncNextImageInList(image);
}
image->alpha_trait=alpha_trait;
image->compression=compression;
image->columns=dds_info.width;
image->rows=dds_info.height;
image->storage_class=DirectClass;
image->endian=LSBEndian;
image->depth=8;
if (image_info->ping != MagickFalse)
{
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
status=SetImageExtent(image,image->columns,image->rows,exception);
if (status == MagickFalse)
return(DestroyImageList(image));
/* Decode this face/slice with the format-specific reader. */
status=(decoder)(image_info,image,&dds_info,read_mipmaps,exception);
if (status == MagickFalse)
{
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
}
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
/*
  ReadDDSInfo() parses the 124-byte DDS_HEADER (everything after the
  "DDS " magic) plus the embedded 32-byte DDS_PIXELFORMAT into *dds_info.
  All fields are little-endian DWORDs.  Returns MagickFalse when either
  structure size is wrong or a required header flag is missing.
*/
static MagickBooleanType ReadDDSInfo(Image *image, DDSInfo *dds_info)
{
  size_t
    hdr_size,
    required;

  /* Seek to start of header (skip the 4-byte "DDS " magic) */
  (void) SeekBlob(image, 4, SEEK_SET);
  /* Check header field: dwSize of DDS_HEADER must be 124 */
  hdr_size = ReadBlobLSBLong(image);
  if (hdr_size != 124)
    return MagickFalse;
  /* Fill in DDS info struct */
  dds_info->flags = ReadBlobLSBLong(image);
  /* Check required flags */
  required=(size_t) (DDSD_WIDTH | DDSD_HEIGHT | DDSD_PIXELFORMAT);
  if ((dds_info->flags & required) != required)
    return MagickFalse;
  dds_info->height = ReadBlobLSBLong(image);
  dds_info->width = ReadBlobLSBLong(image);
  dds_info->pitchOrLinearSize = ReadBlobLSBLong(image);
  dds_info->depth = ReadBlobLSBLong(image);
  dds_info->mipmapcount = ReadBlobLSBLong(image);
  (void) SeekBlob(image, 44, SEEK_CUR); /* reserved region of 11 DWORDs */
  /* Read pixel format structure: dwSize of DDS_PIXELFORMAT must be 32 */
  hdr_size = ReadBlobLSBLong(image);
  if (hdr_size != 32)
    return MagickFalse;
  dds_info->pixelformat.flags = ReadBlobLSBLong(image);
  dds_info->pixelformat.fourcc = ReadBlobLSBLong(image);
  dds_info->pixelformat.rgb_bitcount = ReadBlobLSBLong(image);
  dds_info->pixelformat.r_bitmask = ReadBlobLSBLong(image);
  dds_info->pixelformat.g_bitmask = ReadBlobLSBLong(image);
  dds_info->pixelformat.b_bitmask = ReadBlobLSBLong(image);
  dds_info->pixelformat.alpha_bitmask = ReadBlobLSBLong(image);
  dds_info->ddscaps1 = ReadBlobLSBLong(image);
  dds_info->ddscaps2 = ReadBlobLSBLong(image);
  (void) SeekBlob(image, 12, SEEK_CUR); /* 3 reserved DWORDs */
  return MagickTrue;
}
/*
  SetDXT1Pixels() expands one decoded 4x4 DXT1 tile onto the pixel patch
  at (x,y).  `bits` carries sixteen 2-bit palette codes (LSB first, row
  major) that select among the four entries in `colors`.  Returns
  MagickFalse when a texel carries a non-zero colors.a[] value while the
  image's alpha trait is still undefined; the caller then enables alpha
  and redraws the block.
*/
static MagickBooleanType SetDXT1Pixels(Image *image,ssize_t x,ssize_t y,
  DDSColors colors,size_t bits,Quantum *q)
{
  register ssize_t
    i;

  ssize_t
    j;

  unsigned char
    code;

  for (j = 0; j < 4; j++)
  {
    for (i = 0; i < 4; i++)
    {
      /* Texels outside the image are skipped; q only advances for texels
         inside the queued patch, which was sized MagickMin(4,...). */
      if ((x + i) < (ssize_t) image->columns &&
          (y + j) < (ssize_t) image->rows)
        {
          code=(unsigned char) ((bits >> ((j*4+i)*2)) & 0x3);
          SetPixelRed(image,ScaleCharToQuantum(colors.r[code]),q);
          SetPixelGreen(image,ScaleCharToQuantum(colors.g[code]),q);
          SetPixelBlue(image,ScaleCharToQuantum(colors.b[code]),q);
          SetPixelOpacity(image,ScaleCharToQuantum(colors.a[code]),q);
          if ((colors.a[code] != 0) &&
              (image->alpha_trait == UndefinedPixelTrait))
            return(MagickFalse);
          q+=GetPixelChannels(image);
        }
    }
  }
  return(MagickTrue);
}
/*
  ReadMipmaps() decodes the mipmap chain that follows the main surface,
  appending each level as a new image in the list and decoding it with
  `decoder`.  Each level halves width and height (floored at 1).
*/
static MagickBooleanType ReadMipmaps(const ImageInfo *image_info,Image *image,
  DDSInfo *dds_info,DDSPixelDecoder decoder,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  /*
    Only read mipmaps for textures and cube maps
  */
  if (EOFBlob(image) != MagickFalse)
    {
      ThrowFileException(exception,CorruptImageWarning,"UnexpectedEndOfFile",
        image->filename);
      return(MagickFalse);
    }
  status=MagickTrue;
  if (dds_info->ddscaps1 & DDSCAPS_MIPMAP
      && (dds_info->ddscaps1 & DDSCAPS_TEXTURE
      || dds_info->ddscaps2 & DDSCAPS2_CUBEMAP))
    {
      register ssize_t
        i;

      size_t
        h,
        w;

      w=DIV2(dds_info->width);
      h=DIV2(dds_info->height);
      /*
        Mipmapcount includes the main image, so start from one
      */
      for (i = 1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++)
      {
        AcquireNextImage(image_info,image,exception);
        if (GetNextImageInList(image) == (Image *) NULL)
          return(MagickFalse);
        image=SyncNextImageInList(image);
        status=SetImageExtent(image,w,h,exception);
        if (status == MagickFalse)
          break;
        status=decoder(image,dds_info,exception);
        if (status == MagickFalse)
          break;
        if ((w == 1) && (h == 1))
          break;
        w=DIV2(w);
        h=DIV2(h);
      }
    }
  return(status);
}
/*
  ReadDXT1Pixels() decodes a DXT1 (BC1) payload: one 8-byte block per 4x4
  texel tile, holding two 16-bit 565 endpoint colors followed by 32 bits
  of 2-bit palette codes.
*/
static MagickBooleanType ReadDXT1Pixels(Image *image,
  DDSInfo *magick_unused(dds_info),ExceptionInfo *exception)
{
  DDSColors
    colors;

  register Quantum
    *q;

  register ssize_t
    x;

  size_t
    bits;

  ssize_t
    y;

  unsigned short
    c0,
    c1;

  magick_unreferenced(dds_info);
  for (y = 0; y < (ssize_t) image->rows; y += 4)
  {
    for (x = 0; x < (ssize_t) image->columns; x += 4)
    {
      /* Get 4x4 patch of pixels to write on (clipped at the edges) */
      q=QueueAuthenticPixels(image,x,y,MagickMin(4,image->columns-x),
        MagickMin(4,image->rows-y),exception);
      if (q == (Quantum *) NULL)
        return(MagickFalse);
      /* Read 8 bytes of data from the image */
      c0=ReadBlobLSBShort(image);
      c1=ReadBlobLSBShort(image);
      bits=ReadBlobLSBLong(image);
      CalculateColors(c0,c1,&colors,MagickFalse);
      if (EOFBlob(image) != MagickFalse)
        return(MagickFalse);
      /* Write the pixels */
      if (SetDXT1Pixels(image,x,y,colors,bits,q) == MagickFalse)
        {
          /* Correct alpha: a transparent texel was found, so enable the
             alpha channel and decode this block again. */
          SetImageAlpha(image,QuantumRange,exception);
          q=QueueAuthenticPixels(image,x,y,MagickMin(4,image->columns-x),
            MagickMin(4,image->rows-y),exception);
          if (q != (Quantum *) NULL)
            SetDXT1Pixels(image,x,y,colors,bits,q);
        }
      if (SyncAuthenticPixels(image,exception) == MagickFalse)
        return(MagickFalse);
    }
    if (EOFBlob(image) != MagickFalse)
      return(MagickFalse);
  }
  return(MagickTrue);
}
/*
  ReadDXT1() decodes the top-level DXT1 surface, then either decodes the
  mipmap chain into extra images or seeks past it (8 bytes per 4x4 tile).
*/
static MagickBooleanType ReadDXT1(const ImageInfo *image_info,Image *image,
  DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
  ExceptionInfo *exception)
{
  if (ReadDXT1Pixels(image,dds_info,exception) == MagickFalse)
    return(MagickFalse);
  if (read_mipmaps == MagickFalse)
    return(SkipDXTMipmaps(image,dds_info,8,exception));
  return(ReadMipmaps(image_info,image,dds_info,ReadDXT1Pixels,exception));
}
/*
  ReadDXT3Pixels() decodes a DXT3 (BC2) payload: each 16-byte block holds
  64 bits of explicit 4-bit alpha values followed by a DXT1-style color
  block (two 565 endpoints plus 32 bits of 2-bit palette codes).
  Bug fix: the texel clipping test compared x against image->rows and y
  against image->columns; x indexes columns and y indexes rows (as in the
  DXT1 and DXT5 decoders), so non-square images were mis-clipped.
*/
static MagickBooleanType ReadDXT3Pixels(Image *image,
  DDSInfo *magick_unused(dds_info),ExceptionInfo *exception)
{
  DDSColors
    colors;

  register Quantum
    *q;

  register ssize_t
    i,
    x;

  unsigned char
    alpha;

  size_t
    a0,
    a1,
    bits,
    code;

  ssize_t
    j,
    y;

  unsigned short
    c0,
    c1;

  magick_unreferenced(dds_info);
  for (y = 0; y < (ssize_t) image->rows; y += 4)
  {
    for (x = 0; x < (ssize_t) image->columns; x += 4)
    {
      /* Get 4x4 patch of pixels to write on */
      q = QueueAuthenticPixels(image, x, y, MagickMin(4, image->columns - x),
        MagickMin(4, image->rows - y),exception);
      if (q == (Quantum *) NULL)
        return(MagickFalse);
      /* Read alpha values (8 bytes): a0 covers rows 0-1, a1 rows 2-3 */
      a0 = ReadBlobLSBLong(image);
      a1 = ReadBlobLSBLong(image);
      /* Read 8 bytes of color data from the image */
      c0 = ReadBlobLSBShort(image);
      c1 = ReadBlobLSBShort(image);
      bits = ReadBlobLSBLong(image);
      CalculateColors(c0, c1, &colors, MagickTrue);
      if (EOFBlob(image) != MagickFalse)
        return(MagickFalse);
      /* Write the pixels */
      for (j = 0; j < 4; j++)
      {
        for (i = 0; i < 4; i++)
        {
          /* Fix: x is a column index and y a row index */
          if ((x + i) < (ssize_t) image->columns &&
              (y + j) < (ssize_t) image->rows)
            {
              code = (bits >> ((4*j+i)*2)) & 0x3;
              SetPixelRed(image,ScaleCharToQuantum(colors.r[code]),q);
              SetPixelGreen(image,ScaleCharToQuantum(colors.g[code]),q);
              SetPixelBlue(image,ScaleCharToQuantum(colors.b[code]),q);
              /*
                Extract alpha value: multiply 0..15 by 17 to get range 0..255
              */
              if (j < 2)
                alpha = 17U * (unsigned char) ((a0 >> (4*(4*j+i))) & 0xf);
              else
                alpha = 17U * (unsigned char) ((a1 >> (4*(4*(j-2)+i))) & 0xf);
              SetPixelAlpha(image,ScaleCharToQuantum((unsigned char) alpha),q);
              q+=GetPixelChannels(image);
            }
        }
      }
      if (SyncAuthenticPixels(image,exception) == MagickFalse)
        return(MagickFalse);
    }
    if (EOFBlob(image) != MagickFalse)
      return(MagickFalse);
  }
  return(MagickTrue);
}
/*
  ReadDXT3() decodes the top-level DXT3 surface, then either decodes the
  mipmap chain into extra images or seeks past it (16 bytes per 4x4 tile).
*/
static MagickBooleanType ReadDXT3(const ImageInfo *image_info,Image *image,
  DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
  ExceptionInfo *exception)
{
  if (ReadDXT3Pixels(image,dds_info,exception) == MagickFalse)
    return(MagickFalse);
  if (read_mipmaps == MagickFalse)
    return(SkipDXTMipmaps(image,dds_info,16,exception));
  return(ReadMipmaps(image_info,image,dds_info,ReadDXT3Pixels,exception));
}
/*
  ReadDXT5Pixels() decodes a DXT5 (BC3) payload: each 16-byte block holds
  two 8-bit alpha endpoints, 48 bits of 3-bit interpolated alpha codes,
  then a DXT1-style color block (two 565 endpoints plus 32 bits of 2-bit
  palette codes).
*/
static MagickBooleanType ReadDXT5Pixels(Image *image,
  DDSInfo *magick_unused(dds_info),ExceptionInfo *exception)
{
  DDSColors
    colors;

  MagickSizeType
    alpha_bits;

  register Quantum
    *q;

  register ssize_t
    i,
    x;

  unsigned char
    a0,
    a1;

  size_t
    alpha,
    bits,
    code,
    alpha_code;

  ssize_t
    j,
    y;

  unsigned short
    c0,
    c1;

  magick_unreferenced(dds_info);
  for (y = 0; y < (ssize_t) image->rows; y += 4)
  {
    for (x = 0; x < (ssize_t) image->columns; x += 4)
    {
      /* Get 4x4 patch of pixels to write on */
      q = QueueAuthenticPixels(image, x, y, MagickMin(4, image->columns - x),
        MagickMin(4, image->rows - y),exception);
      if (q == (Quantum *) NULL)
        return(MagickFalse);
      /* Read alpha values (8 bytes): two endpoints + 48 bits of codes */
      a0 = (unsigned char) ReadBlobByte(image);
      a1 = (unsigned char) ReadBlobByte(image);
      alpha_bits = (MagickSizeType)ReadBlobLSBLong(image);
      alpha_bits = alpha_bits | ((MagickSizeType)ReadBlobLSBShort(image) << 32);
      /* Read 8 bytes of color data from the image */
      c0 = ReadBlobLSBShort(image);
      c1 = ReadBlobLSBShort(image);
      bits = ReadBlobLSBLong(image);
      CalculateColors(c0, c1, &colors, MagickTrue);
      if (EOFBlob(image) != MagickFalse)
        return(MagickFalse);
      /* Write the pixels */
      for (j = 0; j < 4; j++)
      {
        for (i = 0; i < 4; i++)
        {
          if ((x + i) < (ssize_t) image->columns &&
              (y + j) < (ssize_t) image->rows)
            {
              code = (bits >> ((4*j+i)*2)) & 0x3;
              SetPixelRed(image,ScaleCharToQuantum(colors.r[code]),q);
              SetPixelGreen(image,ScaleCharToQuantum(colors.g[code]),q);
              SetPixelBlue(image,ScaleCharToQuantum(colors.b[code]),q);
              /* Extract alpha value: codes 0/1 are the endpoints; when
                 a0 > a1 the remaining six codes interpolate between them,
                 otherwise four interpolants plus explicit 0 and 255. */
              alpha_code = (size_t) (alpha_bits >> (3*(4*j+i))) & 0x7;
              if (alpha_code == 0)
                alpha = a0;
              else if (alpha_code == 1)
                alpha = a1;
              else if (a0 > a1)
                alpha = ((8-alpha_code) * a0 + (alpha_code-1) * a1) / 7;
              else if (alpha_code == 6)
                alpha = 0;
              else if (alpha_code == 7)
                alpha = 255;
              else
                alpha = (((6-alpha_code) * a0 + (alpha_code-1) * a1) / 5);
              SetPixelAlpha(image,ScaleCharToQuantum((unsigned char) alpha),q);
              q+=GetPixelChannels(image);
            }
        }
      }
      if (SyncAuthenticPixels(image,exception) == MagickFalse)
        return(MagickFalse);
    }
    if (EOFBlob(image) != MagickFalse)
      return(MagickFalse);
  }
  return(MagickTrue);
}
/*
  ReadDXT5() decodes the top-level DXT5 surface, then either decodes the
  mipmap chain into extra images or seeks past it (16 bytes per 4x4 tile).
*/
static MagickBooleanType ReadDXT5(const ImageInfo *image_info,Image *image,
  DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
  ExceptionInfo *exception)
{
  if (ReadDXT5Pixels(image,dds_info,exception) == MagickFalse)
    return(MagickFalse);
  if (read_mipmaps == MagickFalse)
    return(SkipDXTMipmaps(image,dds_info,16,exception));
  return(ReadMipmaps(image_info,image,dds_info,ReadDXT5Pixels,exception));
}
/*
  ReadUncompressedRGBPixels() reads raw pixel data one scanline at a
  time: 8-bit grayscale, 16-bit 5:6:5 RGB, 24-bit BGR, or 32-bit BGRX
  (the fourth byte is discarded).
*/
static MagickBooleanType ReadUncompressedRGBPixels(Image *image,
  DDSInfo *dds_info,ExceptionInfo *exception)
{
  register Quantum
    *q;

  ssize_t
    x, y;

  unsigned short
    color;

  for (y = 0; y < (ssize_t) image->rows; y++)
  {
    q = QueueAuthenticPixels(image, 0, y, image->columns, 1,exception);
    if (q == (Quantum *) NULL)
      return(MagickFalse);
    for (x = 0; x < (ssize_t) image->columns; x++)
    {
      if (dds_info->pixelformat.rgb_bitcount == 8)
        SetPixelGray(image,ScaleCharToQuantum(ReadBlobByte(image)),q);
      else if (dds_info->pixelformat.rgb_bitcount == 16)
        {
          /* 5:6:5 bit fields, each expanded to 0..255 */
          color=ReadBlobShort(image);
          SetPixelRed(image,ScaleCharToQuantum((unsigned char)
            (((color >> 11)/31.0)*255)),q);
          SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
            ((((unsigned short)(color << 5) >> 10)/63.0)*255)),q);
          SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
            ((((unsigned short)(color << 11) >> 11)/31.0)*255)),q);
        }
      else
        {
          /* Bytes are stored in BGR order */
          SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelRed(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          if (dds_info->pixelformat.rgb_bitcount == 32)
            (void) ReadBlobByte(image); /* skip the unused X byte */
        }
      q+=GetPixelChannels(image);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      return(MagickFalse);
    if (EOFBlob(image) != MagickFalse)
      return(MagickFalse);
  }
  return(MagickTrue);
}
/*
  ReadUncompressedRGB() reads an uncompressed RGB surface and its mipmap
  chain.  16-bit data is only accepted with the standard 5:6:5 masks;
  8-bit data is promoted to grayscale.
*/
static MagickBooleanType ReadUncompressedRGB(const ImageInfo *image_info,
  Image *image,DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
  ExceptionInfo *exception)
{
  if (dds_info->pixelformat.rgb_bitcount == 8)
    (void) SetImageType(image,GrayscaleType,exception);
  else if (dds_info->pixelformat.rgb_bitcount == 16 && !IsBitMask(
    dds_info->pixelformat,0xf800,0x07e0,0x001f,0x0000))
    ThrowBinaryException(CorruptImageError,"ImageTypeNotSupported",
      image->filename);
  if (ReadUncompressedRGBPixels(image,dds_info,exception) == MagickFalse)
    return(MagickFalse);
  if (read_mipmaps != MagickFalse)
    return(ReadMipmaps(image_info,image,dds_info,ReadUncompressedRGBPixels,
      exception));
  else
    return(SkipRGBMipmaps(image,dds_info,3,exception));
}
/*
  ReadUncompressedRGBAPixels() reads raw RGBA pixel data.  16-bit data is
  classified by bit mask into one of three layouts (alphaBits): 1 =
  1:5:5:5, 2 = 8-bit gray + 8-bit alpha, 4 = 4:4:4:4; anything else is
  rejected.  Other bit counts are read as 32-bit BGRA bytes.
*/
static MagickBooleanType ReadUncompressedRGBAPixels(Image *image,
  DDSInfo *dds_info,ExceptionInfo *exception)
{
  register Quantum
    *q;

  ssize_t
    alphaBits,
    x,
    y;

  unsigned short
    color;

  alphaBits=0;
  if (dds_info->pixelformat.rgb_bitcount == 16)
    {
      if (IsBitMask(dds_info->pixelformat,0x7c00,0x03e0,0x001f,0x8000))
        alphaBits=1;
      else if (IsBitMask(dds_info->pixelformat,0x00ff,0x00ff,0x00ff,0xff00))
        {
          alphaBits=2;
          (void) SetImageType(image,GrayscaleAlphaType,exception);
        }
      else if (IsBitMask(dds_info->pixelformat,0x0f00,0x00f0,0x000f,0xf000))
        alphaBits=4;
      else
        ThrowBinaryException(CorruptImageError,"ImageTypeNotSupported",
          image->filename);
    }
  for (y = 0; y < (ssize_t) image->rows; y++)
  {
    q = QueueAuthenticPixels(image, 0, y, image->columns, 1,exception);
    if (q == (Quantum *) NULL)
      return(MagickFalse);
    for (x = 0; x < (ssize_t) image->columns; x++)
    {
      if (dds_info->pixelformat.rgb_bitcount == 16)
        {
          color=ReadBlobShort(image);
          if (alphaBits == 1)
            {
              /* 1:5:5:5 -- top bit is a binary alpha flag */
              SetPixelAlpha(image,(color & (1 << 15)) ? QuantumRange : 0,q);
              SetPixelRed(image,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 1) >> 11)/31.0)*255)),q);
              SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 6) >> 11)/31.0)*255)),q);
              SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 11) >> 11)/31.0)*255)),q);
            }
          else if (alphaBits == 2)
            {
              /* high byte alpha, low byte gray */
              SetPixelAlpha(image,ScaleCharToQuantum((unsigned char)
                (color >> 8)),q);
              SetPixelGray(image,ScaleCharToQuantum((unsigned char)color),q);
            }
          else
            {
              /* 4:4:4:4 -- each nibble expanded to 0..255 */
              SetPixelAlpha(image,ScaleCharToQuantum((unsigned char)
                (((color >> 12)/15.0)*255)),q);
              SetPixelRed(image,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 4) >> 12)/15.0)*255)),q);
              SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 8) >> 12)/15.0)*255)),q);
              SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 12) >> 12)/15.0)*255)),q);
            }
        }
      else
        {
          /* Bytes are stored in BGRA order */
          SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelRed(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelAlpha(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
        }
      q+=GetPixelChannels(image);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      return(MagickFalse);
    if (EOFBlob(image) != MagickFalse)
      return(MagickFalse);
  }
  return(MagickTrue);
}
/*
  ReadUncompressedRGBA() reads the top-level uncompressed RGBA surface,
  then either decodes the mipmap chain or seeks past it (4 bytes/pixel).
*/
static MagickBooleanType ReadUncompressedRGBA(const ImageInfo *image_info,
  Image *image,DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
  ExceptionInfo *exception)
{
  if (ReadUncompressedRGBAPixels(image,dds_info,exception) == MagickFalse)
    return(MagickFalse);
  if (read_mipmaps == MagickFalse)
    return(SkipRGBMipmaps(image,dds_info,4,exception));
  return(ReadMipmaps(image_info,image,dds_info,ReadUncompressedRGBAPixels,
    exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e g i s t e r D D S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RegisterDDSImage() adds attributes for the DDS image format to
% the list of supported formats. The attributes include the image format
% tag, a method to read and/or write the format, whether the format
% supports the saving of more than one frame to the same file or blob,
% whether the format supports native in-memory I/O, and a brief
% description of the format.
%
% The format of the RegisterDDSImage method is:
%
% RegisterDDSImage(void)
%
*/
/*
  RegisterDDSImage() registers the DDS coder under each of its format
  aliases (DDS, DXT1, DXT5) and returns the coder signature.
*/
ModuleExport size_t RegisterDDSImage(void)
{
  static const char
    *aliases[] = { "DDS", "DXT1", "DXT5" };

  MagickInfo
    *entry;

  ssize_t
    n;

  for (n=0; n < 3; n++)
  {
    entry=AcquireMagickInfo("DDS",aliases[n],"Microsoft DirectDraw Surface");
    entry->decoder=(DecodeImageHandler *) ReadDDSImage;
    entry->encoder=(EncodeImageHandler *) WriteDDSImage;
    entry->magick=(IsImageFormatHandler *) IsDDS;
    entry->flags|=CoderDecoderSeekableStreamFlag;
    (void) RegisterMagickInfo(entry);
  }
  return(MagickImageCoderSignature);
}
/*
  RemapIndices() routes the 16 per-texel indices through `map`: a map
  entry of -1 (texel with no color sample) yields palette index 3, any
  other entry copies the index stored in `source` at that position.
*/
static void RemapIndices(const ssize_t *map, const unsigned char *source,
  unsigned char *target)
{
  ssize_t
    n;

  for (n=0; n < 16; n++)
    target[n]=(map[n] == -1) ? (unsigned char) 3 : source[map[n]];
}
/*
  Skip the mipmap images for compressed (DXTn) dds files.  Each level
  occupies ceil(w/4)*ceil(h/4)*texel_size bytes (texel_size is 8 for
  DXT1, 16 for DXT3/DXT5).  Only applies to textures and cube maps.
*/
static MagickBooleanType SkipDXTMipmaps(Image *image,DDSInfo *dds_info,
  int texel_size,ExceptionInfo *exception)
{
  /*
    Only skip mipmaps for textures and cube maps
  */
  if (EOFBlob(image) != MagickFalse)
    {
      ThrowFileException(exception,CorruptImageWarning,"UnexpectedEndOfFile",
        image->filename);
      return(MagickFalse);
    }
  if (dds_info->ddscaps1 & DDSCAPS_MIPMAP
      && (dds_info->ddscaps1 & DDSCAPS_TEXTURE
      || dds_info->ddscaps2 & DDSCAPS2_CUBEMAP))
    {
      MagickOffsetType
        offset;

      register ssize_t
        i;

      size_t
        h,
        w;

      w=DIV2(dds_info->width);
      h=DIV2(dds_info->height);
      /*
        Mipmapcount includes the main image, so start from one
      */
      for (i = 1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++)
      {
        /* number of 4x4 tiles in this level times bytes per tile */
        offset=(MagickOffsetType)((w+3)/4)*((h+3)/4)*texel_size;
        if (SeekBlob(image,offset,SEEK_CUR) < 0)
          break;
        w=DIV2(w);
        h=DIV2(h);
        if ((w == 1) && (h == 1))
          break;
      }
    }
  return(MagickTrue);
}
/*
  Skip the mipmap images for uncompressed (RGB or RGBA) dds files.  Each
  level occupies w*h*pixel_size bytes.  Only applies to textures and
  cube maps.
*/
static MagickBooleanType SkipRGBMipmaps(Image *image,DDSInfo *dds_info,
  int pixel_size,ExceptionInfo *exception)
{
  /*
    Only skip mipmaps for textures and cube maps
  */
  if (EOFBlob(image) != MagickFalse)
    {
      /* NOTE(review): SkipDXTMipmaps raises CorruptImageWarning for the
         same condition; this uses CorruptImageError -- confirm which
         severity is intended and unify. */
      ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
        image->filename);
      return(MagickFalse);
    }
  if (dds_info->ddscaps1 & DDSCAPS_MIPMAP
      && (dds_info->ddscaps1 & DDSCAPS_TEXTURE
      || dds_info->ddscaps2 & DDSCAPS2_CUBEMAP))
    {
      MagickOffsetType
        offset;

      register ssize_t
        i;

      size_t
        h,
        w;

      w=DIV2(dds_info->width);
      h=DIV2(dds_info->height);
      /*
        Mipmapcount includes the main image, so start from one
      */
      for (i=1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++)
      {
        offset=(MagickOffsetType)w*h*pixel_size;
        if (SeekBlob(image,offset,SEEK_CUR) < 0)
          break;
        w=DIV2(w);
        h=DIV2(h);
        if ((w == 1) && (h == 1))
          break;
      }
    }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n r e g i s t e r D D S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnregisterDDSImage() removes format registrations made by the
% DDS module from the list of supported formats.
%
% The format of the UnregisterDDSImage method is:
%
% UnregisterDDSImage(void)
%
*/
ModuleExport void UnregisterDDSImage(void)
{
(void) UnregisterMagickInfo("DDS");
(void) UnregisterMagickInfo("DXT1");
(void) UnregisterMagickInfo("DXT5");
}
/*
  WriteAlphas() emits the 8-byte DXT5 alpha block for one 4x4 tile: it
  compresses the 16 alpha samples with both the 5-interpolant encoding
  (which has explicit 0/255 codes) and the 7-interpolant encoding, keeps
  whichever has the smaller error, then writes the two endpoint bytes
  followed by sixteen 3-bit indices packed into 6 bytes.
*/
static void WriteAlphas(Image *image, const ssize_t *alphas, size_t min5,
  size_t max5, size_t min7, size_t max7)
{
  register ssize_t
    i;

  size_t
    err5,
    err7,
    j;

  unsigned char
    indices5[16],
    indices7[16];

  FixRange(min5,max5,5);
  err5 = CompressAlpha(min5,max5,5,alphas,indices5);
  FixRange(min7,max7,7);
  err7 = CompressAlpha(min7,max7,7,alphas,indices7);
  if (err7 < err5)
    {
      /* 7-interpolant mode won: translate its indices into the index
         numbering used when the first endpoint is the smaller one. */
      for (i=0; i < 16; i++)
      {
        unsigned char
          index;

        index = indices7[i];
        if( index == 0 )
          indices5[i] = 1;
        else if (index == 1)
          indices5[i] = 0;
        else
          indices5[i] = 9 - index;
      }
      /* swap endpoints so the decoder selects 7-interpolant mode */
      min5 = max7;
      max5 = min7;
    }
  (void) WriteBlobByte(image,(unsigned char) min5);
  (void) WriteBlobByte(image,(unsigned char) max5);
  /* pack 16 x 3-bit indices as two groups of 8 indices = 3 bytes each */
  for(i=0; i < 2; i++)
  {
    size_t
      value = 0;

    for (j=0; j < 8; j++)
    {
      size_t index = (size_t) indices5[j + i*8];
      value |= ( index << 3*j );
    }
    for (j=0; j < 3; j++)
    {
      size_t byte = (value >> 8*j) & 0xff;
      (void) WriteBlobByte(image,(unsigned char) byte);
    }
  }
}
/*
  WriteCompressed() fits a two-endpoint color line to the tile's distinct
  colors (range fit, or the slower cluster fit when requested and more
  than zero colors are present) and writes the resulting DXT1 color
  block via WriteIndices().
*/
static void WriteCompressed(Image *image, const size_t count,
  DDSVector4 *points, const ssize_t *map, const MagickBooleanType clusterFit)
{
  float
    covariance[16];

  DDSVector3
    end,
    principle,
    start;

  DDSVector4
    metric;

  unsigned char
    indices[16];

  VectorInit(metric,1.0f);
  VectorInit3(start,0.0f);
  VectorInit3(end,0.0f);
  /* principal component of the color cloud guides the endpoint search */
  ComputeWeightedCovariance(count,points,covariance);
  ComputePrincipleComponent(covariance,&principle);
  if ((clusterFit == MagickFalse) || (count == 0))
    CompressRangeFit(count,points,map,principle,metric,&start,&end,indices);
  else
    CompressClusterFit(count,points,map,principle,metric,&start,&end,indices);
  WriteIndices(image,start,end,indices);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e D D S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WriteDDSImage() writes a DirectDraw Surface image file in the DXT5 format.
%
% The format of the WriteBMPImage method is:
%
% MagickBooleanType WriteDDSImage(const ImageInfo *image_info,Image *image)
%
% A description of each parameter follows.
%
% o image_info: the image info.
%
% o image: The image.
%
*/
/*
  WriteDDSImage() writes `image` as a DDS file: DXT5 by default, DXT1
  when there is no alpha or it is requested, or uncompressed BGR(A) with
  the "dds:compression=none" option.  Mipmaps come either from the image
  list ("dds:mipmaps=fromlist") or are generated by resizing when both
  dimensions are powers of two.
  Fix: the blob is now closed on the WriteMipmaps() failure path instead
  of being leaked.
*/
static MagickBooleanType WriteDDSImage(const ImageInfo *image_info,
  Image *image, ExceptionInfo *exception)
{
  const char
    *option;

  size_t
    compression,
    columns,
    maxMipmaps,
    mipmaps,
    pixelFormat,
    rows;

  MagickBooleanType
    clusterFit,
    fromlist,
    status,
    weightByAlpha;

  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
  if (status == MagickFalse)
    return(status);
  (void) TransformImageColorspace(image,sRGBColorspace,exception);
  /* Choose pixel format and compression. */
  pixelFormat=DDPF_FOURCC;
  compression=FOURCC_DXT5;
  if (image->alpha_trait == UndefinedPixelTrait)
    compression=FOURCC_DXT1;
  if (LocaleCompare(image_info->magick,"dxt1") == 0)
    compression=FOURCC_DXT1;
  option=GetImageOption(image_info,"dds:compression");
  if (option != (char *) NULL)
    {
      if (LocaleCompare(option,"dxt1") == 0)
        compression=FOURCC_DXT1;
      if (LocaleCompare(option,"none") == 0)
        pixelFormat=DDPF_RGB;
    }
  /* Encoder quality options only apply to compressed output. */
  clusterFit=MagickFalse;
  weightByAlpha=MagickFalse;
  if (pixelFormat == DDPF_FOURCC)
    {
      option=GetImageOption(image_info,"dds:cluster-fit");
      if (IsStringTrue(option) != MagickFalse)
        {
          clusterFit=MagickTrue;
          if (compression != FOURCC_DXT1)
            {
              option=GetImageOption(image_info,"dds:weight-by-alpha");
              if (IsStringTrue(option) != MagickFalse)
                weightByAlpha=MagickTrue;
            }
        }
    }
  /* Determine the number of mipmap levels to write. */
  mipmaps=0;
  fromlist=MagickFalse;
  option=GetImageOption(image_info,"dds:mipmaps");
  if (option != (char *) NULL)
    {
      if (LocaleNCompare(option,"fromlist",8) == 0)
        {
          Image
            *next;

          /* Every following image in the list is one mipmap level. */
          fromlist=MagickTrue;
          next=image->next;
          while(next != (Image *) NULL)
          {
            mipmaps++;
            next=next->next;
          }
        }
    }
  if ((mipmaps == 0) &&
      ((image->columns & (image->columns - 1)) == 0) &&
      ((image->rows & (image->rows - 1)) == 0))
    {
      /* Power-of-two dimensions: generate the full (or capped) chain. */
      maxMipmaps=SIZE_MAX;
      if (option != (char *) NULL)
        maxMipmaps=StringToUnsignedLong(option);
      if (maxMipmaps != 0)
        {
          columns=image->columns;
          rows=image->rows;
          while ((columns != 1 || rows != 1) && mipmaps != maxMipmaps)
          {
            columns=DIV2(columns);
            rows=DIV2(rows);
            mipmaps++;
          }
        }
    }
  WriteDDSInfo(image,pixelFormat,compression,mipmaps);
  WriteImageData(image,pixelFormat,compression,clusterFit,weightByAlpha,
    exception);
  if ((mipmaps > 0) && (WriteMipmaps(image,image_info,pixelFormat,compression,
      mipmaps,fromlist,clusterFit,weightByAlpha,exception) == MagickFalse))
    {
      /* fix: close the blob instead of leaking it on failure */
      (void) CloseBlob(image);
      return(MagickFalse);
    }
  (void) CloseBlob(image);
  return(MagickTrue);
}
/*
  WriteDDSInfo() writes the "DDS " magic, the 124-byte DDS_HEADER and the
  embedded 32-byte DDS_PIXELFORMAT.  Compressed output records the linear
  size of the first surface; uncompressed output records the byte pitch.
*/
static void WriteDDSInfo(Image *image, const size_t pixelFormat,
  const size_t compression, const size_t mipmaps)
{
  char
    software[MagickPathExtent];

  register ssize_t
    i;

  unsigned int
    format,
    caps,
    flags;

  flags=(unsigned int) (DDSD_CAPS | DDSD_WIDTH | DDSD_HEIGHT |
    DDSD_PIXELFORMAT);
  caps=(unsigned int) DDSCAPS_TEXTURE;
  format=(unsigned int) pixelFormat;
  if (format == DDPF_FOURCC)
    flags=flags | DDSD_LINEARSIZE;
  else
    flags=flags | DDSD_PITCH;
  if (mipmaps > 0)
    {
      flags=flags | (unsigned int) DDSD_MIPMAPCOUNT;
      caps=caps | (unsigned int) (DDSCAPS_MIPMAP | DDSCAPS_COMPLEX);
    }
  if (format != DDPF_FOURCC && image->alpha_trait != UndefinedPixelTrait)
    format=format | DDPF_ALPHAPIXELS;
  (void) WriteBlob(image,4,(unsigned char *) "DDS ");
  (void) WriteBlobLSBLong(image,124); /* dwSize of DDS_HEADER */
  (void) WriteBlobLSBLong(image,flags);
  (void) WriteBlobLSBLong(image,(unsigned int) image->rows);
  (void) WriteBlobLSBLong(image,(unsigned int) image->columns);
  if (pixelFormat == DDPF_FOURCC)
    {
      /* Compressed DDS requires linear compressed size of first image */
      if (compression == FOURCC_DXT1)
        (void) WriteBlobLSBLong(image,(unsigned int) (MagickMax(1,
          (image->columns+3)/4)*MagickMax(1,(image->rows+3)/4)*8));
      else /* DXT5 */
        (void) WriteBlobLSBLong(image,(unsigned int) (MagickMax(1,
          (image->columns+3)/4)*MagickMax(1,(image->rows+3)/4)*16));
    }
  else
    {
      /* Uncompressed DDS requires byte pitch of first image */
      if (image->alpha_trait != UndefinedPixelTrait)
        (void) WriteBlobLSBLong(image,(unsigned int) (image->columns * 4));
      else
        (void) WriteBlobLSBLong(image,(unsigned int) (image->columns * 3));
    }
  (void) WriteBlobLSBLong(image,0x00); /* dwDepth */
  (void) WriteBlobLSBLong(image,(unsigned int) mipmaps+1); /* incl. base */
  (void) ResetMagickMemory(software,0,sizeof(software));
  (void) CopyMagickString(software,"IMAGEMAGICK",MagickPathExtent);
  (void) WriteBlob(image,44,(unsigned char *) software); /* reserved1 */
  (void) WriteBlobLSBLong(image,32); /* dwSize of DDS_PIXELFORMAT */
  (void) WriteBlobLSBLong(image,format);
  if (pixelFormat == DDPF_FOURCC)
    {
      (void) WriteBlobLSBLong(image,(unsigned int) compression);
      for(i=0;i < 5;i++) /* bitcount / masks are unused for FourCC */
        (void) WriteBlobLSBLong(image,0x00);
    }
  else
    {
      (void) WriteBlobLSBLong(image,0x00); /* no FourCC */
      if (image->alpha_trait != UndefinedPixelTrait)
        {
          /* 32-bit BGRA masks */
          (void) WriteBlobLSBLong(image,32);
          (void) WriteBlobLSBLong(image,0xff0000);
          (void) WriteBlobLSBLong(image,0xff00);
          (void) WriteBlobLSBLong(image,0xff);
          (void) WriteBlobLSBLong(image,0xff000000);
        }
      else
        {
          /* 24-bit BGR masks */
          (void) WriteBlobLSBLong(image,24);
          (void) WriteBlobLSBLong(image,0xff0000);
          (void) WriteBlobLSBLong(image,0xff00);
          (void) WriteBlobLSBLong(image,0xff);
          (void) WriteBlobLSBLong(image,0x00);
        }
    }
  (void) WriteBlobLSBLong(image,caps);
  for(i=0;i < 4;i++) /* ddscaps2 + reserved region */
    (void) WriteBlobLSBLong(image,0x00);
}
/*
  WriteFourCC() compresses the image as DXT1 or DXT5, one 4x4 tile at a
  time.  Per tile it gathers the distinct colors (weighted by alpha when
  requested), records each texel's color index in `map` (-1 for texels
  outside the image), tracks the alpha extremes needed by the DXT5 alpha
  encoder, then emits the alpha block (DXT5 only) and the color block.
*/
static void WriteFourCC(Image *image, const size_t compression,
  const MagickBooleanType clusterFit, const MagickBooleanType weightByAlpha,
  ExceptionInfo *exception)
{
  register ssize_t
    x;

  ssize_t
    i,
    y,
    bx,
    by;

  register const Quantum
    *p;

  for (y=0; y < (ssize_t) image->rows; y+=4)
  {
    for (x=0; x < (ssize_t) image->columns; x+=4)
    {
      MagickBooleanType
        match;

      DDSVector4
        point,
        points[16];

      size_t
        count = 0,
        max5 = 0,
        max7 = 0,
        min5 = 255,
        min7 = 255,
        columns = 4,
        rows = 4;

      ssize_t
        alphas[16],
        map[16];

      unsigned char
        alpha;

      /* clip the tile at the right/bottom edges */
      if (x + columns >= image->columns)
        columns = image->columns - x;
      if (y + rows >= image->rows)
        rows = image->rows - y;
      p=GetVirtualPixels(image,x,y,columns,rows,exception);
      if (p == (const Quantum *) NULL)
        break;
      for (i=0; i<16; i++)
      {
        map[i] = -1;
        alphas[i] = -1;
      }
      for (by=0; by < (ssize_t) rows; by++)
      {
        for (bx=0; bx < (ssize_t) columns; bx++)
        {
          if (compression == FOURCC_DXT5)
            alpha = ScaleQuantumToChar(GetPixelAlpha(image,p));
          else
            alpha = 255;
          if (compression == FOURCC_DXT5)
            {
              /* Track alpha ranges for the 7-interpolant encoding (min7/
                 max7) and the 5-interpolant one, which has explicit 0 and
                 255 codes so those values are excluded (min5/max5). */
              if (alpha < min7)
                min7 = alpha;
              if (alpha > max7)
                max7 = alpha;
              if (alpha != 0 && alpha < min5)
                min5 = alpha;
              if (alpha != 255 && alpha > max5)
                max5 = alpha;
            }
          alphas[4*by + bx] = (size_t)alpha;
          point.x = (float)ScaleQuantumToChar(GetPixelRed(image,p)) / 255.0f;
          point.y = (float)ScaleQuantumToChar(GetPixelGreen(image,p)) / 255.0f;
          point.z = (float)ScaleQuantumToChar(GetPixelBlue(image,p)) / 255.0f;
          point.w = weightByAlpha ? (float)(alpha + 1) / 256.0f : 1.0f;
          p+=GetPixelChannels(image);
          /* merge texels that repeat an already-seen color */
          match = MagickFalse;
          for (i=0; i < (ssize_t) count; i++)
          {
            if ((points[i].x == point.x) &&
                (points[i].y == point.y) &&
                (points[i].z == point.z) &&
                (alpha >= 128 || compression == FOURCC_DXT5))
              {
                points[i].w += point.w;
                map[4*by + bx] = i;
                match = MagickTrue;
                break;
              }
          }
          if (match != MagickFalse)
            continue;
          points[count].x = point.x;
          points[count].y = point.y;
          points[count].z = point.z;
          points[count].w = point.w;
          map[4*by + bx] = count;
          count++;
        }
      }
      for (i=0; i < (ssize_t) count; i++)
        points[i].w = sqrt(points[i].w);
      if (compression == FOURCC_DXT5)
        WriteAlphas(image,alphas,min5,max5,min7,max7);
      if (count == 1)
        WriteSingleColorFit(image,points,map);
      else
        WriteCompressed(image,count,points,map,clusterFit);
    }
  }
}
/*
  WriteImageData() dispatches to the block compressor for FourCC output
  or to the raw BGR(A) writer otherwise.
*/
static void WriteImageData(Image *image, const size_t pixelFormat,
  const size_t compression,const MagickBooleanType clusterFit,
  const MagickBooleanType weightByAlpha, ExceptionInfo *exception)
{
  if (pixelFormat != DDPF_FOURCC)
    {
      WriteUncompressed(image,exception);
      return;
    }
  WriteFourCC(image,compression,clusterFit,weightByAlpha,exception);
}
/*
  ClampToLimit() rounds `value` to the nearest integer and clamps the
  result into [0, limit].
  Bug fix: the old code stored the rounded value in a size_t and then
  tested `result < 0.0f`, which is never true for an unsigned type, so a
  negative input wrapped to a huge value and returned `limit` instead of
  the intended 0.
*/
static inline size_t ClampToLimit(const float value, const size_t limit)
{
  size_t
    result;

  if (value < 0.0f)
    return(0);
  result=(size_t) (value + 0.5f);
  if (result > limit)
    return(limit);
  return(result);
}
/*
  ColorTo565() packs a normalized RGB color into a 16-bit 5:6:5 word.
*/
static inline size_t ColorTo565(const DDSVector3 point)
{
  size_t
    blue,
    green,
    red;

  red=ClampToLimit(31.0f*point.x,31);
  green=ClampToLimit(63.0f*point.y,63);
  blue=ClampToLimit(31.0f*point.z,31);
  return((red << 11) | (green << 5) | blue);
}
/*
  WriteIndices() writes one DXT1 color block: two little-endian 565
  endpoints followed by sixteen 2-bit palette indices.  The endpoints are
  emitted with the larger value first (opaque-block ordering); when they
  must be swapped the indices are remapped accordingly, and identical
  endpoints force all indices to zero.
*/
static void WriteIndices(Image *image, const DDSVector3 start,
  const DDSVector3 end, unsigned char *indices)
{
  register ssize_t
    i;

  size_t
    a,
    b;

  unsigned char
    remapped[16];

  const unsigned char
    *ind;

  a = ColorTo565(start);
  b = ColorTo565(end);
  for (i=0; i<16; i++)
  {
    if( a < b )
      remapped[i] = (indices[i] ^ 0x1) & 0x3; /* endpoints will be swapped */
    else if( a == b )
      remapped[i] = 0;
    else
      remapped[i] = indices[i];
  }
  if( a < b )
    Swap(a,b);
  (void) WriteBlobByte(image,(unsigned char) (a & 0xff));
  (void) WriteBlobByte(image,(unsigned char) (a >> 8));
  (void) WriteBlobByte(image,(unsigned char) (b & 0xff));
  (void) WriteBlobByte(image,(unsigned char) (b >> 8));
  /* four indices per byte, LSB first */
  for (i=0; i<4; i++)
  {
    ind = remapped + 4*i;
    (void) WriteBlobByte(image,ind[0] | (ind[1] << 2) | (ind[2] << 4) |
      (ind[3] << 6));
  }
}
/*
  WriteMipmaps() appends `mipmaps` reduced-size levels after the main
  surface.  Levels come from the image list (`fromlist`) or are generated
  by halving; with "dds:fast-mipmaps" each level is resized from the
  previous one instead of from the full-size image.  Every mipmap shares
  the main image's output blob.
  Bug fix: the clusterFit and weightByAlpha arguments were passed to
  WriteImageData() in swapped order, so mipmap levels were compressed
  with the two options exchanged.
*/
static MagickBooleanType WriteMipmaps(Image *image,const ImageInfo *image_info,
  const size_t pixelFormat,const size_t compression,const size_t mipmaps,
  const MagickBooleanType fromlist,const MagickBooleanType clusterFit,
  const MagickBooleanType weightByAlpha,ExceptionInfo *exception)
{
  const char
    *option;

  Image
    *mipmap_image,
    *resize_image;

  MagickBooleanType
    fast_mipmaps,
    status;

  register ssize_t
    i;

  size_t
    columns,
    rows;

  columns=DIV2(image->columns);
  rows=DIV2(image->rows);
  option=GetImageOption(image_info,"dds:fast-mipmaps");
  fast_mipmaps=IsStringTrue(option);
  mipmap_image=image;
  resize_image=image;
  status=MagickTrue;
  for (i=0; i < (ssize_t) mipmaps; i++)
  {
    if (fromlist == MagickFalse)
      {
        /* Generate this level by resizing the source level. */
        mipmap_image=ResizeImage(resize_image,columns,rows,TriangleFilter,
          exception);
        if (mipmap_image == (Image *) NULL)
          {
            status=MagickFalse;
            break;
          }
      }
    else
      {
        /* Take the next pre-built level from the image list; its size
           must match the expected halved dimensions. */
        mipmap_image=mipmap_image->next;
        if ((mipmap_image->columns != columns) ||
            (mipmap_image->rows != rows))
          ThrowBinaryException(CoderError,"ImageColumnOrRowSizeIsNotSupported",
            image->filename);
      }
    /* Redirect the mipmap's blob to the main image's open blob. */
    DestroyBlob(mipmap_image);
    mipmap_image->blob=ReferenceBlob(image->blob);
    /* fix: clusterFit and weightByAlpha were swapped here */
    WriteImageData(mipmap_image,pixelFormat,compression,clusterFit,
      weightByAlpha,exception);
    if (fromlist == MagickFalse)
      {
        if (fast_mipmaps == MagickFalse)
          mipmap_image=DestroyImage(mipmap_image);
        else
          {
            if (resize_image != image)
              resize_image=DestroyImage(resize_image);
            resize_image=mipmap_image;
          }
      }
    columns=DIV2(columns);
    rows=DIV2(rows);
  }
  if (resize_image != image)
    resize_image=DestroyImage(resize_image);
  return(status);
}
/*
  WriteSingleColorFit() handles a tile that contains exactly one distinct
  color: it looks up the precomputed optimal endpoint pair for that color
  in DDS_LOOKUP, fills all 16 indices with the returned index, remaps
  them through `map`, and writes the color block.
*/
static void WriteSingleColorFit(Image *image, const DDSVector4 *points,
  const ssize_t *map)
{
  DDSVector3
    start,
    end;

  register ssize_t
    i;

  unsigned char
    color[3],
    index,
    indexes[16],
    indices[16];

  /* convert the normalized color back to 8-bit components */
  color[0] = (unsigned char) ClampToLimit(255.0f*points->x,255);
  color[1] = (unsigned char) ClampToLimit(255.0f*points->y,255);
  color[2] = (unsigned char) ClampToLimit(255.0f*points->z,255);
  index=0;
  ComputeEndPoints(DDS_LOOKUP,color,&start,&end,&index);
  for (i=0; i< 16; i++)
    indexes[i]=index;
  RemapIndices(map,indexes,indices);
  WriteIndices(image,start,end,indices);
}
static void WriteUncompressed(Image *image, ExceptionInfo *exception)
{
  ssize_t
    y;

  /*
    Emit the image as raw pixel data in BGR byte order, one row at a
    time, appending the alpha channel only when the image carries one.
  */
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *pixels;

    ssize_t
      x;

    pixels=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (pixels == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelBlue(image,
        pixels)));
      (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelGreen(image,
        pixels)));
      (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelRed(image,
        pixels)));
      if (image->alpha_trait != UndefinedPixelTrait)
        (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelAlpha(image,
          pixels)));
      pixels+=GetPixelChannels(image);
    }
  }
}
|
convolution_sgemm_pack1ton.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// GEMM step of im2col convolution for RVV, pack1-to-packn layout:
// input columns are plain (pack1) floats, output channels are written
// packn floats at a time using vector registers.
static void im2col_sgemm_pack1ton_rvv(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    // packn = number of 32-bit lanes per vector register (VLEN/32).
    const int packn = csrr_vlenb() / 4;
    const word_type vl = vsetvl_e32m1(packn);

    // Mat bottom_im2col(size, maxk, inch, 4u, 1, opt.workspace_allocator);
    const int size = bottom_im2col.w;   // number of output spatial positions
    const int maxk = bottom_im2col.h;   // kernel_w * kernel_h
    const int inch = bottom_im2col.c;   // input channels
    const int outch = top_blob.c;       // output channel groups (each packn wide)

    const float* bias = _bias;          // may be null when no bias is used

    // permute
    // Re-pack bottom_im2col so that all (inch * maxk) values contributing to
    // one output position are contiguous: tmp.channel(i) holds them in order.
    Mat tmp;
    tmp.create(maxk, inch, size, 4u, 1, opt.workspace_allocator);
    {
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int i = 0; i < size; i++)
        {
            float* tmpptr = tmp.channel(i);

            for (int q = 0; q < inch; q++)
            {
                const float* img0 = (const float*)bottom_im2col.channel(q) + i;

                for (int k = 0; k < maxk; k++)
                {
                    tmpptr[0] = img0[0];
                    img0 += size;       // stride between kernel taps of position i
                    tmpptr += 1;
                }
            }
        }
    }

    // Matrix multiply: for each packn-wide output channel group, accumulate
    // scalar input values against packn-wide kernel columns.
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        float* outptr0 = top_blob.channel(p);

        int i = 0;
        for (; i < size; i++)
        {
            const float* tmpptr = tmp.channel(i);
            const float* kptr0 = kernel.channel(p);

            int nn = inch * maxk; // inch always > 0

            // Seed the accumulator with the bias (or zero).
            vfloat32m1_t _sum = vfmv_v_f_f32m1(0.f, vl);

            if (bias)
            {
                _sum = vle32_v_f32m1(bias + p * packn, vl);
            }

            // sum += val * w0 across all input-channel/kernel-tap pairs.
            for (int j = 0; j < nn; j++)
            {
                float val = *tmpptr++;
                vfloat32m1_t _w0 = vle32_v_f32m1(kptr0, vl);
                _sum = vfmacc_vf_f32m1(_sum, val, _w0, vl);
                kptr0 += packn;
            }

            vse32_v_f32m1(outptr0, _sum, vl);
            outptr0 += packn;
        }
    }
}
// Full im2col + GEMM convolution driver for the pack1-to-packn RVV path:
// unfolds the input into column form, then delegates to the GEMM kernel.
static void convolution_im2col_sgemm_pack1ton_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt)
{
    const int w = bottom_blob.w;
    const int inch = bottom_blob.c;

    const int outw = top_blob.w;
    const int outh = top_blob.h;
    const int size = outw * outh;
    const int maxk = kernel_w * kernel_h;

    // im2col: one (size x maxk) plane per input channel
    Mat bottom_im2col(size, maxk, inch, 4u, 1, opt.workspace_allocator);
    {
        // jump from the end of one output row to the start of the next
        const int gap = w * stride_h - outw * stride_w;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < inch; p++)
        {
            const Mat img = bottom_blob.channel(p);
            float* dst = bottom_im2col.channel(p);

            for (int u = 0; u < kernel_h; u++)
            {
                for (int v = 0; v < kernel_w; v++)
                {
                    // start of this kernel tap within the input plane
                    const float* src = img.row(dilation_h * u) + dilation_w * v;

                    for (int i = 0; i < outh; i++)
                    {
                        for (int j = 0; j < outw; j++)
                        {
                            *dst++ = src[0];
                            src += stride_w;
                        }
                        src += gap;
                    }
                }
            }
        }
    }

    im2col_sgemm_pack1ton_rvv(bottom_im2col, top_blob, kernel, _bias, opt);
}
|
pubkeylp.h | /**
* @file pubkeylp.h -- Public key type for lattice crypto operations.
* @author TPOC: palisade@njit.edu
*
* @copyright Copyright (c) 2017, New Jersey Institute of Technology (NJIT)
* All rights reserved.
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice, this
* list of conditions and the following disclaimer in the documentation and/or other
* materials provided with the distribution.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef LBCRYPTO_CRYPTO_PUBKEYLP_H
#define LBCRYPTO_CRYPTO_PUBKEYLP_H
//Includes Section
#include <vector>
#include <iomanip>
#include "lattice/elemparams.h"
#include "lattice/ilparams.h"
#include "lattice/ildcrtparams.h"
#include "lattice/ilelement.h"
#include "utils/inttypes.h"
#include "utils/hashutil.h"
#include "math/distrgen.h"
#include "utils/serializablehelper.h"
#include "encoding/encodingparams.h"
/**
* @namespace lbcrypto
* The namespace of lbcrypto
*/
namespace lbcrypto {
//forward declarations, used to resolve circular header dependencies
template<typename Element>
class CiphertextImpl;
template<typename Element>
class RationalCiphertext;
template<typename Element>
class LPCryptoParameters;
template<typename Element>
class LPCryptoParametersLTV;
template<typename Element>
class LPCryptoParametersBGV;
template<typename Element>
class LPCryptoParametersBFV;
template<typename Element>
class LPCryptoParametersStehleSteinfeld;
template<typename Element>
class CryptoObject;
/**
 * @brief Result of an encryption operation: validity flag plus the number of
 * plaintext bytes that were consumed.
 */
struct EncryptResult {
    // Default: a failed/empty result; nothing was encrypted.
    explicit EncryptResult() : isValid(false), numBytesEncrypted(0) {}

    // Successful result covering len plaintext bytes.
    explicit EncryptResult(size_t len) : isValid(true), numBytesEncrypted(len) {}

    bool isValid;            /**< whether the encryption was successful */
    usint numBytesEncrypted; /**< count of the number of plaintext bytes that were encrypted */
};
/**
 * @brief Decryption result. This represents whether the decryption of a ciphertext was performed correctly.
 *
 * This is intended to eventually incorporate information about the amount of padding in a decoded ciphertext,
 * to ensure that the correct amount of padding is stripped away.
 * It is intended to provide a very simple kind of checksum eventually.
 * This notion of a decoding output is inherited from the crypto++ library.
 * It is also intended to be used in a recover-and-restart robust functionality if not all ciphertext is received over a lossy channel, so that if all information is eventually received, decoding/decryption can be performed eventually.
 * This is intended to be returned with the output of a decryption operation.
 */
struct DecryptResult {
    /**
     * Constructor that initializes all message lengths to 0 (failed decryption).
     */
    explicit DecryptResult() : isValid(false), messageLength(0) {}

    /**
     * Constructor for a successful decryption of a message of the given length.
     * @param len the new length.
     */
    explicit DecryptResult(size_t len) : isValid(true), messageLength(len) {}

    bool isValid;        /**< whether the decryption was successful */
    usint messageLength; /**< the length of the decrypted plaintext message */
};
/**
 * @brief Abstract interface class for LP Keys. All key types (public, private,
 * evaluation) derive from this; it binds a key to its crypto context and tag.
 *
 * @tparam Element a ring element.
 */
template <class Element>
class LPKey : public CryptoObject<Element>, public Serializable {
public:
    // Construct a key bound to a crypto context, with an optional key tag id.
    LPKey(CryptoContext<Element> cc, const string& id = "") : CryptoObject<Element>(cc, id) {}

    // Construct a key sharing the context/tag of an existing crypto object.
    LPKey(shared_ptr<CryptoObject<Element>> co) : CryptoObject<Element>(co) {}

    virtual ~LPKey() {}
};
template<typename Element>
class LPPublicKeyImpl;
template<typename Element>
using LPPublicKey = shared_ptr<LPPublicKeyImpl<Element>>;
/**
 * @brief Concrete class for LP public keys. The key material is a vector of
 * ring elements (m_h).
 * @tparam Element a ring element.
 */
template <typename Element>
class LPPublicKeyImpl : public LPKey<Element> {
public:
    /**
     * Basic constructor.
     *
     * @param cc the crypto context this key belongs to
     * @param id optional key tag identifier
     */
    LPPublicKeyImpl(CryptoContext<Element> cc, const string& id = "") : LPKey<Element>(cc, id) {}

    /**
     * Copy constructor
     *
     *@param &rhs LPPublicKeyImpl to copy from
     */
    explicit LPPublicKeyImpl(const LPPublicKeyImpl<Element> &rhs) : LPKey<Element>(rhs.GetCryptoContext(), rhs.GetKeyTag()) {
        m_h = rhs.m_h;
    }

    /**
     * Move constructor
     *
     *@param &rhs LPPublicKeyImpl to move from
     */
    explicit LPPublicKeyImpl(LPPublicKeyImpl<Element> &&rhs) : LPKey<Element>(rhs.GetCryptoContext(), rhs.GetKeyTag()) {
        m_h = std::move(rhs.m_h);
    }

    /**
     * Assignment Operator.
     *
     * @param &rhs LPPublicKeyImpl to copy from
     */
    const LPPublicKeyImpl<Element>& operator=(const LPPublicKeyImpl<Element> &rhs) {
        this->context = rhs.context;
        this->m_h = rhs.m_h;
        return *this;
    }

    /**
     * Move Assignment Operator.
     *
     * @param &rhs LPPublicKeyImpl to move from
     */
    const LPPublicKeyImpl<Element>& operator=(LPPublicKeyImpl<Element> &&rhs) {
        this->context = rhs.context;
        rhs.context = 0;  // release the moved-from object's context reference
        m_h = std::move(rhs.m_h);
        return *this;
    }

    //@Get Properties

    /**
     * Gets the computed public key
     * @return the public key element vector.
     */
    const std::vector<Element> &GetPublicElements() const {
        return this->m_h;
    }

    //@Set Properties

    /**
     * Sets the public key vector of Element.
     * @param &element is the public key Element vector to be copied.
     */
    void SetPublicElements(const std::vector<Element> &element) {
        m_h = element;
    }

    /**
     * Sets the public key vector of Element.
     * @param &&element is the public key Element vector to be moved.
     */
    void SetPublicElements(std::vector<Element> &&element) {
        m_h = std::move(element);
    }

    /**
     * Sets the public key Element at index idx.
     * NOTE(review): this INSERTS at idx rather than overwriting, so repeated
     * calls with the same idx grow the vector — confirm this is intended.
     * @param &element is the public key Element to be copied.
     */
    void SetPublicElementAtIndex(usint idx, const Element &element) {
        m_h.insert(m_h.begin() + idx, element);
    }

    /**
     * Sets the public key Element at index idx (move version; same insert
     * semantics as the copy overload above).
     * @param &&element is the public key Element to be moved.
     */
    void SetPublicElementAtIndex(usint idx, Element &&element) {
        m_h.insert(m_h.begin() + idx, std::move(element));
    }

    /**
     * Serialize the object into a Serialized
     * @param *serObj is used to store the serialized result. It MUST be a rapidjson Object (SetObject());
     * @return true if successfully serialized
     */
    bool Serialize(Serialized *serObj) const;

    /**
     * Populate the object from the deserialization of the Serialized
     * @param &serObj contains the serialized object
     * @return true on success
     */
    bool Deserialize(const Serialized &serObj);

    // Equality: same context/tag (base comparison) and element-wise equal key vectors.
    bool operator==(const LPPublicKeyImpl& other) const {
        if( !CryptoObject<Element>::operator ==(other) )
            return false;

        if( m_h.size() != other.m_h.size() )
            return false;

        for( size_t i = 0; i < m_h.size(); i++ )
            if( m_h[i] != other.m_h[i] )
                return false;

        return true;
    }

    bool operator!=(const LPPublicKeyImpl& other) const { return ! (*this == other); }

private:
    std::vector<Element> m_h;  // the public key elements
};
template<typename Element>
class LPEvalKeyImpl;
template<typename Element>
using LPEvalKey = shared_ptr<LPEvalKeyImpl<Element>>;
/**
 * @brief Abstract interface for LP evaluation/proxy keys.
 *
 * Every accessor throws by default; concrete subclasses override only the
 * storage shape they actually use (vector-of-vectors, single vector, or a
 * single element).
 *
 * @tparam Element a ring element.
 */
template <class Element>
class LPEvalKeyImpl : public LPKey<Element> {
public:
    /**
     * Basic constructor.
     *
     * @param cc the crypto context this key belongs to
     */
    LPEvalKeyImpl(CryptoContext<Element> cc) : LPKey<Element>(cc) {}

    virtual ~LPEvalKeyImpl() {}

    /**
     * Setter function to store Relinearization Element Vector A.
     * Throws exception, to be overridden by derived class.
     *
     * @param &a is the Element vector to be copied.
     */
    virtual void SetAVector(const std::vector<Element> &a) {
        throw std::runtime_error("SetAVector copy operation not supported");
    }

    /**
     * Setter function to store Relinearization Element Vector A.
     * Throws exception, to be overridden by derived class.
     *
     * @param &&a is the Element vector to be moved.
     */
    virtual void SetAVector(std::vector<Element> &&a) {
        throw std::runtime_error("SetAVector move operation not supported");
    }

    /**
     * Getter function to access Relinearization Element Vector A.
     * Throws exception, to be overridden by derived class.
     *
     * @return Element vector A.
     */
    virtual const std::vector<Element> &GetAVector() const {
        throw std::runtime_error("GetAVector operation not supported");
    }

    /**
     * Setter function to store Relinearization Element Vector B.
     * Throws exception, to be overridden by derived class.
     *
     * @param &b is the Element vector to be copied.
     */
    virtual void SetBVector(const std::vector<Element> &b) {
        throw std::runtime_error("SetBVector copy operation not supported");
    }

    /**
     * Setter function to store Relinearization Element Vector B.
     * Throws exception, to be overridden by derived class.
     *
     * @param &&b is the Element vector to be moved.
     */
    virtual void SetBVector(std::vector<Element> &&b) {
        throw std::runtime_error("SetBVector move operation not supported");
    }

    /**
     * Getter function to access Relinearization Element Vector B.
     * Throws exception, to be overridden by derived class.
     *
     * @return Element vector B.
     */
    virtual const std::vector<Element> &GetBVector() const {
        throw std::runtime_error("GetBVector operation not supported");
    }

    /**
     * Setter function to store key switch Element.
     * Throws exception, to be overridden by derived class.
     *
     * @param &a is the Element to be copied.
     */
    virtual void SetA(const Element &a) {
        throw std::runtime_error("SetA copy operation not supported");
    }

    /**
     * Setter function to store key switch Element.
     * Throws exception, to be overridden by derived class.
     *
     * @param &&a is the Element to be moved.
     */
    virtual void SetA(Element &&a) {
        throw std::runtime_error("SetA move operation not supported");
    }

    /**
     * Getter function to access key switch Element.
     * Throws exception, to be overridden by derived class.
     *
     * @return Element.
     */
    virtual const Element &GetA() const {
        throw std::runtime_error("GetA operation not supported");
    }

    // Equality dispatches to the subclass' key_compare.
    friend bool operator==(const LPEvalKeyImpl& a, const LPEvalKeyImpl& b) {
        return a.key_compare(b);
    }

    // FIX: b was previously taken by non-const reference, which was
    // inconsistent with operator== and rejected const right-hand operands.
    friend bool operator!=(const LPEvalKeyImpl& a, const LPEvalKeyImpl& b) { return ! (a == b); }

    // Deep comparison of key material; implemented by each concrete key type.
    virtual bool key_compare(const LPEvalKeyImpl& other) const = 0;
};
template<typename Element>
class LPEvalKeyRelinImpl;
template<typename Element>
using LPEvalKeyRelin = shared_ptr<LPEvalKeyRelinImpl<Element>>;
/**
 * @brief Concrete class for Relinearization keys of RLWE scheme.
 * Stores two element vectors (A and B) inside m_rKey.
 * @tparam Element a ring element.
 */
template <class Element>
class LPEvalKeyRelinImpl : public LPEvalKeyImpl<Element> {
public:
    /**
     * Basic constructor.
     *
     * @param cc the crypto context this key belongs to
     */
    LPEvalKeyRelinImpl(CryptoContext<Element> cc) : LPEvalKeyImpl<Element>(cc) {}

    virtual ~LPEvalKeyRelinImpl() {}

    /**
     * Copy constructor
     *
     *@param &rhs key to copy from
     */
    explicit LPEvalKeyRelinImpl(const LPEvalKeyRelinImpl<Element> &rhs) : LPEvalKeyImpl<Element>(rhs.GetCryptoContext()) {
        m_rKey = rhs.m_rKey;
    }

    /**
     * Move constructor
     *
     *@param &rhs key to move from
     */
    explicit LPEvalKeyRelinImpl(LPEvalKeyRelinImpl<Element> &&rhs) : LPEvalKeyImpl<Element>(rhs.GetCryptoContext()) {
        m_rKey = std::move(rhs.m_rKey);
    }

    /**
     * Assignment Operator.
     *
     * @param &rhs key to copy from
     */
    const LPEvalKeyRelinImpl<Element>& operator=(const LPEvalKeyRelinImpl<Element> &rhs) {
        this->context = rhs.context;
        this->m_rKey = rhs.m_rKey;
        return *this;
    }

    /**
     * Move Assignment Operator.
     *
     * @param &rhs key to move from
     */
    const LPEvalKeyRelinImpl<Element>& operator=(LPEvalKeyRelinImpl<Element> &&rhs) {
        this->context = rhs.context;
        rhs.context = 0;  // release the moved-from object's context reference
        m_rKey = std::move(rhs.m_rKey);
        return *this;
    }

    /**
     * Setter function to store Relinearization Element Vector A.
     * Overrides base class implementation.
     * NOTE(review): inserts at slot 0 rather than assigning; repeated calls
     * accumulate entries — confirm callers only set A once.
     *
     * @param &a is the Element vector to be copied.
     */
    virtual void SetAVector(const std::vector<Element> &a) {
        m_rKey.insert(m_rKey.begin() + 0, a);
    }

    /**
     * Setter function to store Relinearization Element Vector A.
     * Overrides base class implementation (move version; same insert semantics).
     *
     * @param &&a is the Element vector to be moved.
     */
    virtual void SetAVector(std::vector<Element> &&a) {
        m_rKey.insert(m_rKey.begin() + 0, std::move(a));
    }

    /**
     * Getter function to access Relinearization Element Vector A.
     * Overrides base class implementation.
     *
     * @return Element vector A (throws std::out_of_range if unset).
     */
    virtual const std::vector<Element> &GetAVector() const {
        return m_rKey.at(0);
    }

    /**
     * Setter function to store Relinearization Element Vector B.
     * Overrides base class implementation.
     * NOTE(review): inserts at slot 1; assumes A has been set first.
     *
     * @param &b is the Element vector to be copied.
     */
    virtual void SetBVector(const std::vector<Element> &b) {
        m_rKey.insert(m_rKey.begin() + 1, b);
    }

    /**
     * Setter function to store Relinearization Element Vector B.
     * Overrides base class implementation (move version; same insert semantics).
     *
     * @param &&b is the Element vector to be moved.
     */
    virtual void SetBVector(std::vector<Element> &&b) {
        m_rKey.insert(m_rKey.begin() + 1, std::move(b));
    }

    /**
     * Getter function to access Relinearization Element Vector B.
     * Overrides base class implementation.
     *
     * @return Element vector B (throws std::out_of_range if unset).
     */
    virtual const std::vector<Element> &GetBVector() const {
        return m_rKey.at(1);
    }

    /**
     * Serialize the object into a Serialized
     * @param *serObj is used to store the serialized result. It MUST be a rapidjson Object (SetObject());
     * @return true if successfully serialized
     */
    bool Serialize(Serialized *serObj) const;

    /**
     * SerializeWithoutContext - serializes the object into a Serialized, without the cryptocontext
     * @param *serObj is used to store the serialized result. It MUST be a rapidjson Object (SetObject());
     * @return true if successfully serialized
     */
    bool SerializeWithoutContext(Serialized *serObj) const;

    /**
     * Deserialize from the serialization
     * @param serObj - contains the serialization
     * @return true on success
     */
    bool Deserialize(const Serialized &serObj);

    // Deep comparison; the dynamic_cast throws std::bad_cast when other is
    // not an LPEvalKeyRelinImpl.
    bool key_compare(const LPEvalKeyImpl<Element>& other) const {
        const LPEvalKeyRelinImpl<Element> &oth = dynamic_cast<const LPEvalKeyRelinImpl<Element> &>(other);

        if( !CryptoObject<Element>::operator==(other) )
            return false;

        if( this->m_rKey.size() != oth.m_rKey.size() ) return false;
        for( size_t i=0; i<this->m_rKey.size(); i++ ) {
            if( this->m_rKey[i].size() != oth.m_rKey[i].size() ) return false;
            for( size_t j=0; j<this->m_rKey[i].size(); j++ ) {
                if( this->m_rKey[i][j] != oth.m_rKey[i][j] )
                    return false;
            }
        }
        return true;
    }

private:
    //private member to store vector of vector of Element.
    std::vector< std::vector<Element> > m_rKey;
};
template<typename Element>
class LPEvalKeyNTRURelinImpl;
template<typename Element>
using LPEvalKeyNTRURelin = shared_ptr<LPEvalKeyNTRURelinImpl<Element>>;
/**
 * @brief Evaluation Relinearization keys for NTRU scheme.
 * Stores a single vector of elements (vector A) in m_rKey.
 * @tparam Element a ring element.
 */
template <class Element>
class LPEvalKeyNTRURelinImpl : public LPEvalKeyImpl<Element> {
public:
    /**
     * Basic constructor.
     *
     * @param cc the crypto context this key belongs to
     */
    LPEvalKeyNTRURelinImpl(CryptoContext<Element> cc) : LPEvalKeyImpl<Element>(cc) {}

    virtual ~LPEvalKeyNTRURelinImpl() {}

    /**
     * Copy constructor
     *
     *@param &rhs key to copy from
     */
    explicit LPEvalKeyNTRURelinImpl(const LPEvalKeyNTRURelinImpl<Element> &rhs) : LPEvalKeyImpl<Element>(rhs.GetCryptoContext()) {
        m_rKey = rhs.m_rKey;
    }

    /**
     * Move constructor
     *
     *@param &rhs key to move from
     */
    explicit LPEvalKeyNTRURelinImpl(LPEvalKeyNTRURelinImpl<Element> &&rhs) : LPEvalKeyImpl<Element>(rhs.GetCryptoContext()) {
        m_rKey = std::move(rhs.m_rKey);
    }

    /**
     * Assignment Operator.
     *
     * @param &rhs key to copy from
     */
    const LPEvalKeyNTRURelinImpl<Element>& operator=(const LPEvalKeyNTRURelinImpl<Element> &rhs) {
        this->context = rhs.context;
        this->m_rKey = rhs.m_rKey;
        return *this;
    }

    /**
     * Move Assignment Operator.
     *
     * @param &rhs key to move from
     */
    const LPEvalKeyNTRURelinImpl<Element>& operator=(LPEvalKeyNTRURelinImpl<Element> &&rhs) {
        this->context = rhs.context;
        rhs.context = 0;  // release the moved-from object's context reference
        m_rKey = std::move(rhs.m_rKey);
        return *this;
    }

    /**
     * Setter function to store Relinearization Element Vector A.
     * Overrides base class implementation.
     * NOTE(review): copy overload inserts element-by-element (appending when
     * m_rKey starts empty), unlike the move overload which replaces — confirm
     * both are only called on a fresh key.
     *
     * @param &a is the Element vector to be copied.
     */
    virtual void SetAVector(const std::vector<Element> &a) {
        for (usint i = 0; i < a.size(); i++) {
            m_rKey.insert(m_rKey.begin() + i, a.at(i));
        }
    }

    /**
     * Setter function to store Relinearization Element Vector A.
     * Overrides base class implementation; replaces the stored vector.
     *
     * @param &&a is the Element vector to be moved.
     */
    virtual void SetAVector(std::vector<Element> &&a) {
        m_rKey = std::move(a);
    }

    /**
     * Getter function to access Relinearization Element Vector A.
     * Overrides base class implementation.
     *
     * @return Element vector A.
     */
    virtual const std::vector<Element> &GetAVector() const {
        return m_rKey;
    }

    /**
     * Serialize the object into a Serialized
     * @param *serObj is used to store the serialized result. It MUST be a rapidjson Object (SetObject());
     * @return true if successfully serialized
     */
    bool Serialize(Serialized *serObj) const;

    /**
     * SerializeWithoutContext - serializes the object into a Serialized, without the cryptocontext
     * @param *serObj is used to store the serialized result. It MUST be a rapidjson Object (SetObject());
     * @return true if successfully serialized
     */
    bool SerializeWithoutContext(Serialized *serObj) const;

    /**
     * Deserialize from the serialization
     * @param serObj - contains the serialization
     * @return true on success
     */
    bool Deserialize(const Serialized &serObj);

    // Deep comparison; the dynamic_cast throws std::bad_cast when other is
    // not an LPEvalKeyNTRURelinImpl.
    bool key_compare(const LPEvalKeyImpl<Element>& other) const {
        const LPEvalKeyNTRURelinImpl<Element> &oth = dynamic_cast<const LPEvalKeyNTRURelinImpl<Element> &>(other);

        if( !CryptoObject<Element>::operator ==(other) )
            return false;

        if( this->m_rKey.size() != oth.m_rKey.size() ) return false;
        for( size_t i=0; i<this->m_rKey.size(); i++ ) {
            if( this->m_rKey[i] != oth.m_rKey[i] )
                return false;
        }
        return true;
    }

private:
    //private member to store vector of Element.
    std::vector<Element> m_rKey;
};
template<typename Element>
class LPEvalKeyNTRUImpl;
template<typename Element>
using LPEvalKeyNTRU = shared_ptr<LPEvalKeyNTRUImpl<Element>>;
/**
 * @brief Concrete class for facilitating NTRU key switch.
 * Stores a single key-switch element (m_Key).
 * @tparam Element a ring element.
 */
template <class Element>
class LPEvalKeyNTRUImpl : public LPEvalKeyImpl<Element> {
public:
    /**
     * Basic constructor.
     *
     * @param cc the crypto context this key belongs to
     */
    LPEvalKeyNTRUImpl(CryptoContext<Element> cc) : LPEvalKeyImpl<Element>(cc) {}

    virtual ~LPEvalKeyNTRUImpl() {}

    /**
     * Copy constructor
     *
     *@param &rhs key to copy from
     */
    explicit LPEvalKeyNTRUImpl(const LPEvalKeyNTRUImpl<Element> &rhs) : LPEvalKeyImpl<Element>(rhs.GetCryptoContext()) {
        m_Key = rhs.m_Key;
    }

    /**
     * Move constructor
     *
     *@param &rhs key to move from
     */
    explicit LPEvalKeyNTRUImpl(LPEvalKeyNTRUImpl<Element> &&rhs) : LPEvalKeyImpl<Element>(rhs.GetCryptoContext()) {
        m_Key = std::move(rhs.m_Key);
    }

    /**
     * Assignment Operator.
     *
     * @param &rhs key to copy from
     */
    const LPEvalKeyNTRUImpl<Element>& operator=(const LPEvalKeyNTRUImpl<Element> &rhs) {
        this->context = rhs.context;
        this->m_Key = rhs.m_Key;
        return *this;
    }

    /**
     * Move Assignment Operator.
     *
     * @param &rhs key to move from
     */
    const LPEvalKeyNTRUImpl<Element>& operator=(LPEvalKeyNTRUImpl<Element> &&rhs) {
        this->context = rhs.context;
        rhs.context = 0;  // release the moved-from object's context reference
        m_Key = std::move(rhs.m_Key);
        return *this;
    }

    /**
     * Setter function to store NTRU key switch element.
     * Function copies the key.
     * Overrides the virtual function from base class LPEvalKeyImpl.
     *
     * @param &a is the key switch element to be copied.
     */
    virtual void SetA(const Element &a) {
        m_Key = a;
    }

    /**
     * Setter function to store NTRU key switch Element.
     * Function moves the key.
     * Overrides the virtual function from base class LPEvalKeyImpl.
     *
     * @param &&a is the key switch Element to be moved.
     */
    virtual void SetA(Element &&a) {
        m_Key = std::move(a);
    }

    /**
     * Getter function to access NTRU key switch Element.
     * Overrides the virtual function from base class LPEvalKeyImpl.
     *
     * @return NTRU key switch Element.
     */
    virtual const Element& GetA() const {
        return m_Key;
    }

    /**
     * Serialize the object into a Serialized
     * @param *serObj is used to store the serialized result. It MUST be a rapidjson Object (SetObject());
     * @return true if successfully serialized
     */
    bool Serialize(Serialized *serObj) const;

    /**
     * SerializeWithoutContext - serializes the object into a Serialized, without the cryptocontext
     * @param *serObj is used to store the serialized result. It MUST be a rapidjson Object (SetObject());
     * @return true if successfully serialized
     */
    bool SerializeWithoutContext(Serialized *serObj) const;

    /**
     * Deserialize from the serialization
     * @param serObj - contains the serialization
     * @return true on success
     */
    bool Deserialize(const Serialized &serObj);

    // Deep comparison; the dynamic_cast throws std::bad_cast when other is
    // not an LPEvalKeyNTRUImpl.
    bool key_compare(const LPEvalKeyImpl<Element>& other) const {
        const LPEvalKeyNTRUImpl<Element> &oth = dynamic_cast<const LPEvalKeyNTRUImpl<Element> &>(other);

        if( !CryptoObject<Element>::operator ==(other) )
            return false;

        if( this->m_Key != oth.m_Key )
            return false;
        return true;
    }

private:
    /**
     * private member Element to store key.
     */
    Element m_Key;
};
template<typename Element>
class LPPrivateKeyImpl;
template<typename Element>
using LPPrivateKey = shared_ptr<LPPrivateKeyImpl<Element>>;
/**
 * @brief Private key implementation template for Ring-LWE, NTRU-based schemes.
 * Stores the secret ring element (m_sk) and generates a random key tag on
 * construction.
 * @tparam Element a ring element.
 */
template <class Element>
class LPPrivateKeyImpl : public LPKey<Element> {
public:
    /**
     * Construct in context; the key tag is a freshly generated random id.
     */
    LPPrivateKeyImpl(CryptoContext<Element> cc) : LPKey<Element>(cc, GenerateUniqueKeyID()) {}

    /**
     * Copy constructor
     *@param &rhs the LPPrivateKeyImpl to copy from
     */
    explicit LPPrivateKeyImpl(const LPPrivateKeyImpl<Element> &rhs) : LPKey<Element>(rhs.GetCryptoContext(), rhs.GetKeyTag()) {
        this->m_sk = rhs.m_sk;
    }

    /**
     * Move constructor
     *@param &rhs the LPPrivateKeyImpl to move from
     */
    explicit LPPrivateKeyImpl(LPPrivateKeyImpl<Element> &&rhs) : LPKey<Element>(rhs.GetCryptoContext(), rhs.GetKeyTag()) {
        this->m_sk = std::move(rhs.m_sk);
    }

    /**
     * Assignment Operator.
     *
     * @param &rhs LPPrivateKeyImpl to assign from.
     * @return the resulting LPPrivateKeyImpl
     */
    const LPPrivateKeyImpl<Element>& operator=(const LPPrivateKeyImpl<Element> &rhs) {
        CryptoObject<Element>::operator=(rhs);
        this->m_sk = rhs.m_sk;
        return *this;
    }

    /**
     * Move Assignment Operator.
     *
     * @param &rhs LPPrivateKeyImpl to assign from.
     * @return the resulting LPPrivateKeyImpl
     */
    const LPPrivateKeyImpl<Element>& operator=(LPPrivateKeyImpl<Element> &&rhs) {
        CryptoObject<Element>::operator=(rhs);
        this->m_sk = std::move(rhs.m_sk);
        return *this;
    }

    /**
     * Implementation of the Get accessor for private element.
     * @return the private element.
     */
    const Element & GetPrivateElement() const { return m_sk; }

    /**
     * Set accessor for private element (copy).
     * @param &x private element to set to.
     */
    void SetPrivateElement(const Element &x) {
        m_sk = x;
    }

    /**
     * Set accessor for private element (move).
     * @param &&x private element to set to.
     */
    void SetPrivateElement(Element &&x) {
        m_sk = std::move(x);
    }

    /**
     * Serialize the object into a Serialized
     * @param *serObj is used to store the serialized result. It MUST be a rapidjson Object (SetObject());
     * @return true if successfully serialized
     */
    bool Serialize(Serialized *serObj) const;

    /**
     * Populate the object from the deserialization of the Serialized
     * @param &serObj contains the serialized object
     * @return true on success
     */
    bool Deserialize(const Serialized &serObj);

    // Equality: same context/tag (base comparison) and equal secret elements.
    bool operator==(const LPPrivateKeyImpl& other) const {
        return CryptoObject<Element>::operator ==(other) &&
                m_sk == other.m_sk;
    }

    bool operator!=(const LPPrivateKeyImpl& other) const { return ! (*this == other); }

private:
    // number of 32-bit words needed for a 128-bit key id
    static const size_t intsInID = 128 / (sizeof(uint32_t) * 8);

    // Produce a 128-bit random id rendered as 32 lowercase hex digits.
    static string GenerateUniqueKeyID() {
        std::uniform_int_distribution<uint32_t> distribution(0, std::numeric_limits<uint32_t>::max());
        std::stringstream s;
        s.fill('0');
        s << std::hex;
        for( size_t i = 0; i < intsInID; i++ )
            s << std::setw(8) << distribution(PseudoRandomNumberGenerator::GetPRNG());
        return s.str();
    }

    Element m_sk;  // the secret key element
};
/**
 * @brief Container holding a public/secret key pair, as produced by KeyGen.
 * @tparam Element a ring element.
 */
template <class Element>
class LPKeyPair {
public:
    LPPublicKey<Element> publicKey;   // shared_ptr to the public key (may be empty)
    LPPrivateKey<Element> secretKey;  // shared_ptr to the secret key (may be empty)

    // Wraps the raw pointers into shared_ptrs (taking ownership);
    // defaults to an empty pair.
    LPKeyPair(LPPublicKeyImpl<Element>* a=0, LPPrivateKeyImpl<Element>* b=0): publicKey(a), secretKey(b) {}

    // True only when both keys are present.
    bool good() { return publicKey && secretKey; }
};
/**
 * @brief Abstract interface for parameter generation algorithm
 * @tparam Element a ring element.
 */
template <class Element>
class LPParameterGenerationAlgorithm {
public:
    virtual ~LPParameterGenerationAlgorithm() {}

    /**
     * Method for computing all derived parameters based on chosen primitive parameters
     *
     * @param *cryptoParams the crypto parameters object to be populated with parameters.
     * @param evalAddCount number of EvalAdds assuming no EvalMult and KeySwitch operations are performed.
     * @param evalMultCount number of EvalMults assuming no EvalAdd and KeySwitch operations are performed.
     * @param keySwitchCount number of KeySwitch operations assuming no EvalAdd and EvalMult operations are performed.
     * @param dcrtBits number of bits in each CRT modulus
     * @return true if parameter generation succeeded
     */
    virtual bool ParamsGen(shared_ptr<LPCryptoParameters<Element>> cryptoParams, int32_t evalAddCount = 0,
            int32_t evalMultCount = 0, int32_t keySwitchCount = 0, size_t dcrtBits = 0) const = 0;
};
/**
 * @brief Abstract interface for encryption algorithm
 * @tparam Element a ring element.
 */
template <class Element>
class LPEncryptionAlgorithm {
public:
    virtual ~LPEncryptionAlgorithm() {}

    /**
     * Method for encrypting plaintext using LBC
     *
     * @param publicKey public key used for encryption.
     * @param plaintext copy of the plaintext element. NOTE a copy is passed! That is NOT an error!
     * @return ciphertext which results from encryption.
     */
    virtual Ciphertext<Element> Encrypt(const LPPublicKey<Element> publicKey, Element plaintext) const = 0;

    /**
     * Method for encrypting plaintext using LBC with a secret key
     *
     * @param privateKey private key used for encryption.
     * @param plaintext copy of the plaintext input. NOTE a copy is passed! That is NOT an error!
     * @return ciphertext which results from encryption.
     */
    virtual Ciphertext<Element> Encrypt(const LPPrivateKey<Element> privateKey, Element plaintext) const = 0;

    /**
     * Method for decrypting plaintext using LBC
     *
     * @param privateKey private key used for decryption.
     * @param ciphertext the ciphertext to be decrypted.
     * @param *plaintext the plaintext output.
     * @return the decoding result.
     */
    virtual DecryptResult Decrypt(const LPPrivateKey<Element> privateKey,
            ConstCiphertext<Element> ciphertext,
            NativePoly *plaintext) const = 0;

    /**
     * Function to generate public and private keys
     *
     * @param cc the crypto context in which to generate the keys.
     * @param makeSparse whether to generate a sparse key.
     * @return the generated key pair.
     */
    virtual LPKeyPair<Element> KeyGen(CryptoContext<Element> cc, bool makeSparse=false) = 0;
};
/**
* @brief Abstract interface for Leveled SHE operations (modulus/ring/level reduction
* and composed multiplication used in leveled schemes).
* @tparam Element a ring element.
*/
template <class Element>
class LPLeveledSHEAlgorithm {
public:
virtual ~LPLeveledSHEAlgorithm() {}
/**
* Method for Modulus Reduction: switches the ciphertext to a smaller modulus.
*
* @param cipherText Ciphertext to perform mod reduce on.
* @return the mod-reduced ciphertext.
*/
virtual Ciphertext<Element> ModReduce(ConstCiphertext<Element> cipherText) const = 0;
/**
* Method for Ring Reduction: reduces the ring dimension of the ciphertext.
*
* @param cipherText Ciphertext to perform ring reduce on.
* @param keySwitchHint key switch hint used to move the ciphertext to the
* (sparse) key under which the reduced-ring ciphertext is decryptable.
* @return the ring-reduced ciphertext.
*/
virtual Ciphertext<Element> RingReduce(ConstCiphertext<Element> cipherText, const LPEvalKey<Element> keySwitchHint) const = 0;
/**
* Method for Composed EvalMult: multiplication followed by key switching and
* modulus reduction, producing a ciphertext at the next level.
*
* @param cipherText1 first input ciphertext to perform multiplication on.
* @param cipherText2 second input ciphertext to perform multiplication on.
* @param quadKeySwitchHint hint for switching the resultant quadratic secret key
* after multiplication back to the secret key of the particular level.
* @return the resulting ciphertext that can be decrypted with the secret key of the particular level.
*/
virtual Ciphertext<Element> ComposedEvalMult(
ConstCiphertext<Element> cipherText1,
ConstCiphertext<Element> cipherText2,
const LPEvalKey<Element> quadKeySwitchHint) const = 0;
/**
* Method for Level Reduction from sk -> sk1. This method performs a keyswitch on the ciphertext and then performs a modulus reduction.
*
* @param cipherText1 is the original ciphertext to be key switched and mod reduced.
* @param linearKeySwitchHint is the linear key switch hint to perform the key switch operation.
* @return the resulting ciphertext.
*/
virtual Ciphertext<Element> LevelReduce(ConstCiphertext<Element> cipherText1,
const LPEvalKey<Element> linearKeySwitchHint) const = 0;
/**
* Function that determines if security requirements are met if ring dimension is reduced by half.
*
* @param ringDimension is the original ring dimension.
* @param moduli is the vector of moduli that is used.
* @param rootHermiteFactor is the security threshold.
* @return true if the halved ring dimension still satisfies the security requirement.
*/
virtual bool CanRingReduce(usint ringDimension, const std::vector<BigInteger> &moduli, const double rootHermiteFactor) const = 0;
};
/**
* @brief Abstract interface class for LBC PRE (proxy re-encryption) algorithms:
* re-encryption key generation and ciphertext re-encryption.
* @tparam Element a ring element.
*/
template <class Element>
class LPPREAlgorithm {
public:
virtual ~LPPREAlgorithm() {}
/**
* Virtual function to generate 1..log(q) encryptions for each bit of the original private key.
* Variant that uses the new secret key directly.
*
* @param newKey new private key for the new ciphertext.
* @param origPrivateKey original private key used for decryption.
* @return the re-encryption key.
*/
virtual LPEvalKey<Element> ReKeyGen(const LPPrivateKey<Element> newKey,
const LPPrivateKey<Element> origPrivateKey) const = 0;
/**
* Virtual function to generate 1..log(q) encryptions for each bit of the original private key.
* Variant that uses the public key for the new secret key.
*
* @param newKey public key for the new secret key.
* @param origPrivateKey original private key used for decryption.
* @return the re-encryption key.
*/
virtual LPEvalKey<Element> ReKeyGen(const LPPublicKey<Element> newKey,
const LPPrivateKey<Element> origPrivateKey) const = 0;
/**
* Virtual function to define the interface for re-encrypting a ciphertext using the
* re-encryption key generated above.
*
* @param evalKey proxy re-encryption key.
* @param ciphertext the input ciphertext.
* @return the re-encrypted ciphertext, decryptable by the new key.
*/
virtual Ciphertext<Element> ReEncrypt(const LPEvalKey<Element> evalKey,
ConstCiphertext<Element> ciphertext) const = 0;
};
/**
* @brief Abstract interface class for LBC Multiparty algorithms. A version of this multiparty scheme built on the BGV scheme is seen here:
* - Asharov G., Jain A., López-Alt A., Tromer E., Vaikuntanathan V., Wichs D. (2012) Multiparty Computation with Low Communication, Computation and Interaction via Threshold FHE. In: Pointcheval D., Johansson T. (eds) Advances in Cryptology – EUROCRYPT 2012. EUROCRYPT 2012. Lecture Notes in Computer Science, vol 7237. Springer, Berlin, Heidelberg
*
* During offline key generation, this multiparty scheme relies on the clients coordinating their public key generation. To do this, a single client generates a public-secret key pair.
* This public key is shared with the other clients, which use an element in the public key to generate their own public keys.
* The clients generate a shared key pair using a scheme-specific approach, then generate re-encryption keys. Re-encryption keys are uploaded to the server.
* Clients encrypt data with their public keys and send the encrypted data to the server.
* The data is re-encrypted. Computations are then run on the data.
* The result is sent to each of the clients.
* One client runs a "Leader" multiparty decryption operation with its own secret key. All other clients run a regular "Main" multiparty decryption with their own secret key.
* The resulting partially decrypted ciphertexts are then fully decrypted with the decryption fusion algorithms.
*
* @tparam Element a ring element.
*/
template <class Element>
class LPMultipartyAlgorithm {
public:
virtual ~LPMultipartyAlgorithm() {}
/**
* Function to generate public and private keys for multiparty homomorphic encryption in coordination with a leading client that generated a first public key.
*
* @param cc cryptocontext for the keys to be generated.
* @param pk1 public key of the lead client used to coordinate the joint key generation.
* @param makeSparse set to true if ring reduce by a factor of 2 is to be used.
* @param pre set to true if proxy re-encryption is used in multi-party protocol
* @return key pair including the private and public key
*/
virtual LPKeyPair<Element> MultipartyKeyGen(CryptoContext<Element> cc,
const LPPublicKey<Element> pk1,
bool makeSparse=false, bool pre=false) = 0;
/**
* Function to generate public and private keys for multiparty homomorphic encryption server key pair in coordination with secret keys of clients.
*
* @param cc cryptocontext for the keys to be generated.
* @param secretKeys private keys of the clients to be fused into the joint key.
* @param makeSparse set to true if ring reduce by a factor of 2 is to be used.
* @return key pair including the private and public key
*/
virtual LPKeyPair<Element> MultipartyKeyGen(CryptoContext<Element> cc,
const vector<LPPrivateKey<Element>>& secretKeys,
bool makeSparse=false) = 0;
/**
* Method for the main (non-lead) partial decryption operation run by most clients in multiparty homomorphic encryption.
*
* @param privateKey private key share used for decryption.
* @param ciphertext ciphertext to be partially decrypted.
* @return the partially decrypted ciphertext.
*/
virtual Ciphertext<Element> MultipartyDecryptMain(const LPPrivateKey<Element> privateKey,
ConstCiphertext<Element> ciphertext) const = 0;
/**
* Method for the partial decryption operation run by the lead decryption client in multiparty homomorphic encryption.
*
* @param privateKey private key share used for decryption.
* @param ciphertext ciphertext to be partially decrypted.
* @return the partially decrypted ciphertext.
*/
virtual Ciphertext<Element> MultipartyDecryptLead(const LPPrivateKey<Element> privateKey,
ConstCiphertext<Element> ciphertext) const = 0;
/**
* Method for fusing the partially decrypted ciphertexts into the final plaintext.
*
* @param ciphertextVec the partially decrypted ciphertexts from all parties.
* @param *plaintext the plaintext output.
* @return the decoding result.
*/
virtual DecryptResult MultipartyDecryptFusion(const vector<Ciphertext<Element>>& ciphertextVec,
NativePoly *plaintext) const = 0;
};
/**
* @brief Abstract interface class for LBC SHE algorithms: homomorphic add/sub/mult,
* key switching, automorphisms, and derived batched operations (EvalSum, inner
* products, merge, linear regression, cross-correlation) with default
* implementations built on the pure-virtual primitives.
* @tparam Element a ring element.
*/
template <class Element>
class LPSHEAlgorithm {
public:
virtual ~LPSHEAlgorithm() {}
/**
* Virtual function to define the interface for homomorphic addition of ciphertexts.
*
* @param ciphertext1 the input ciphertext.
* @param ciphertext2 the input ciphertext.
* @return the new ciphertext.
*/
virtual Ciphertext<Element> EvalAdd(ConstCiphertext<Element> ciphertext1,
ConstCiphertext<Element> ciphertext2) const = 0;
/**
* Virtual function to define the interface for homomorphic addition of a ciphertext and a plaintext.
*
* @param ciphertext the input ciphertext.
* @param plaintext the input plaintext.
* @return the new ciphertext.
*/
virtual Ciphertext<Element> EvalAdd(ConstCiphertext<Element> ciphertext,
ConstPlaintext plaintext) const = 0;
/**
* Virtual function to define the interface for homomorphic subtraction of ciphertexts.
*
* @param ciphertext1 the input ciphertext.
* @param ciphertext2 the input ciphertext.
* @return the new ciphertext.
*/
virtual Ciphertext<Element> EvalSub(ConstCiphertext<Element> ciphertext1,
ConstCiphertext<Element> ciphertext2) const = 0;
/**
* Virtual function to define the interface for homomorphic subtraction of a plaintext from a ciphertext.
*
* @param ciphertext the input ciphertext.
* @param plaintext the input plaintext.
* @return the new ciphertext.
*/
virtual Ciphertext<Element> EvalSub(ConstCiphertext<Element> ciphertext,
ConstPlaintext plaintext) const = 0;
/**
* Virtual function to define the interface for multiplicative homomorphic evaluation of ciphertext.
* No relinearization is performed (the result has a larger number of components).
*
* @param ciphertext1 the input ciphertext.
* @param ciphertext2 the input ciphertext.
* @return the new ciphertext.
*/
virtual Ciphertext<Element> EvalMult(ConstCiphertext<Element> ciphertext1,
ConstCiphertext<Element> ciphertext2) const = 0;
/**
* Virtual function to define the interface for multiplication of ciphertext by plaintext.
*
* @param ciphertext the input ciphertext.
* @param plaintext the input plaintext.
* @return the new ciphertext.
*/
virtual Ciphertext<Element> EvalMult(ConstCiphertext<Element> ciphertext,
ConstPlaintext plaintext) const = 0;
/**
* Virtual function to define the interface for multiplicative homomorphic evaluation of ciphertext using the evaluation key (multiply-then-relinearize).
*
* @param ciphertext1 first input ciphertext.
* @param ciphertext2 second input ciphertext.
* @param ek is the evaluation key to make the newCiphertext decryptable by the same secret key as that of ciphertext1 and ciphertext2.
* @return the new ciphertext.
*/
virtual Ciphertext<Element> EvalMult(ConstCiphertext<Element> ciphertext1,
ConstCiphertext<Element> ciphertext2, const LPEvalKey<Element> ek) const = 0;
/**
* Virtual function for evaluating multiplication of a list of ciphertexts in a
* binary-tree order (pairwise products, then products of products, ...).
*
* NOTE(review): assumes cipherTextList.size() >= 2 — with a single-element (or
* empty) list, cipherTextResults stays empty and back() is undefined behavior;
* callers must validate the list size. TODO confirm whether callers guarantee this.
*
* @param cipherTextList is the ciphertext list.
* @param evalKeys is the evaluation key to make the newCiphertext
* decryptable by the same secret key as that of ciphertext list.
* @return the product of all ciphertexts in the list.
*/
virtual Ciphertext<Element> EvalMultMany(const vector<Ciphertext<Element>>& cipherTextList,
const vector<LPEvalKey<Element>> &evalKeys) const {
// default implementation if you don't have one in your scheme
const size_t inSize = cipherTextList.size();
// Total number of tree nodes consumed: each of the (inSize-1) products reads two operands.
const size_t lim = inSize * 2 - 2;
vector<Ciphertext<Element>> cipherTextResults;
cipherTextResults.resize(inSize - 1);
size_t ctrIndex = 0;
for(size_t i=0; i < lim; i = i + 2) {
// Operands come from the original list first, then from earlier intermediate products.
cipherTextResults[ctrIndex++] = this->EvalMult(
i < inSize ? cipherTextList[i] : cipherTextResults[i - inSize],
i+1 < inSize ? cipherTextList[i+1] : cipherTextResults[i + 1 - inSize]);
}
return cipherTextResults.back();
}
/**
* Virtual function to define the interface for multiplicative homomorphic evaluation of ciphertext followed by relinearization using the evaluation key vector.
*
* @param ct1 first input ciphertext.
* @param ct2 second input ciphertext.
* @param ek is the evaluation key vector to make the result
* decryptable by the same secret key as that of ct1 and ct2.
* @return the new (relinearized) ciphertext.
*/
virtual Ciphertext<Element> EvalMultAndRelinearize(ConstCiphertext<Element> ct1,
ConstCiphertext<Element> ct2, const vector<LPEvalKey<Element>> &ek) const = 0;
/**
* EvalLinRegression - Computes the parameter vector for linear regression using the least squares method.
* The denominator (determinant of x^T x) is stored in the denominator of each RationalCiphertext.
* @param x - matrix of regressors
* @param y - vector of dependent variables
* @return the parameter vector using (x^T x)^{-1} x^T y (using least squares method)
*/
shared_ptr<Matrix<RationalCiphertext<Element>>>
EvalLinRegression(const shared_ptr<Matrix<RationalCiphertext<Element>>> x,
const shared_ptr<Matrix<RationalCiphertext<Element>>> y) const
{
// multiplication is done in reverse order to minimize the number of inner products
Matrix<RationalCiphertext<Element>> xTransposed = x->Transpose();
shared_ptr<Matrix<RationalCiphertext<Element>>> result(new Matrix<RationalCiphertext<Element>>(xTransposed * (*y)));
Matrix<RationalCiphertext<Element>> xCovariance = xTransposed * (*x);
// Inverse is computed as adjugate / determinant to stay within ring operations.
Matrix<RationalCiphertext<Element>> cofactorMatrix = xCovariance.CofactorMatrix();
Matrix<RationalCiphertext<Element>> adjugateMatrix = cofactorMatrix.Transpose();
*result = adjugateMatrix * (*result);
RationalCiphertext<Element> determinant;
xCovariance.Determinant(&determinant);
// Divide every entry by the determinant by setting its denominator.
for (size_t row = 0; row < result->GetRows(); row++)
for (size_t col = 0; col < result->GetCols(); col++)
(*result)(row, col).SetDenominator(determinant.GetNumerator());
return result;
}
/**
* Virtual function to define the interface for homomorphic negation of ciphertext.
*
* @param ciphertext the input ciphertext.
* @return the negated ciphertext.
*/
virtual Ciphertext<Element> EvalNegate(ConstCiphertext<Element> ciphertext) const = 0;
/**
* Function to add random noise to all plaintext slots except for the first one; used in EvalInnerProduct
* so only slot 0 (the inner-product result) remains meaningful after decryption.
*
* @param ciphertext the input ciphertext.
* @return modified ciphertext
*/
Ciphertext<Element> AddRandomNoise(ConstCiphertext<Element> ciphertext) const {
string kID = ciphertext->GetKeyTag(); // NOTE(review): kID is never used below — candidate for removal.
const auto cryptoParams = ciphertext->GetCryptoParameters();
const auto encodingParams = cryptoParams->GetEncodingParams();
const auto elementParams = cryptoParams->GetElementParams();
usint n = elementParams->GetRingDimension();
auto cc = ciphertext->GetCryptoContext();
// Uniform random values modulo the plaintext modulus for slots 1..n-1.
DiscreteUniformGenerator dug;
dug.SetModulus(encodingParams->GetPlaintextModulus());
BigVector randomVector = dug.GenerateVector(n - 1);
std::vector<int64_t> randomIntVector(n);
//first plaintext slot does not need to change
randomIntVector[0] = 0;
for (usint i = 0; i < n - 1; i++)
{
randomIntVector[i + 1] = randomVector[i].ConvertToInt();
}
Plaintext plaintext = cc->MakePackedPlaintext(randomIntVector);
plaintext->Encode();
plaintext->GetElement<Element>().SetFormat(EVALUATION);
auto ans = EvalAdd(ciphertext, plaintext);
return ans;
};
/**
* Method for generating a key switch hint from an original key to a new key.
*
* @param originalPrivateKey Original private key used for encryption.
* @param newPrivateKey New private key to generate the keyswitch hint to.
* @return the resulting key switch hint.
*/
virtual LPEvalKey<Element> KeySwitchGen(
const LPPrivateKey<Element> originalPrivateKey,
const LPPrivateKey<Element> newPrivateKey) const = 0;
/**
* Method for KeySwitch: re-encrypts the ciphertext under the key targeted by the hint.
*
* @param keySwitchHint Hint required to perform the ciphertext switching.
* @param cipherText Original ciphertext to perform switching on.
* @return the key-switched ciphertext.
*/
virtual Ciphertext<Element> KeySwitch(
const LPEvalKey<Element> keySwitchHint,
ConstCiphertext<Element> cipherText) const = 0;
/**
* Method for KeySwitching based on RLWE relinearization (used only for the LTV scheme).
* Function to generate 1..log(q) encryptions for each bit of the original private key
*
* @param newPublicKey encryption key for the new ciphertext.
* @param origPrivateKey original private key used for decryption.
* @return the relinearization key.
*/
virtual LPEvalKey<Element> KeySwitchRelinGen(const LPPublicKey<Element> newPublicKey,
const LPPrivateKey<Element> origPrivateKey) const = 0;
/**
* Method for KeySwitching based on RLWE relinearization (used only for the LTV scheme).
*
* @param evalKey the evaluation key.
* @param ciphertext the input ciphertext.
* @return the resulting Ciphertext
*/
virtual Ciphertext<Element> KeySwitchRelin(const LPEvalKey<Element> evalKey,
ConstCiphertext<Element> ciphertext) const = 0;
/**
* Virtual function to define the interface for generating the evaluation key
* which is used after each multiplication (relinearization key).
*
* @param originalPrivateKey the private key whose square is to be switched back to itself.
* @return the evaluation (relinearization) key.
*/
virtual LPEvalKey<Element> EvalMultKeyGen(
const LPPrivateKey<Element> originalPrivateKey) const = 0;
/**
* Virtual function to define the interface for generating the evaluation keys which are used after each multiplication for depth more than 2.
*
* @param originalPrivateKey Original private key used for encryption.
* @return the resulting evaluation key vector list.
*/
virtual vector<LPEvalKey<Element>> EvalMultKeysGen(
const LPPrivateKey<Element> originalPrivateKey) const = 0;
/**
* Virtual function to generate all automorphism keys for a given private key.
*
* @param publicKey encryption key for the new ciphertext.
* @param origPrivateKey original private key used for decryption.
* @param indexList list of automorphism indices to be computed
* @return returns the evaluation keys
*/
virtual shared_ptr<std::map<usint, LPEvalKey<Element>>> EvalAutomorphismKeyGen(const LPPublicKey<Element> publicKey,
const LPPrivateKey<Element> origPrivateKey,
const std::vector<usint> &indexList) const = 0;
/**
* Generates evaluation keys for a list of rotation indices.
* Currently works only for power-of-two and cyclic-group cyclotomics.
*
* @param publicKey encryption key for the new ciphertext; if null, the RLWE (private-key-only) variant is used.
* @param origPrivateKey original private key used for decryption.
* @param indexList list of (signed) rotation indices to be computed
* @return returns the evaluation keys
*/
shared_ptr<std::map<usint, LPEvalKey<Element>>> EvalAtIndexKeyGen(const LPPublicKey<Element> publicKey,
const LPPrivateKey<Element> origPrivateKey,
const std::vector<int32_t> &indexList) const
{
const auto cryptoParams = origPrivateKey->GetCryptoParameters();
const auto encodingParams = cryptoParams->GetEncodingParams();
const auto elementParams = cryptoParams->GetElementParams();
uint32_t m = elementParams->GetCyclotomicOrder();
// Translate each rotation index into the corresponding automorphism index.
std::vector<uint32_t> autoIndices(indexList.size());
if (!(m & (m-1))) { // power-of-two cyclotomics
for (size_t i=0; i < indexList.size(); i++)
autoIndices[i] = FindAutomorphismIndex2n(indexList[i],m);
}
else // cyclic groups
{
for (size_t i=0; i < indexList.size(); i++)
autoIndices[i] = FindAutomorphismIndexCyclic(indexList[i],m,encodingParams->GetPlaintextGenerator());
}
if (publicKey)
// NTRU-based scheme
return EvalAutomorphismKeyGen(publicKey,origPrivateKey,autoIndices);
else
// RLWE-based scheme
return EvalAutomorphismKeyGen(origPrivateKey,autoIndices);
}
/**
* Virtual function for evaluating the automorphism of a ciphertext at index i.
*
* @param ciphertext the input ciphertext.
* @param i automorphism index
* @param &evalKeys - reference to the map of evaluation keys generated by EvalAutomorphismKeyGen.
* @return resulting ciphertext
*/
virtual Ciphertext<Element> EvalAutomorphism(ConstCiphertext<Element> ciphertext, usint i,
const std::map<usint, LPEvalKey<Element>> &evalKeys) const = 0;
/**
* Moves the index-th slot to slot 0 (rotation) via the matching automorphism.
*
* @param ciphertext the input ciphertext.
* @param index the rotation index.
* @param &evalAtIndexKeys - reference to the map of evaluation keys generated by EvalAtIndexKeyGen.
* @return resulting ciphertext
*/
Ciphertext<Element> EvalAtIndex(ConstCiphertext<Element> ciphertext,
int32_t index, const std::map<usint, LPEvalKey<Element>> &evalAtIndexKeys) const {
const auto cryptoParams = ciphertext->GetCryptoParameters();
const auto encodingParams = cryptoParams->GetEncodingParams();
const auto elementParams = cryptoParams->GetElementParams();
uint32_t m = elementParams->GetCyclotomicOrder();
uint32_t autoIndex;
if (!(m & (m-1))) // power-of-two cyclotomics
autoIndex = FindAutomorphismIndex2n(index,m);
else // cyclic-group cyclotomics
autoIndex = FindAutomorphismIndexCyclic(index,m,encodingParams->GetPlaintextGenerator());
return EvalAutomorphism(ciphertext,autoIndex,evalAtIndexKeys);
}
/**
* Virtual function to generate automorphism keys for a given private key; uses the private key for encryption.
*
* @param privateKey private key.
* @param indexList list of automorphism indices to be computed
* @return returns the evaluation keys
*/
virtual shared_ptr<std::map<usint, LPEvalKey<Element>>> EvalAutomorphismKeyGen(const LPPrivateKey<Element> privateKey,
const std::vector<usint> &indexList) const = 0;
/**
* Generates the automorphism keys needed by EvalSum; works only for packed encoding.
*
* @param privateKey private key.
* @param publicKey public key; if null, the RLWE (private-key-only) variant is used.
* @return returns the evaluation keys
*/
shared_ptr<std::map<usint, LPEvalKey<Element>>> EvalSumKeyGen(const LPPrivateKey<Element> privateKey,
const LPPublicKey<Element> publicKey) const
{
const auto cryptoParams = privateKey->GetCryptoParameters();
const auto encodingParams = cryptoParams->GetEncodingParams();
const auto elementParams = cryptoParams->GetElementParams();
usint batchSize = encodingParams->GetBatchSize();
usint m = elementParams->GetCyclotomicOrder();
// stores automorphism indices needed for EvalSum
std::vector<usint> indices;
if (!(m & (m-1))){ // Check if m is a power of 2
indices = GenerateIndices_2n(batchSize, m);
} else { // Arbitrary cyclotomics: repeated squaring of the plaintext generator
usint g = encodingParams->GetPlaintextGenerator();
for (int i = 0; i < floor(log2(batchSize)); i++)
{
indices.push_back(g);
g = (g * g) % m;
}
}
if (publicKey)
// NTRU-based scheme
return EvalAutomorphismKeyGen(publicKey, privateKey, indices);
else
// Regular RLWE scheme
return EvalAutomorphismKeyGen(privateKey, indices);
}
/**
* Sums all elements in log (batch size) time - works only with packed encoding.
* The sum ends up replicated across the slots.
*
* @param ciphertext the input ciphertext.
* @param batchSize size of the batch to be summed up
* @param &evalKeys - reference to the map of evaluation keys generated by EvalSumKeyGen.
* @return resulting ciphertext
*/
Ciphertext<Element> EvalSum(ConstCiphertext<Element> ciphertext, usint batchSize,
const std::map<usint, LPEvalKey<Element>> &evalKeys) const {
const shared_ptr<LPCryptoParameters<Element>> cryptoParams = ciphertext->GetCryptoParameters();
Ciphertext<Element> newCiphertext(new CiphertextImpl<Element>(*ciphertext));
const auto encodingParams = cryptoParams->GetEncodingParams();
const auto elementParams = cryptoParams->GetElementParams();
usint m = elementParams->GetCyclotomicOrder();
if ((encodingParams->GetBatchSize() == 0))
throw std::runtime_error("EvalSum: Packed encoding parameters 'batch size' is not set; Please check the EncodingParams passed to the crypto context.");
else
{
if (!(m & (m-1))){ // Check if m is a power of 2
newCiphertext = EvalSum_2n(batchSize, m, evalKeys,newCiphertext);
} else { // Arbitrary cyclotomics: rotate-and-add log2(batchSize) times
if (encodingParams->GetPlaintextGenerator() == 0)
throw std::runtime_error("EvalSum: Packed encoding parameters 'plaintext generator' is not set; Please check the EncodingParams passed to the crypto context.");
else
{
usint g = encodingParams->GetPlaintextGenerator();
for (int i = 0; i < floor(log2(batchSize)); i++)
{
auto ea = EvalAutomorphism(newCiphertext, g, evalKeys);
newCiphertext = EvalAdd(newCiphertext, ea);
g = (g * g) % m;
}
}
}
}
return newCiphertext;
}
/**
* Evaluates inner product in batched encoding: slot-wise multiply, then EvalSum,
* then mask all slots except slot 0 with random noise so only the result leaks.
*
* @param ciphertext1 first vector.
* @param ciphertext2 second vector.
* @param batchSize size of the batch to be summed up
* @param &evalSumKeys - reference to the map of evaluation keys generated by EvalSumKeyGen.
* @param evalMultKey - the evaluation key generated by EvalMultKeyGen.
* @return resulting ciphertext
*/
Ciphertext<Element> EvalInnerProduct(ConstCiphertext<Element> ciphertext1,
ConstCiphertext<Element> ciphertext2, usint batchSize,
const std::map<usint, LPEvalKey<Element>> &evalSumKeys,
const LPEvalKey<Element> evalMultKey) const {
Ciphertext<Element> result = EvalMult(ciphertext1, ciphertext2, evalMultKey);
result = EvalSum(result, batchSize, evalSumKeys);
// add a random number to all slots except for the first one so that no information is leaked
result = AddRandomNoise(result);
return result;
}
/**
* Evaluates inner product of a ciphertext and a plaintext in batched encoding.
*
* @param ciphertext1 first vector (ciphertext).
* @param ciphertext2 second vector (plaintext).
* @param batchSize size of the batch to be summed up
* @param &evalSumKeys - reference to the map of evaluation keys generated by EvalSumKeyGen.
* @return resulting ciphertext
*/
Ciphertext<Element> EvalInnerProduct(ConstCiphertext<Element> ciphertext1,
ConstPlaintext ciphertext2, usint batchSize,
const std::map<usint, LPEvalKey<Element>> &evalSumKeys) const {
Ciphertext<Element> result = EvalMult(ciphertext1, ciphertext2);
result = EvalSum(result, batchSize, evalSumKeys);
// add a random number to all slots except for the first one so that no information is leaked
return AddRandomNoise(result);
}
/**
* Merges multiple ciphertexts with encrypted results in slot 0 into a single ciphertext.
* The slot assignment is done based on the order of ciphertexts in the vector:
* each input's slot 0 is masked out, rotated to slot i, and accumulated.
*
* @param ciphertextVector vector of ciphertexts to be merged.
* @param &evalKeys - reference to the map of evaluation keys generated by EvalAtIndexKeyGen.
* @return resulting ciphertext
*/
Ciphertext<Element> EvalMerge(const vector<Ciphertext<Element>> &ciphertextVector,
const std::map<usint, LPEvalKey<Element>> &evalKeys) const {
if (ciphertextVector.size() == 0)
throw std::runtime_error("EvalMerge: the vector of ciphertexts to be merged cannot be empty");
const shared_ptr<LPCryptoParameters<Element>> cryptoParams = ciphertextVector[0]->GetCryptoParameters();
Ciphertext<Element> newCiphertext(new CiphertextImpl<Element>(*(ciphertextVector[0])));
auto cc = ciphertextVector[0]->GetCryptoContext();
// Mask plaintext {1,0,...}: keeps slot 0, zeroes the rest of each input.
std::vector<int64_t> plaintextVector = {1,0};
Plaintext plaintext = cc->MakePackedPlaintext(plaintextVector);
newCiphertext = EvalMult(newCiphertext,plaintext);
for (size_t i = 1; i < ciphertextVector.size(); i++)
{
// Rotate the masked slot 0 of input i into slot i, then accumulate.
newCiphertext = EvalAdd(newCiphertext,EvalAtIndex(EvalMult(ciphertextVector[i],plaintext),-(int32_t)i,evalKeys));
}
return newCiphertext;
}
/**
* EvalLinRegressBatched - Computes the parameter vector for linear regression using the least squares method.
* Currently supports only two regressors; inner products are computed over packed slots.
* @param x - matrix of regressors
* @param y - vector of dependent variables
* @param batchSize - batch size for the packed inner products
* @param evalSumKeys - evaluation keys used by EvalInnerProduct
* @param evalMultKey - the evaluation key used for multiplication
* @return the parameter vector using (x^T x)^{-1} x^T y (using least squares method)
*/
shared_ptr<Matrix<RationalCiphertext<Element>>>
EvalLinRegressBatched(const shared_ptr<Matrix<RationalCiphertext<Element>>> x,
const shared_ptr<Matrix<RationalCiphertext<Element>>> y, usint batchSize,
const std::map<usint, LPEvalKey<Element>> &evalSumKeys,
const LPEvalKey<Element> evalMultKey) const
{
Matrix<RationalCiphertext<Element>> covarianceMatrix(x->GetAllocator(), 2, 2);
Ciphertext<Element> x0 = (*x)(0, 0).GetNumerator();
Ciphertext<Element> x1 = (*x)(0, 1).GetNumerator();
Ciphertext<Element> y0 = (*y)(0, 0).GetNumerator();
//Compute the covariance matrix for X
covarianceMatrix(0, 0).SetNumerator(EvalInnerProduct(x0, x0, batchSize, evalSumKeys, evalMultKey));
covarianceMatrix(0, 1).SetNumerator(EvalInnerProduct(x0, x1, batchSize, evalSumKeys, evalMultKey));
covarianceMatrix(1, 0) = covarianceMatrix(0, 1); // symmetric 2x2 matrix
covarianceMatrix(1, 1).SetNumerator(EvalInnerProduct(x1, x1, batchSize, evalSumKeys, evalMultKey));
// Inverse via adjugate / determinant, as in EvalLinRegression.
Matrix<RationalCiphertext<Element>> cofactorMatrix = covarianceMatrix.CofactorMatrix();
Matrix<RationalCiphertext<Element>> adjugateMatrix = cofactorMatrix.Transpose();
shared_ptr<Matrix<RationalCiphertext<Element>>> result(new Matrix<RationalCiphertext<Element>>(x->GetAllocator(), 2, 1));
(*result)(0, 0).SetNumerator(EvalInnerProduct(x0, y0, batchSize, evalSumKeys, evalMultKey));
(*result)(1, 0).SetNumerator(EvalInnerProduct(x1, y0, batchSize, evalSumKeys, evalMultKey));
*result = adjugateMatrix * (*result);
RationalCiphertext<Element> determinant;
covarianceMatrix.Determinant(&determinant);
for (size_t row = 0; row < result->GetRows(); row++)
for (size_t col = 0; col < result->GetCols(); col++)
(*result)(row, col).SetDenominator(determinant.GetNumerator());
return result;
}
/**
* EvalCrossCorrelation - Computes the sliding sum of inner products (known as
* cross-correlation, sliding inner product, or sliding dot product in
* image processing).
* @param x - first vector of row vectors
* @param y - second vector of row vectors
* @param batchSize - batch size for packed encoding
* @param indexStart - starting index in the vectors of row vectors
* @param length - length of the slice in the vectors of row vectors (0 means all rows)
* @param evalSumKeys - evaluation keys used for the automorphism operation
* @param evalMultKey - the evaluation key used for multiplication
* @return sum(x_i*y_i), i.e., a sum of inner products
*/
Ciphertext<Element>
EvalCrossCorrelation(const shared_ptr<Matrix<RationalCiphertext<Element>>> x,
const shared_ptr<Matrix<RationalCiphertext<Element>>> y, usint batchSize,
usint indexStart, usint length,
const std::map<usint, LPEvalKey<Element>> &evalSumKeys,
const LPEvalKey<Element> evalMultKey) const
{
if (length == 0)
length = x->GetRows();
// NOTE(review): this bound looks inverted — indexStart + length > x->GetRows()
// appears to be the intended check; as written, a large indexStart passes. TODO confirm.
if (length - indexStart > x->GetRows())
throw std::runtime_error("The number of rows exceeds the dimension of the vector");
//additional error checking can be added here
Ciphertext<Element> result;
Ciphertext<Element> x0 = (*x)(indexStart, 0).GetNumerator();
Ciphertext<Element> y0 = (*y)(indexStart, 0).GetNumerator();
result = EvalInnerProduct(x0, y0, batchSize, evalSumKeys, evalMultKey);
// Inner products run in parallel; the accumulation into result is ordered to stay deterministic.
#pragma omp parallel for ordered schedule(dynamic)
for (usint i = indexStart + 1; i < indexStart + length; i++)
{
Ciphertext<Element> xi = (*x)(i, 0).GetNumerator();
Ciphertext<Element> yi = (*y)(i, 0).GetNumerator();
auto product = EvalInnerProduct(xi, yi, batchSize, evalSumKeys, evalMultKey);
#pragma omp ordered
{
result = EvalAdd(result,product);
}
}
return result;
}
private:
// Builds the automorphism indices needed by EvalSum_2n for power-of-two cyclotomic order m:
// repeated squares of 5, plus the final square when 2*batchSize < m, plus m-1 (conjugation).
std::vector<usint> GenerateIndices_2n(usint batchSize, usint m) const {
// stores automorphism indices needed for EvalSum
std::vector<usint> indices;
usint g = 5;
for (int i = 0; i < floor(log2(batchSize)) - 1; i++)
{
indices.push_back(g);
g = (g * g) % m;
}
if (2*batchSize<m)
indices.push_back(g);
indices.push_back(m-1);
return indices;
}
// Rotate-and-add EvalSum for power-of-two cyclotomics; mirrors GenerateIndices_2n,
// so the evalKeys map must contain exactly the indices that routine produces.
Ciphertext<Element> EvalSum_2n(usint batchSize, usint m, const std::map<usint, LPEvalKey<Element>> &evalKeys,
ConstCiphertext<Element> ciphertext) const{
Ciphertext<Element> newCiphertext(new CiphertextImpl<Element>(*ciphertext));
usint g = 5;
for (int i = 0; i < floor(log2(batchSize)) - 1; i++)
{
newCiphertext = EvalAdd(newCiphertext, EvalAutomorphism(newCiphertext, g, evalKeys));
g = (g * g) % m;
}
if (2*batchSize<m)
newCiphertext = EvalAdd(newCiphertext, EvalAutomorphism(newCiphertext, g, evalKeys));
newCiphertext = EvalAdd(newCiphertext, EvalAutomorphism(newCiphertext, m-1, evalKeys));
return newCiphertext;
}
};
/**
* @brief Abstract interface class for LBC FHE (bootstrapping) algorithms.
* @tparam Element a ring element.
*/
template <class Element>
class LPFHEAlgorithm {
public:
virtual ~LPFHEAlgorithm() {}
/**
* Virtual function to define the interface for bootstrapping evaluation of ciphertext
*
* @param &ciphertext the input ciphertext.
* @param *newCiphertext output: the refreshed (bootstrapped) ciphertext.
*/
virtual void Bootstrap(ConstCiphertext<Element> &ciphertext,
Ciphertext<Element> *newCiphertext) const = 0;
};
/**
 * @brief main implementation class to capture essential cryptoparameters of any LBC system
 *
 * Holds the two parameter sets shared by all lattice schemes: the ring
 * element parameters (m_params) and the encoding parameters
 * (m_encodingParams, which also owns the plaintext modulus).  Scheme-specific
 * parameter classes derive from this and add their own fields.
 * @tparam Element a ring element.
 */
template <typename Element>
class LPCryptoParameters : public Serializable
{
public:
virtual ~LPCryptoParameters() {}
/**
 * Returns the value of plaintext modulus p
 *
 * @return the plaintext modulus.
 */
const PlaintextModulus &GetPlaintextModulus() const { return m_encodingParams->GetPlaintextModulus(); }
/**
 * Returns the reference to IL params
 *
 * @return the ring element parameters.
 */
const shared_ptr<typename Element::Params> GetElementParams() const { return m_params; }
/**
 * Returns the reference to encoding params
 *
 * @return the encoding parameters.
 */
const EncodingParams GetEncodingParams() const { return m_encodingParams; }
/**
 * Sets the value of plaintext modulus p
 */
void SetPlaintextModulus(const PlaintextModulus &plaintextModulus) {
m_encodingParams->SetPlaintextModulus(plaintextModulus);
}
// Equality is scheme-specific; derived classes must compare their own fields.
virtual bool operator==(const LPCryptoParameters<Element>& cmp) const = 0;
bool operator!=(const LPCryptoParameters<Element>& cmp) const { return !(*this == cmp); }
/**
 * Overload to allow printing of parameters to an iostream
 * NOTE that the implementation relies on calling the virtual PrintParameters method
 * @param out - the stream to print to
 * @param item - reference to the item to print
 * @return the stream
 */
friend std::ostream& operator<<(std::ostream& out, const LPCryptoParameters& item) {
item.PrintParameters(out);
return out;
}
// Defaults below are safe no-op values; parameter classes that actually use
// relinearization / depth override them.
virtual usint GetRelinWindow() const { return 0; }
virtual int GetDepth() const { return 0; }
virtual size_t GetMaxDepth() const { return 0; }
// Throws by default: only parameter sets that carry a discrete Gaussian
// generator override this accessor.
virtual const typename Element::DggType &GetDiscreteGaussianGenerator() const {
throw std::logic_error("No DGG Available for this parameter set");
}
/**
 * Sets the reference to element params
 */
void SetElementParams(shared_ptr<typename Element::Params> params) {
m_params = params;
}
/**
 * Sets the reference to encoding params
 */
void SetEncodingParams(EncodingParams encodingParams) {
m_encodingParams = encodingParams;
}
protected:
// Constructors are protected: only concrete parameter classes may be built.
// Default: fresh encoding params with the given plaintext modulus (default 2),
// element params left unset.
LPCryptoParameters(const PlaintextModulus &plaintextModulus = 2) {
m_encodingParams.reset( new EncodingParamsImpl(plaintextModulus) );
}
LPCryptoParameters(shared_ptr<typename Element::Params> params, const PlaintextModulus &plaintextModulus) {
m_params = params;
m_encodingParams.reset( new EncodingParamsImpl(plaintextModulus) );
}
LPCryptoParameters(shared_ptr<typename Element::Params> params, EncodingParams encodingParams) {
m_params = params;
m_encodingParams = encodingParams;
}
// Copies all fields from *from (slicing to this base part), then swaps in
// the new element parameters.
LPCryptoParameters(LPCryptoParameters<Element> *from, shared_ptr<typename Element::Params> newElemParms) {
*this = *from;
m_params = newElemParms;
}
virtual void PrintParameters(std::ostream& out) const {
out << "Element Parameters: " << *m_params << std::endl;
out << "Encoding Parameters: " << *m_encodingParams << std::endl;
}
private:
//element-specific parameters
shared_ptr<typename Element::Params> m_params;
//encoding-specific parameters
EncodingParams m_encodingParams;
};
// forward decl so SchemeIdentifier works
template<typename Element> class LPPublicKeyEncryptionScheme;
/**
 * @brief Binds a scheme name to a factory function that constructs the scheme.
 *
 * GetScheme() invokes the stored factory and returns a newly created scheme
 * object; the caller takes ownership of the returned pointer.
 */
template<typename Element>
class PalisadeSchemeIdentifier {
string schemeName;
LPPublicKeyEncryptionScheme<Element> *(*schemeMaker)();
public:
// BUG FIX: the factory parameter was declared as
// LPPublicKeyEncryptionScheme<Element> (*f)() — a function returning the
// scheme BY VALUE — which does not match the pointer-returning type of the
// schemeMaker member and fails to compile when the constructor is
// instantiated.  The parameter now has the same pointer-returning
// function-pointer type as the member.
PalisadeSchemeIdentifier(string n, LPPublicKeyEncryptionScheme<Element> *(*f)())
: schemeName(n), schemeMaker(f) {}
const string& GetName() const { return schemeName; }
// Builds and returns a new scheme instance; ownership passes to the caller.
LPPublicKeyEncryptionScheme<Element> *GetScheme() const { return (*schemeMaker)(); }
};
/**
 * @brief Abstract interface for public key encryption schemes.
 *
 * Facade aggregating one optional algorithm object per capability
 * (parameter generation, encryption, PRE, multiparty, SHE, FHE, leveled
 * SHE).  Every public method is a thin wrapper: it forwards to the matching
 * m_algorithm* pointer when non-null and throws std::logic_error with a
 * "has not been enabled" message otherwise.  Most wrappers also stamp the
 * produced key or ciphertext with the key tag of the relevant input key.
 * @tparam Element a ring element.
 */
template<typename Element>
class LPPublicKeyEncryptionScheme {
protected:
//PalisadeSchemeIdentifier<Element> *SchemeId;
public:
// All capability pointers start null; concrete schemes populate them via
// Enable(feature).
LPPublicKeyEncryptionScheme() :
m_algorithmParamsGen(0), m_algorithmEncryption(0), m_algorithmPRE(0), m_algorithmMultiparty(0),
m_algorithmSHE(0), m_algorithmFHE(0), m_algorithmLeveledSHE(0) {}
// The scheme owns its algorithm objects: every non-null pointer is deleted
// here (raw owning pointers, pre-smart-pointer style).
virtual ~LPPublicKeyEncryptionScheme() {
if (this->m_algorithmParamsGen != NULL)
delete this->m_algorithmParamsGen;
if (this->m_algorithmEncryption != NULL)
delete this->m_algorithmEncryption;
if (this->m_algorithmPRE != NULL)
delete this->m_algorithmPRE;
if (this->m_algorithmMultiparty != NULL)
delete this->m_algorithmMultiparty;
if (this->m_algorithmSHE != NULL)
delete this->m_algorithmSHE;
if (this->m_algorithmFHE != NULL)
delete this->m_algorithmFHE;
if (this->m_algorithmLeveledSHE != NULL)
delete this->m_algorithmLeveledSHE;
}
virtual bool operator==(const LPPublicKeyEncryptionScheme& sch) const = 0;
bool operator!=(const LPPublicKeyEncryptionScheme& sch) const {
return !(*this == sch);
}
/**
 * Enable features with a bit mask of PKESchemeFeature codes
 * @param mask
 */
void Enable(usint mask) {
if (mask&ENCRYPTION) Enable(ENCRYPTION);
if (mask&PRE) Enable(PRE);
if (mask&SHE) Enable(SHE);
if (mask&LEVELEDSHE) Enable(LEVELEDSHE);
if (mask&MULTIPARTY) Enable(MULTIPARTY);
if (mask&FHE) Enable(FHE);
}
// Returns a bitmask of the features whose algorithm object is present.
usint GetEnabled() const {
usint flag = 0;
if (m_algorithmEncryption != NULL)
flag |= ENCRYPTION;
if (m_algorithmPRE != NULL)
flag |= PRE;
if (m_algorithmSHE != NULL)
flag |= SHE;
if (m_algorithmFHE != NULL)
flag |= FHE;
if (m_algorithmLeveledSHE != NULL)
flag |= LEVELEDSHE;
if (m_algorithmMultiparty != NULL)
flag |= MULTIPARTY;
return flag;
}
//instantiated in the scheme implementation class
virtual void Enable(PKESchemeFeature feature) = 0;
/////////////////////////////////////////
// wrapper for LPParameterSelectionAlgorithm
//
bool ParamsGen(shared_ptr<LPCryptoParameters<Element>> cryptoParams, int32_t evalAddCount = 0,
int32_t evalMultCount = 0, int32_t keySwitchCount = 0, size_t dcrtBits = 0) const {
if (this->m_algorithmParamsGen) {
return this->m_algorithmParamsGen->ParamsGen(cryptoParams, evalAddCount, evalMultCount, keySwitchCount, dcrtBits);
}
else {
throw std::logic_error("Parameter generation operation has not been implemented");
}
}
/////////////////////////////////////////
// the three functions below are wrappers for things in LPEncryptionAlgorithm (ENCRYPT)
//
Ciphertext<Element> Encrypt(const LPPublicKey<Element> publicKey,
const Element &plaintext) const {
if(this->m_algorithmEncryption) {
return this->m_algorithmEncryption->Encrypt(publicKey,plaintext);
}
else {
throw std::logic_error("Encrypt operation has not been enabled");
}
}
// Symmetric-key variant of Encrypt.
Ciphertext<Element> Encrypt(const LPPrivateKey<Element> privateKey,
const Element &plaintext) const {
if(this->m_algorithmEncryption) {
return this->m_algorithmEncryption->Encrypt(privateKey,plaintext);
}
else {
throw std::logic_error("Encrypt operation has not been enabled");
}
}
DecryptResult Decrypt(const LPPrivateKey<Element> privateKey, ConstCiphertext<Element> ciphertext,
NativePoly *plaintext) const {
if(this->m_algorithmEncryption)
return this->m_algorithmEncryption->Decrypt(privateKey,ciphertext,plaintext);
else {
throw std::logic_error("Decrypt operation has not been enabled");
}
}
// Generates a key pair and tags the public key with the secret key's tag so
// the pair can be matched later.
LPKeyPair<Element> KeyGen(CryptoContext<Element> cc, bool makeSparse) {
if(this->m_algorithmEncryption) {
auto kp = this->m_algorithmEncryption->KeyGen(cc, makeSparse);
kp.publicKey->SetKeyTag( kp.secretKey->GetKeyTag() );
return kp;
}
else {
throw std::logic_error("KeyGen operation has not been enabled");
}
}
/////////////////////////////////////////
// the three functions below are wrappers for things in LPPREAlgorithm (PRE)
//
LPEvalKey<Element> ReKeyGen(const LPPublicKey<Element> newKey,
const LPPrivateKey<Element> origPrivateKey) const {
if(this->m_algorithmPRE) {
auto rk = this->m_algorithmPRE->ReKeyGen(newKey,origPrivateKey);
rk->SetKeyTag( newKey->GetKeyTag() );
return rk;
} else {
throw std::logic_error("ReKeyGen operation has not been enabled");
}
}
LPEvalKey<Element> ReKeyGen(const LPPrivateKey<Element> newKey,
const LPPrivateKey<Element> origPrivateKey) const {
if (this->m_algorithmPRE) {
auto rk = this->m_algorithmPRE->ReKeyGen(newKey,origPrivateKey);
rk->SetKeyTag( newKey->GetKeyTag() );
return rk;
} else {
throw std::logic_error("ReKeyGen operation has not been enabled");
}
}
// Re-encrypted ciphertext is tagged with the evaluation key's tag, i.e. the
// recipient's key identity.
Ciphertext<Element> ReEncrypt(const LPEvalKey<Element> evalKey,
ConstCiphertext<Element> ciphertext) const {
if(this->m_algorithmPRE) {
auto ct = this->m_algorithmPRE->ReEncrypt(evalKey,ciphertext);
ct->SetKeyTag( evalKey->GetKeyTag() );
return ct;
} else {
throw std::logic_error("ReEncrypt operation has not been enabled");
}
}
/////////////////////////////////////////
// the three functions below are wrappers for things in LPMultipartyAlgorithm (Multiparty)
//
// Wrapper for Multiparty Key Gen
// FIXME check key ID for multiparty
LPKeyPair<Element> MultipartyKeyGen(CryptoContext<Element> cc,
const LPPublicKey<Element> pk1,
bool makeSparse, bool PRE) {
if(this->m_algorithmMultiparty) {
auto k = this->m_algorithmMultiparty->MultipartyKeyGen(cc, pk1, makeSparse, PRE);
k.publicKey->SetKeyTag( k.secretKey->GetKeyTag() );
return k;
} else {
throw std::logic_error("MultipartyKeyGen operation has not been enabled");
}
}
// Wrapper for Multiparty Key Gen
// FIXME key IDs for multiparty
LPKeyPair<Element> MultipartyKeyGen(CryptoContext<Element> cc,
const vector<LPPrivateKey<Element>>& secretKeys,
bool makeSparse) {
if(this->m_algorithmMultiparty) {
auto k = this->m_algorithmMultiparty->MultipartyKeyGen(cc, secretKeys, makeSparse);
k.publicKey->SetKeyTag( k.secretKey->GetKeyTag() );
return k;
} else {
throw std::logic_error("MultipartyKeyGen operation has not been enabled");
}
}
// FIXME key IDs for multiparty
Ciphertext<Element> MultipartyDecryptMain(const LPPrivateKey<Element> privateKey,
ConstCiphertext<Element> ciphertext) const {
if(this->m_algorithmMultiparty) {
auto ct = this->m_algorithmMultiparty->MultipartyDecryptMain(privateKey,ciphertext);
ct->SetKeyTag( privateKey->GetKeyTag() );
return ct;
} else {
throw std::logic_error("MultipartyDecryptMain operation has not been enabled");
}
}
// FIXME key IDs for multiparty
Ciphertext<Element> MultipartyDecryptLead(const LPPrivateKey<Element> privateKey,
ConstCiphertext<Element> ciphertext) const {
if(this->m_algorithmMultiparty) {
auto ct = this->m_algorithmMultiparty->MultipartyDecryptLead(privateKey,ciphertext);
ct->SetKeyTag( privateKey->GetKeyTag() );
return ct;
} else {
throw std::logic_error("MultipartyDecryptLead operation has not been enabled");
}
}
DecryptResult MultipartyDecryptFusion(const vector<Ciphertext<Element>>& ciphertextVec,
NativePoly *plaintext) const {
if(this->m_algorithmMultiparty) {
return this->m_algorithmMultiparty->MultipartyDecryptFusion(ciphertextVec,plaintext);
} else {
throw std::logic_error("MultipartyDecrypt operation has not been enabled");
}
}
/////////////////////////////////////////
// the three functions below are wrappers for things in LPSHEAlgorithm (SHE)
//
Ciphertext<Element> AddRandomNoise(ConstCiphertext<Element> ciphertext) const {
if (this->m_algorithmSHE)
return this->m_algorithmSHE->AddRandomNoise(ciphertext);
else {
throw std::logic_error("AddRandomNoise operation has not been enabled");
}
}
Ciphertext<Element> EvalAdd(ConstCiphertext<Element> ciphertext1,
ConstCiphertext<Element> ciphertext2) const {
if (this->m_algorithmSHE) {
auto ct = this->m_algorithmSHE->EvalAdd(ciphertext1, ciphertext2);
return ct;
} else {
throw std::logic_error("EvalAdd operation has not been enabled");
}
}
// ciphertext + plaintext variant.
Ciphertext<Element> EvalAdd(ConstCiphertext<Element> ciphertext1,
ConstPlaintext plaintext) const {
if (this->m_algorithmSHE) {
auto ct = this->m_algorithmSHE->EvalAdd(ciphertext1, plaintext);
return ct;
} else {
throw std::logic_error("EvalAdd operation has not been enabled");
}
}
Ciphertext<Element> EvalSub(ConstCiphertext<Element> ciphertext1,
ConstCiphertext<Element> ciphertext2) const {
if (this->m_algorithmSHE) {
auto ct = this->m_algorithmSHE->EvalSub(ciphertext1, ciphertext2);
return ct;
} else {
throw std::logic_error("EvalSub operation has not been enabled");
}
}
Ciphertext<Element> EvalSub(ConstCiphertext<Element> ciphertext1,
ConstPlaintext plaintext) const {
if (this->m_algorithmSHE) {
auto ct = this->m_algorithmSHE->EvalSub(ciphertext1, plaintext);
return ct;
} else {
throw std::logic_error("EvalSub operation has not been enabled");
}
}
// Multiplication without relinearization (ciphertext size grows).
Ciphertext<Element> EvalMult(ConstCiphertext<Element> ciphertext1,
ConstCiphertext<Element> ciphertext2) const {
if (this->m_algorithmSHE) {
auto ct = this->m_algorithmSHE->EvalMult(ciphertext1, ciphertext2);
return ct;
} else {
throw std::logic_error("EvalMult operation has not been enabled");
}
}
Ciphertext<Element> EvalMult(ConstCiphertext<Element> ciphertext,
ConstPlaintext plaintext) const {
if (this->m_algorithmSHE)
return this->m_algorithmSHE->EvalMult(ciphertext, plaintext);
else {
throw std::logic_error("EvalMult operation has not been enabled");
}
}
// Multiplication followed by key switching with the supplied eval key.
Ciphertext<Element> EvalMult(ConstCiphertext<Element> ciphertext1,
ConstCiphertext<Element> ciphertext2,
const LPEvalKey<Element> evalKey) const {
if (this->m_algorithmSHE) {
auto ct = this->m_algorithmSHE->EvalMult(ciphertext1, ciphertext2, evalKey);
return ct;
} else {
throw std::logic_error("EvalMult operation has not been enabled");
}
}
Ciphertext<Element> EvalMultMany(const vector<Ciphertext<Element>>& ciphertext, const vector<LPEvalKey<Element>> &evalKeys) const {
if (this->m_algorithmSHE){
return this->m_algorithmSHE->EvalMultMany(ciphertext, evalKeys);
}
else {
throw std::logic_error("EvalMultMany operation has not been enabled");
}
}
Ciphertext<Element> EvalNegate(ConstCiphertext<Element> ciphertext) const {
if (this->m_algorithmSHE) {
auto ct = this->m_algorithmSHE->EvalNegate(ciphertext);
return ct;
} else {
throw std::logic_error("EvalNegate operation has not been enabled");
}
}
// Generated automorphism keys are all tagged with the original secret key's tag.
shared_ptr<std::map<usint, LPEvalKey<Element>>> EvalAutomorphismKeyGen(const LPPublicKey<Element> publicKey,
const LPPrivateKey<Element> origPrivateKey,
const std::vector<usint> &indexList) const {
if (this->m_algorithmSHE) {
auto km = this->m_algorithmSHE->EvalAutomorphismKeyGen(publicKey,origPrivateKey,indexList);
for( auto& k : *km )
k.second->SetKeyTag( origPrivateKey->GetKeyTag() );
return km;
} else
throw std::logic_error("EvalAutomorphismKeyGen operation has not been enabled");
}
shared_ptr<std::map<usint, LPEvalKey<Element>>> EvalAtIndexKeyGen(const LPPublicKey<Element> publicKey,
const LPPrivateKey<Element> origPrivateKey,
const std::vector<int32_t> &indexList) const {
if (this->m_algorithmSHE) {
auto km = this->m_algorithmSHE->EvalAtIndexKeyGen(publicKey,origPrivateKey,indexList);
for( auto& k : *km )
k.second->SetKeyTag( origPrivateKey->GetKeyTag() );
return km;
} else
throw std::logic_error("EvalAtIndexKeyGen operation has not been enabled");
}
Ciphertext<Element> EvalAutomorphism(ConstCiphertext<Element> ciphertext, usint i,
const std::map<usint, LPEvalKey<Element>> &evalKeys) const {
if (this->m_algorithmSHE) {
auto ct = this->m_algorithmSHE->EvalAutomorphism(ciphertext, i, evalKeys);
return ct;
} else
throw std::logic_error("EvalAutomorphism operation has not been enabled");
}
Ciphertext<Element> EvalAtIndex(ConstCiphertext<Element> ciphertext, usint i,
const std::map<usint, LPEvalKey<Element>> &evalKeys) const {
if (this->m_algorithmSHE) {
auto ct = this->m_algorithmSHE->EvalAtIndex(ciphertext, i, evalKeys);
return ct;
} else
throw std::logic_error("EvalAtIndex operation has not been enabled");
}
// Secret-key-only variant of EvalAutomorphismKeyGen.
shared_ptr<std::map<usint, LPEvalKey<Element>>> EvalAutomorphismKeyGen(const LPPrivateKey<Element> privateKey,
const std::vector<usint> &indexList) const {
if (this->m_algorithmSHE) {
auto km = this->m_algorithmSHE->EvalAutomorphismKeyGen(privateKey, indexList);
for( auto& k : *km )
k.second->SetKeyTag( privateKey->GetKeyTag() );
return km;
} else
throw std::logic_error("EvalAutomorphismKeyGen operation has not been enabled");
}
shared_ptr<std::map<usint, LPEvalKey<Element>>> EvalSumKeyGen(
const LPPrivateKey<Element> privateKey,
const LPPublicKey<Element> publicKey) const {
if (this->m_algorithmSHE) {
auto km = this->m_algorithmSHE->EvalSumKeyGen(privateKey,publicKey);
for( auto& k : *km ) {
k.second->SetKeyTag( privateKey->GetKeyTag() );
}
return km;
} else
throw std::logic_error("EvalSumKeyGen operation has not been enabled");
}
Ciphertext<Element> EvalSum(ConstCiphertext<Element> ciphertext, usint batchSize,
const std::map<usint, LPEvalKey<Element>> &evalKeys) const {
if (this->m_algorithmSHE) {
auto ct = this->m_algorithmSHE->EvalSum(ciphertext, batchSize, evalKeys);
return ct;
} else
throw std::logic_error("EvalSum operation has not been enabled");
}
Ciphertext<Element> EvalInnerProduct(ConstCiphertext<Element> ciphertext1,
ConstCiphertext<Element> ciphertext2, usint batchSize,
const std::map<usint, LPEvalKey<Element>> &evalSumKeys,
const LPEvalKey<Element> evalMultKey) const {
if (this->m_algorithmSHE) {
auto ct = this->m_algorithmSHE->EvalInnerProduct(ciphertext1, ciphertext2, batchSize, evalSumKeys, evalMultKey);
ct->SetKeyTag( evalSumKeys.begin()->second->GetKeyTag() );
return ct;
} else
throw std::logic_error("EvalInnerProduct operation has not been enabled");
}
Ciphertext<Element> EvalMerge(const vector<Ciphertext<Element>> &ciphertextVector,
const std::map<usint, LPEvalKey<Element>> &evalKeys) const {
if (this->m_algorithmSHE) {
auto ct = this->m_algorithmSHE->EvalMerge(ciphertextVector,evalKeys);
return ct;
} else
throw std::logic_error("EvalMerge operation has not been enabled");
}
// ciphertext x plaintext inner product; no relinearization key needed.
Ciphertext<Element> EvalInnerProduct(ConstCiphertext<Element> ciphertext1,
ConstPlaintext ciphertext2, usint batchSize,
const std::map<usint, LPEvalKey<Element>> &evalSumKeys) const {
if (this->m_algorithmSHE)
return this->m_algorithmSHE->EvalInnerProduct(ciphertext1, ciphertext2, batchSize, evalSumKeys);
else
throw std::logic_error("EvalInnerProduct operation has not been enabled");
}
shared_ptr<Matrix<RationalCiphertext<Element>>>
EvalLinRegressBatched(const shared_ptr<Matrix<RationalCiphertext<Element>>> x,
const shared_ptr<Matrix<RationalCiphertext<Element>>> y, usint batchSize,
const std::map<usint, LPEvalKey<Element>> &evalSumKeys,
const LPEvalKey<Element> evalMultKey) const {
if (this->m_algorithmSHE) {
string kID = evalMultKey->GetKeyTag();
auto ctm = this->m_algorithmSHE->EvalLinRegressBatched(x, y, batchSize, evalSumKeys, evalMultKey);
// Tag every matrix entry with the multiplication key's ID.
for( size_t r = 0; r < ctm->GetRows(); r++ )
for( size_t c = 0; c < ctm->GetCols(); c++ )
(*ctm)(r,c).SetKeyTag(kID);
return ctm;
} else
throw std::logic_error("EvalLinRegressionBatched operation has not been enabled");
}
Ciphertext<Element>
EvalCrossCorrelation(const shared_ptr<Matrix<RationalCiphertext<Element>>> x,
const shared_ptr<Matrix<RationalCiphertext<Element>>> y, usint batchSize,
usint indexStart, usint length,
const std::map<usint, LPEvalKey<Element>> &evalSumKeys,
const LPEvalKey<Element> evalMultKey) const {
if (this->m_algorithmSHE) {
auto ct = this->m_algorithmSHE->EvalCrossCorrelation(x, y, batchSize, indexStart, length, evalSumKeys, evalMultKey);
// FIXME: mark with which key?
return ct;
} else
throw std::logic_error("EvalCrossCorrelation operation has not been enabled");
}
/**
 * EvalLinRegression - Computes the parameter vector for linear regression using the least squares method
 * @param x - matrix of regressors
 * @param y - vector of dependent variables
 * @return the parameter vector using (x^T x)^{-1} x^T y (using least squares method)
 */
shared_ptr<Matrix<RationalCiphertext<Element>>>
EvalLinRegression(const shared_ptr<Matrix<RationalCiphertext<Element>>> x,
const shared_ptr<Matrix<RationalCiphertext<Element>>> y) const
{
if (this->m_algorithmSHE) {
auto ctm = this->m_algorithmSHE->EvalLinRegression(x, y);
// FIXME mark with which key??
return ctm;
} else {
throw std::logic_error("EvalLinRegression operation has not been enabled");
}
}
LPEvalKey<Element> KeySwitchGen(
const LPPrivateKey<Element> originalPrivateKey,
const LPPrivateKey<Element> newPrivateKey) const {
if (this->m_algorithmSHE) {
auto kp = this->m_algorithmSHE->KeySwitchGen(originalPrivateKey, newPrivateKey);
kp->SetKeyTag( newPrivateKey->GetKeyTag() );
return kp;
} else {
throw std::logic_error("KeySwitchGen operation has not been enabled");
}
}
Ciphertext<Element> KeySwitch(
const LPEvalKey<Element> keySwitchHint,
ConstCiphertext<Element> cipherText) const {
if (this->m_algorithmSHE) {
auto ct = this->m_algorithmSHE->KeySwitch(keySwitchHint, cipherText);
return ct;
}
else {
throw std::logic_error("KeySwitch operation has not been enabled");
}
}
LPEvalKey<Element> KeySwitchRelinGen(const LPPublicKey<Element> newKey, const LPPrivateKey<Element> origPrivateKey) const {
if (this->m_algorithmSHE) {
auto kp = this->m_algorithmSHE->KeySwitchRelinGen(newKey, origPrivateKey);
kp->SetKeyTag( newKey->GetKeyTag() );
return kp;
} else {
throw std::logic_error("KeySwitchRelinGen operation has not been enabled");
}
}
Ciphertext<Element> KeySwitchRelin(const LPEvalKey<Element> evalKey,
ConstCiphertext<Element> ciphertext) const {
if (this->m_algorithmSHE) {
auto ct = this->m_algorithmSHE->KeySwitchRelin(evalKey, ciphertext);
ct->SetKeyTag( evalKey->GetKeyTag() );
return ct;
} else {
throw std::logic_error("KeySwitchRelin operation has not been enabled");
}
}
LPEvalKey<Element> EvalMultKeyGen(const LPPrivateKey<Element> originalPrivateKey) const {
if(this->m_algorithmSHE) {
auto ek = this->m_algorithmSHE->EvalMultKeyGen(originalPrivateKey);
ek->SetKeyTag( originalPrivateKey->GetKeyTag() );
return ek;
} else {
throw std::logic_error("EvalMultKeyGen operation has not been enabled");
}
}
// Generates the chain of relinearization keys used for deeper multiplications.
vector<LPEvalKey<Element>> EvalMultKeysGen(const LPPrivateKey<Element> originalPrivateKey) const {
if(this->m_algorithmSHE){
auto ek = this->m_algorithmSHE->EvalMultKeysGen(originalPrivateKey);
for(size_t i=0; i<ek.size(); i++)
ek[i]->SetKeyTag( originalPrivateKey->GetKeyTag() );
return ek;
}
else {
throw std::logic_error("EvalMultKeysGen operation has not been enabled");
}
}
Ciphertext<Element> EvalMultAndRelinearize(ConstCiphertext<Element> ct1,
ConstCiphertext<Element> ct2, const vector<LPEvalKey<Element>> &ek) const {
if(this->m_algorithmSHE)
return this->m_algorithmSHE->EvalMultAndRelinearize(ct1, ct2, ek);
else {
throw std::logic_error("EvalMultAndRelinearize operation has not been enabled");
}
}
/////////////////////////////////////////
// the functions below are wrappers for things in LPFHEAlgorithm (FHE)
//
// TODO: Add Bootstrap and any other FHE methods
/////////////////////////////////////////
// the functions below are wrappers for things in LPLeveledSHEAlgorithm (LEVELEDSHE)
//
Ciphertext<Element> ModReduce(ConstCiphertext<Element> cipherText) const {
if(this->m_algorithmLeveledSHE) {
auto ct = this->m_algorithmLeveledSHE->ModReduce(cipherText);
ct->SetKeyTag( cipherText->GetKeyTag() );
return ct;
}
else{
throw std::logic_error("ModReduce operation has not been enabled");
}
}
Ciphertext<Element> RingReduce(ConstCiphertext<Element> cipherText, const LPEvalKey<Element> keySwitchHint) const {
if(this->m_algorithmLeveledSHE){
auto ct = this->m_algorithmLeveledSHE->RingReduce(cipherText,keySwitchHint);
ct->SetKeyTag( keySwitchHint->GetKeyTag() );
return ct;
}
else{
throw std::logic_error("RingReduce operation has not been enabled");
}
}
bool CanRingReduce(usint ringDimension, const std::vector<BigInteger> &moduli, const double rootHermiteFactor) const {
if (this->m_algorithmLeveledSHE) {
return this->m_algorithmLeveledSHE->CanRingReduce(ringDimension, moduli, rootHermiteFactor);
}
else {
throw std::logic_error("CanRingReduce operation has not been enabled");
}
}
Ciphertext<Element> ComposedEvalMult(
ConstCiphertext<Element> cipherText1,
ConstCiphertext<Element> cipherText2,
const LPEvalKey<Element> quadKeySwitchHint) const {
if(this->m_algorithmLeveledSHE){
auto ct = this->m_algorithmLeveledSHE->ComposedEvalMult(cipherText1,cipherText2,quadKeySwitchHint);
ct->SetKeyTag( quadKeySwitchHint->GetKeyTag() );
return ct;
}
else{
throw std::logic_error("ComposedEvalMult operation has not been enabled");
}
}
Ciphertext<Element> LevelReduce(ConstCiphertext<Element> cipherText1,
const LPEvalKeyNTRU<Element> linearKeySwitchHint) const {
if(this->m_algorithmLeveledSHE){
auto ct = this->m_algorithmLeveledSHE->LevelReduce(cipherText1,linearKeySwitchHint);
ct->SetKeyTag( linearKeySwitchHint->GetKeyTag() );
return ct;
}
else{
throw std::logic_error("LevelReduce operation has not been enabled");
}
}
const LPEncryptionAlgorithm<Element>& getAlgorithm() const { return *m_algorithmEncryption; }
protected:
// Owning raw pointers, one per optional capability; see destructor.
LPParameterGenerationAlgorithm<Element> *m_algorithmParamsGen;
LPEncryptionAlgorithm<Element> *m_algorithmEncryption;
LPPREAlgorithm<Element> *m_algorithmPRE;
LPMultipartyAlgorithm<Element> *m_algorithmMultiparty;
LPSHEAlgorithm<Element> *m_algorithmSHE;
LPFHEAlgorithm<Element> *m_algorithmFHE;
LPLeveledSHEAlgorithm<Element> *m_algorithmLeveledSHE;
};
} // namespace lbcrypto ends
#endif
|
GB_binop__max_fp32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__max_fp32)
// A.*B function (eWiseMult): GB (_AemultB_08__max_fp32)
// A.*B function (eWiseMult): GB (_AemultB_02__max_fp32)
// A.*B function (eWiseMult): GB (_AemultB_04__max_fp32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__max_fp32)
// A*D function (colscale): GB (_AxD__max_fp32)
// D*A function (rowscale): GB (_DxB__max_fp32)
// C+=B function (dense accum): GB (_Cdense_accumB__max_fp32)
// C+=b function (dense accum): GB (_Cdense_accumb__max_fp32)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__max_fp32)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__max_fp32)
// C=scalar+B GB (_bind1st__max_fp32)
// C=scalar+B' GB (_bind1st_tran__max_fp32)
// C=A+scalar GB (_bind2nd__max_fp32)
// C=A'+scalar GB (_bind2nd_tran__max_fp32)
// C type: float
// A type: float
// A pattern? 0
// B type: float
// B pattern? 0
// BinaryOp: cij = fmaxf (aij, bij)
#define GB_ATYPE \
float
#define GB_BTYPE \
float
#define GB_CTYPE \
float
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
float aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
float bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
float t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = fmaxf (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MAX || GxB_NO_FP32 || GxB_NO_MAX_FP32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// Auto-generated kernel for the MAX_FP32 operator: the actual loop lives in
// the included template, specialized via the GB_* macros defined above.
void GB (_Cdense_ewise3_accum__max_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Auto-generated kernel: body supplied by the included template, specialized
// for float / fmaxf via the GB_* macros above.
void GB (_Cdense_ewise3_noaccum__max_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// Returns GrB_NO_VALUE when this operator is disabled at compile time
// (GB_DISABLE), telling the caller to fall back to the generic kernel.
GrB_Info GB (_Cdense_accumB__max_fp32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__max_fp32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type float
float bwork = (*((float *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE: unreachable (the block above always returns); harmless artifact of
// the code generator — do not edit, this file is auto-generated.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__max_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Cx is the typed view of C's value array filled in by the template.
float *restrict Cx = (float *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__max_fp32)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Cx is the typed view of C's value array filled in by the template.
float *restrict Cx = (float *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__max_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Workspaces for slicing M, A, and B by entry; freed by GB_FREE_WORKSPACE.
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
float alpha_scalar ;
float beta_scalar ;
// eWiseUnion substitutes alpha/beta for entries missing from A or B;
// the scalars are only unpacked (and only valid) in that mode.
if (is_eWiseUnion)
{
alpha_scalar = (*((float *) alpha_scalar_in)) ;
beta_scalar = (*((float *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__max_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__max_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// For MAX (commutative), GB_BINOP_FLIP is 0 above, so only the non-flipped
// branch below is compiled; the flip machinery is kept for non-commutative
// operators generated from the same template.
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// Auto-generated eWiseMult kernel (MAX_FP32); the loop driven by the sparse
// mask M comes from the included GB_emult_04_template.c.
GrB_Info GB (_AemultB_04__max_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// Auto-generated eWiseMult kernel (MAX_FP32) producing a bitmap result; the
// loop body lives in the included GB_bitmap_emult_template.c.
GrB_Info GB (_AemultB_bitmap__max_fp32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cz [k] = fmaxf (x, Bz [k]) for every entry present in B (per the Bb bitmap).
GrB_Info GB (_bind1st__max_fp32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float x = (*((float *) x_input)) ;
float *Cz = (float *) Cx_output ;
float *Bz = (float *) Bx_input ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < bnz ; k++)
{
// skip entries not present in the bitmap
if (GBB (Bb, k))
{
float bkj = GBX (Bz, k, false) ;
Cz [k] = fmaxf (x, bkj) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cz [k] = fmaxf (Az [k], y) for every entry present in A (per the Ab bitmap).
GrB_Info GB (_bind2nd__max_fp32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float y = (*((float *) y_input)) ;
float *Cz = (float *) Cx_output ;
float *Az = (float *) Ax_input ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
// skip entries not present in the bitmap
if (GBB (Ab, k))
{
float akj = GBX (Az, k, false) ;
Cz [k] = fmaxf (akj, y) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by the GB_unop_transpose.c template included below.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = GBX (Ax, pA, false) ; \
Cx [pC] = fmaxf (x, aij) ; \
}
// C = max (x, A'): the scalar x is bound as the first operand.
GrB_Info GB (_bind1st_tran__max_fp32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
float
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float x = (*((const float *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// GB_ATYPE is re-defined (to the same value) below — presumably generator
// boilerplate that restores the macro for code following this kernel.
#undef GB_ATYPE
#define GB_ATYPE \
float
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by the GB_unop_transpose.c template included below.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = GBX (Ax, pA, false) ; \
Cx [pC] = fmaxf (aij, y) ; \
}
// C = max (A', y): the scalar y is bound as the second operand.
GrB_Info GB (_bind2nd_tran__max_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float y = (*((const float *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
Step_TBB.h | #ifndef __Step_TBB_h__
#define __Step_TBB_h__
#include <chrono>
#include "Step.h"
// Substep callback invoked by the Step machinery; implemented elsewhere.
int cal_substep_func_TBB(void* _self);
// Step subclass that runs the sub-stepping loop with a configurable number
// of worker threads (TBB backend, per the class name — TODO confirm) and
// measures solve()'s elapsed wall-clock time with std::chrono.
class Step_TBB : public Step
{
protected:
size_t thread_num; // worker-thread count, set via set_thread_num()
double new_time; // presumably the time reached by the current substep — confirm in solve()
double step_time_minus_tol; // presumably step end time minus tolerance (loop bound) — confirm
double next_output_time_minus_tol; // presumably next output time minus tolerance — confirm
bool output_not_needed, step_not_end; // loop-control flags used by solve()
bool continue_cal; // presumably cleared by exit/abort_calculation() — confirm
std::chrono::high_resolution_clock::time_point t0, t1; // timing start/stop stamps
std::chrono::microseconds cpu_time; // elapsed time reported by get_time()
public:
Step_TBB(const char* _name, const char* _type = "Step_TBB",
CalSubstepFunc _cal_substep_func = &cal_substep_func_TBB);
~Step_TBB();
inline void set_thread_num(size_t th_num) noexcept { thread_num = th_num; }
int solve() override;
// NOTE: this function needs to be called from a single thread only
// (original note: put it inside #pragma omp master).
void continue_calculation();
void exit_calculation();
void abort_calculation();
// elapsed time of solve(), in microseconds
inline long long get_time() const noexcept { return std::chrono::duration_cast<std::chrono::microseconds>(cpu_time).count(); }
};
#endif |
3d7pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Compute RESULT = X - Y for two `struct timeval` values.
 *
 * Returns 1 if the difference is negative, otherwise 0.
 *
 * NOTE: *y is deliberately normalized in place (same behavior as the
 * classic glibc "Elapsed Time" example this function comes from). */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Carry from the seconds field so that x->tv_usec >= y->tv_usec. */
    if (x->tv_usec < y->tv_usec) {
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow;
        y->tv_sec += borrow;
    }
    /* Opposite adjustment if the microsecond gap exceeds one second. */
    if (x->tv_usec - y->tv_usec > 1000000) {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }
    /* tv_usec of the result is now certainly non-negative. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    return x->tv_sec < y->tv_sec;
}
// Driver for the order-1 3D 7-point stencil benchmark: allocates two time
// planes, initializes them pseudo-randomly, runs the PLUTO/CLooG-generated
// tiled time loop TESTS times, and reports the best wall-clock time.
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
// Grid sizes (+2 for a one-cell boundary layer) and time-step count come
// from argv. NOTE(review): Nx/Ny/Nz/Nt remain uninitialized when fewer
// arguments are passed, and are still used below — undefined behavior;
// callers must supply all four arguments.
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
// Two time planes A[0]/A[1] of an Nz x Ny x Nx grid, allocated row by row.
// NOTE(review): none of these malloc results are checked.
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
// (-1 terminates the tile-size list consumed by the generated loop nest).
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 32;
tile_size[1] = 32;
tile_size[2] = 4;
tile_size[3] = 2048;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// stencil coefficients
const double alpha = 0.0876;
const double beta = 0.0765;
// initialize variables
//
// Deterministic pseudo-random initial values (fixed seed).
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
/* Copyright (C) 1991-2014 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
/* This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it. */
/* glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default. */
/* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
Unicode 6.0. */
/* We do not support C11 <threads.h>. */
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
// Auto-generated time-tiled loop nest: t5 is the time step; t6/t7/t8 are
// the z/y/x coordinates shifted by t5 (hence the (-t5+t6) style indices).
// The t2 loop (a tile dimension) is the parallel one.
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
for (t1=-1;t1<=floord(Nt-2,16);t1++) {
lbp=max(ceild(t1,2),ceild(32*t1-Nt+3,32));
ubp=min(floord(Nt+Nz-4,32),floord(16*t1+Nz+13,32));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(0,ceild(32*t2-Nz,4)),4*t1);t3<=min(min(min(floord(Nt+Ny-4,4),floord(16*t1+Ny+29,4)),floord(32*t2+Ny+28,4)),floord(32*t1-32*t2+Nz+Ny+27,4));t3++) {
for (t4=max(max(max(0,ceild(t1-127,128)),ceild(32*t2-Nz-2044,2048)),ceild(4*t3-Ny-2044,2048));t4<=min(min(min(min(floord(4*t3+Nx,2048),floord(Nt+Nx-4,2048)),floord(16*t1+Nx+29,2048)),floord(32*t2+Nx+28,2048)),floord(32*t1-32*t2+Nz+Nx+27,2048));t4++) {
for (t5=max(max(max(max(max(0,16*t1),32*t1-32*t2+1),32*t2-Nz+2),4*t3-Ny+2),2048*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,16*t1+31),32*t2+30),4*t3+2),2048*t4+2046),32*t1-32*t2+Nz+29);t5++) {
for (t6=max(max(32*t2,t5+1),-32*t1+32*t2+2*t5-31);t6<=min(min(32*t2+31,-32*t1+32*t2+2*t5),t5+Nz-2);t6++) {
for (t7=max(4*t3,t5+1);t7<=min(4*t3+3,t5+Ny-2);t7++) {
lbv=max(2048*t4,t5+1);
ubv=min(2048*t4+2047,t5+Nx-2);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
// Elapsed wall-clock time for this run; keep the best of all TESTS runs.
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays (Causing performance degradation
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
relu.c | /*
* Copyright (C) 2016-2022 T-Head Semiconductor Co., Ltd. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the License); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an AS IS BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* CSI-NN2 version 1.12.x */
#include "csi_ref.h"
#include "csi_utils.h"
/* ReLU activation: negative inputs (and NaN, which fails the comparison)
 * map to 0; non-negative inputs pass through unchanged. */
static float relu(float x)
{
    if (x > 0) {
        return x;
    }
    return 0;
}
/* Reference float32 ReLU: applies relu() element-wise over the whole input
 * tensor (element count = product of all dimensions) into the output tensor.
 * The loop is parallelized over 8 OpenMP threads, as in the original. */
int csi_ref_relu_f32(struct csi_tensor *input, struct csi_tensor *output,
                     struct relu_params *params)
{
    float *src = input->data;
    float *dst = output->data;
    int count = 1;
    for (int d = 0; d < input->dim_count; d++) {
        count *= input->dim[d];
    }
#pragma omp parallel for num_threads(8)
    for (int i = 0; i < count; i++) {
        dst[i] = relu(src[i]);
    }
    return CSINN_TRUE;
}
/* Quantized ReLU entry point: delegates to the generic single-input /
 * single-output callback helper with csi_ref_relu_f32 as the kernel
 * (the helper presumably handles de/re-quantization — see
 * csi_ref_siso_callback_base). */
int csi_ref_relu_quant(struct csi_tensor *input, struct csi_tensor *output,
struct relu_params *params)
{
return csi_ref_siso_callback_base(input, output, params, csi_ref_relu_f32);
}
|
GB_unaryop__ainv_int32_bool.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_int32_bool
// op(A') function: GB_tran__ainv_int32_bool
// C type: int32_t
// A type: bool
// cast: int32_t cij = (int32_t) aij
// unaryop: cij = -aij
#define GB_ATYPE \
bool
#define GB_CTYPE \
int32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, x) \
int32_t z = (int32_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_INT32 || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = -(int32_t) Ax [p] for all p (AINV of a bool input, typecast to
// int32). GB_CAST_OP expands to the cast + negate defined by the macros at
// the top of this file. Returns GrB_NO_VALUE when disabled by GB_control.h.
GrB_Info GB_unop__ainv_int32_bool
(
int32_t *restrict Cx,
const bool *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Same AINV operator as GB_unop__ainv_int32_bool, fused with a transpose.
// The work happens in the included GB_unaryop_transpose.c template (phase 2
// of the two-phase transpose, per GB_PHASE_2_OF_2).
GrB_Info GB_tran__ainv_int32_bool
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
cluster_op_impl.h | /* The MIT License (MIT)
*
* (c) Jürgen Simon 2014 (juergen.simon@uni-bonn.de)
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef M3D_OPERATION_CLUSTER_IMPL_H
#define M3D_OPERATION_CLUSTER_IMPL_H
#include <meanie3D/defines.h>
#include <meanie3D/namespaces.h>
#include <meanie3D/parallel.h>
#include <meanie3D/utils.h>
#include "detection.h"
#include <vector>
namespace m3D {
#if WRITE_MODES
// Debug-only counter of completed clustering passes; used below to number
// the "-modes-<n>.vtk" debug output files.
static size_t s_pass_counter = 0;
// Returns the current pass number.
template <typename T>
size_t
ClusterOperation<T>::pass_counter()
{
return s_pass_counter;
};
// Resets the pass number to zero (start of a new clustering run).
template <typename T>
void
ClusterOperation<T>::reset_pass_counter()
{
s_pass_counter = 0;
};
// Advances the pass number by one.
template <typename T>
void
ClusterOperation<T>::increment_pass_counter()
{
s_pass_counter++;
}
#endif
#pragma mark -
#pragma mark Clustering Code
template<typename T>
ClusterList<T> *
ClusterOperation<T>::cluster() {
    // Runs one mean-shift clustering pass over the feature space:
    //   1) compute the mean-shift vector of every point (parallelized),
    //   2) aggregate the resulting vector graph into clusters,
    //   3) assign fresh uuids/ids and post-process the point sets.
    // Returns a newly allocated ClusterList; ownership passes to the caller.
    using namespace m3D::utils::vectors;

    const CoordinateSystem<T> *cs = m_context.coord_system;
    vector<T> resolution;
    if (m_context.search_params->search_type() == SearchTypeRange) {
        RangeSearchParams<T> *p = (RangeSearchParams<T> *) m_context.search_params;
        // Physical grid resolution in the spatial range
        resolution = cs->resolution();
        resolution = ((T) 4.0) * resolution;
        // Supplement with bandwidth values for the value range
        for (size_t i = resolution.size(); i < p->bandwidth.size(); i++) {
            resolution.push_back(p->bandwidth[i]);
        }
    } else {
        KNNSearchParams<T> *p = (KNNSearchParams<T> *) m_context.search_params;
        resolution = p->resolution;
    }

    if (m_context.show_progress) {
        cout << endl << "Creating meanshift vector graph ...";
        start_timer();
        m_progress_bar = new boost::progress_display(this->feature_space->size());
    }

    // Create an empty cluster list
    ClusterList<T> *cluster_list = new ClusterList<T>(
            m_params.filename,
            m_params.variables,
            m_params.dimensions,
            m_params.dimension_variables,
            m_params.time_index);

    // Guard against empty feature-space
    if (this->feature_space->points.size() == 0) {
        cout << "Feature space is empty" << endl;
        return cluster_list;
    }

    MeanshiftOperation <T> meanshiftOperator(this->feature_space, this->point_index);
    meanshiftOperator.prime_index(m_context.search_params);

#if WITH_OPENMP
#pragma omp parallel for schedule(dynamic)
#endif
    for (size_t index = 0; index < this->feature_space->size(); index++) {
        if (m_context.show_progress) {
#if WITH_OPENMP
#pragma omp critical
#endif
            m_progress_bar->operator++();
        }

        // Get the meanshift vector for this point
        typename Point<T>::ptr x = this->feature_space->points[index];
        x->shift = meanshiftOperator.meanshift(x->values,
                m_context.search_params,
                m_context.kernel,
                m_context.weight_function);

        // Extract the spatial component and obtain the grid
        vector<T> spatial_shift = this->feature_space->spatial_component(x->shift);
        x->gridded_shift = this->feature_space->coordinate_system->to_gridpoints(spatial_shift);
    }

    if (m_context.show_progress) {
        cout << "done. (" << stop_timer() << "s)" << endl;
        delete m_progress_bar;
        m_progress_bar = NULL;
    }

    // Analyse the graph and create clusters
    cluster_list->aggregate_cluster_graph(
            this->feature_space,
            m_context.weight_function,
            m_params.coalesceWithStrongestNeighbour,
            m_context.show_progress);

    // Provide fresh ids right away
    m3D::uuid_t uuid = 0;
    ClusterUtils<T>::provideUuids(cluster_list, uuid);
    m3D::id_t id = 0;
    ClusterUtils<T>::provideIds(cluster_list, id);

    // Replace points with original data
    ClusterUtils<T>::replace_points_from_datastore(cluster_list, m_context.data_store);

    // Find margin points (#325)
    ClusterUtils<T>::obtain_margin_flag(cluster_list, this->feature_space);

#if WRITE_BOUNDARIES
    // FIX(review): cluster_list is a pointer; the previous '.' member access
    // could not compile when WRITE_BOUNDARIES was enabled.
    cluster_list->write_boundaries(weight_function, this->feature_space, this->point_index, resolution);
#endif

#if WRITE_MODES
#if WITH_VTK
    // FIX(review): same pointer/value mismatch in this debug block; work on
    // a reference so the original value-style member syntax still applies.
    ClusterList<T> &cl = *cluster_list;
    size_t min_size = std::numeric_limits<size_t>::max();
    size_t max_size = std::numeric_limits<size_t>::min();
    for (size_t i = 0; i < cl.size(); i++) {
        if (cl[i]->size() < min_size) {
            min_size = cl[i]->size();
        }
        if (cl[i]->size() > max_size) {
            max_size = cl[i]->size();
        }
    }
    NetCDFDataStore<T> *ds = (NetCDFDataStore<T> *) this->feature_space->data_store();
    std::string fn = ds->filename() + "-modes-" + boost::lexical_cast<string>(pass_counter()) + ".vtk";
    VisitUtils<T>::write_cluster_modes_vtk(fn, cl.clusters);
    fn = ds->filename() + "-raw-modes-" + boost::lexical_cast<string>(pass_counter()) + ".vtk";
    VisitUtils<T>::write_modes_vtk(fn, cl.trajectory_endpoints(), cl.trajectory_lengths());
    VisitUtils<T>::write_cluster_modes_vtk(fn, cl.clusters);
#endif
#endif
    return cluster_list;
}
}
#endif
|
omp50_task_depend_mtx3.c | // RUN: %libomp-compile-and-run
// UNSUPPORTED: gcc-4, gcc-5, gcc-6, gcc-7, gcc-8
// UNSUPPORTED: clang-3, clang-4, clang-5, clang-6, clang-7, clang-8
// TODO: update expected result when icc supports mutexinoutset
// XFAIL: icc
// REQUIRES: !abt
// Tests OMP 5.0 task dependences "mutexinoutset", emulates compiler codegen
// Mutually exclusive tasks get same input dependency info array
//
// Task tree created:
// task0 task1
// \ / \
// task2 task5
// / \
// task3 task4
// / \
// task6 <-->task7 (these two are mutually exclusive)
// \ /
// task8
//
#include <stdio.h>
#include <omp.h>
#include "omp_my_sleep.h"
static int checker = 0; // to check if two tasks run simultaneously
static int err = 0;
#ifndef DELAY
#define DELAY 0.1
#endif
// Body shared by the two mutexinoutset tasks (6 and 7). Uses the global
// 'checker' to verify mutual exclusion: each task atomically increments it
// on entry and decrements it on exit, so if both tasks ever run at the same
// time one of them observes checker != 1 and bumps the global 'err'.
// The increment / check / sleep / check / decrement sequence is
// order-critical — do not reorder.
int mutex_task(int task_id) {
int th = omp_get_thread_num();
#pragma omp atomic
++checker;
printf("task %d, th %d\n", task_id, th);
if (checker != 1) {
err++;
printf("Error1, checker %d != 1\n", checker);
}
my_sleep(DELAY);
if (checker != 1) {
err++;
printf("Error2, checker %d != 1\n", checker);
}
#pragma omp atomic
--checker;
return 0;
}
// Builds the task tree shown in the file header. i1..i4 are dependency
// sentinels only (never read/written). Exit status: 0 when the two
// mutexinoutset tasks were observed to be mutually exclusive, 1 otherwise.
int main()
{
int i1,i2,i3,i4;
// Two threads are enough to let tasks 6 and 7 overlap if the runtime
// (incorrectly) allowed it.
omp_set_num_threads(2);
#pragma omp parallel
{
#pragma omp single nowait
{
int t = omp_get_thread_num();
#pragma omp task depend(in: i1, i2)
{ int th = omp_get_thread_num();
printf("task 0_%d, th %d\n", t, th);
my_sleep(DELAY); }
#pragma omp task depend(in: i1, i3)
{ int th = omp_get_thread_num();
printf("task 1_%d, th %d\n", t, th);
my_sleep(DELAY); }
#pragma omp task depend(in: i2) depend(out: i1)
{ int th = omp_get_thread_num();
printf("task 2_%d, th %d\n", t, th);
my_sleep(DELAY); }
#pragma omp task depend(in: i1)
{ int th = omp_get_thread_num();
printf("task 3_%d, th %d\n", t, th);
my_sleep(DELAY); }
#pragma omp task depend(out: i2)
{ int th = omp_get_thread_num();
printf("task 4_%d, th %d\n", t, th);
my_sleep(DELAY+0.1); } // wait a bit longer than task 3
#pragma omp task depend(out: i3)
{ int th = omp_get_thread_num();
printf("task 5_%d, th %d\n", t, th);
my_sleep(DELAY); }
// Tasks 6 and 7 share depend(mutexinoutset: i1, i4): they must not run
// concurrently; mutex_task() verifies this via the 'checker' global.
#pragma omp task depend(mutexinoutset: i1, i4)
{ mutex_task(6); }
#pragma omp task depend(mutexinoutset: i1, i4)
{ mutex_task(7); }
#pragma omp task depend(in: i1)
{ int th = omp_get_thread_num();
printf("task 8_%d, th %d\n", t, th);
my_sleep(DELAY); }
} // single
} // parallel
if (err == 0) {
printf("passed\n");
return 0;
} else {
printf("failed\n");
return 1;
}
}
|
test6.c | //#include<omp.h>
// Exercises OpenMP worksharing (for nowait), named/unnamed critical
// sections, and the atomic clauses (update/read/write/capture), both inside
// and outside a critical region.
// NOTE(review): printf / sleep / omp_get_thread_num are used without their
// headers (the omp.h include at the top of this file is commented out), so
// the compiler falls back to implicit declarations.
int main() {
#pragma omp parallel
    {
        int i, x = 0; // FIX(review): x was incremented ('x = x + 1') without ever being initialized — UB
        int y = 0;    // FIX(review): y is copied into x ('x = y') before any assignment — UB
#pragma omp for nowait
        for (i = 0; i < 10; i++) {
#pragma omp critical (someName)
            printf("1.) Iteration %d with thread %d\n", i, omp_get_thread_num());
            if (omp_get_thread_num() % 2 == 0)
                sleep(5);
        }
#pragma omp for nowait
        for (i = 0; i < 10; i++) {
#pragma omp critical
            {
                printf("2.) Iteration %d with thread %d\n", i, omp_get_thread_num());
#pragma omp atomic
                x = x + 1;
#pragma omp atomic update
                x = x + 1;
#pragma omp atomic read
                y = x;
#pragma omp atomic write
                x = y;
                // NOTE(review): 'x = y = 10' does not match any standard
                // atomic-capture form ('v = x; x = expr') — verify against
                // the OpenMP spec what was intended here.
#pragma omp atomic capture
                x = y = 10;
            }
        }
#pragma omp atomic
        x = x + 1;
#pragma omp atomic update
        x = x + 1;
#pragma omp atomic read
        y = x;
#pragma omp atomic write
        x = y;
#pragma omp atomic capture
        x = y = 10;
        y++;
    }
}
|
GB_reduce_to_vector.c | //------------------------------------------------------------------------------
// GB_reduce_to_vector: reduce a matrix to a vector using a binary op
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// CALLS: GB_build
// C<M> = accum (C,reduce(A)) where C is n-by-1. Reduces a matrix A or A'
// to a vector.
#include "GB_reduce.h"
#include "GB_build.h"
#include "GB_ek_slice.h"
#include "GB_accum_mask.h"
#ifndef GBCOMPACT
#include "GB_red__include.h"
#endif
#define GB_FREE_WORK \
{ \
GB_FREE_MEMORY (Wfirst_space, ntasks, zsize) ; \
GB_FREE_MEMORY (Wlast_space, ntasks, zsize) ; \
GB_ek_slice_free (&pstart_slice, &kfirst_slice, &klast_slice, ntasks) ; \
}
#define GB_FREE_ALL \
{ \
GB_FREE_WORK ; \
GB_MATRIX_FREE (&T) ; \
}
GrB_Info GB_reduce_to_vector // C<M> = accum (C,reduce(A))
(
GrB_Matrix C, // input/output for results, size n-by-1
const GrB_Matrix M, // optional M for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(C,T)
const GrB_BinaryOp reduce, // reduce operator for T=reduce(A)
const GB_void *terminal, // for early exit (NULL if none)
const GrB_Matrix A, // first input: matrix A
const GrB_Descriptor desc, // descriptor for C, M, and A
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
// C may be aliased with M and/or A
GB_RETURN_IF_NULL_OR_FAULTY (C) ;
GB_RETURN_IF_FAULTY (M) ;
GB_RETURN_IF_FAULTY (accum) ;
GB_RETURN_IF_NULL_OR_FAULTY (A) ;
GB_RETURN_IF_FAULTY (desc) ;
ASSERT_OK (GB_check (C, "C input for reduce_BinaryOp", GB0)) ;
ASSERT_OK_OR_NULL (GB_check (M, "M for reduce_BinaryOp", GB0)) ;
ASSERT_OK_OR_NULL (GB_check (accum, "accum for reduce_BinaryOp", GB0)) ;
ASSERT_OK (GB_check (reduce, "reduce for reduce_BinaryOp", GB0)) ;
ASSERT_OK (GB_check (A, "A input for reduce_BinaryOp", GB0)) ;
ASSERT_OK_OR_NULL (GB_check (desc, "desc for reduce_BinaryOp", GB0)) ;
GrB_Matrix T = NULL ;
int ntasks = 0 ;
size_t zsize = 0 ;
int64_t *pstart_slice = NULL, *kfirst_slice = NULL, *klast_slice = NULL ;
GB_void *restrict Wfirst_space = NULL ;
GB_void *restrict Wlast_space = NULL ;
// get the descriptor
GB_GET_DESCRIPTOR (info, desc, C_replace, Mask_comp, A_transpose, xx1, xx2);
// C and M are n-by-1 GrB_Vector objects, typecasted to GrB_Matrix
ASSERT (GB_VECTOR_OK (C)) ;
ASSERT (GB_IMPLIES (M != NULL, GB_VECTOR_OK (M))) ;
// check domains and dimensions for C<M> = accum (C,T)
GrB_Type ttype = reduce->ztype ;
GB_OK (GB_compatible (C->type, C, M, accum, ttype, Context)) ;
// check types of reduce
if (reduce->xtype != reduce->ztype || reduce->ytype != reduce->ztype)
{
// all 3 types of z = reduce (x,y) must be the same. reduce must also
// be associative but there is no way to check this in general.
return (GB_ERROR (GrB_DOMAIN_MISMATCH, (GB_LOG,
"All domains of reduction operator must be identical;\n"
"operator is: [%s] = %s ([%s],[%s])", reduce->ztype->name,
reduce->name, reduce->xtype->name, reduce->ytype->name))) ;
}
// T = reduce (T,A) must be compatible
if (!GB_Type_compatible (A->type, reduce->ztype))
{
return (GB_ERROR (GrB_DOMAIN_MISMATCH, (GB_LOG,
"incompatible type for reduction operator z=%s(x,y):\n"
"input matrix A of type [%s]\n"
"cannot be typecast to reduction operator of type [%s]",
reduce->name, A->type->name, reduce->ztype->name))) ;
}
// check the dimensions
int64_t n = GB_NROWS (C) ;
if (A_transpose)
{
if (n != GB_NCOLS (A))
{
return (GB_ERROR (GrB_DIMENSION_MISMATCH, (GB_LOG,
"w=reduce(A'): length of w is "GBd";\n"
"it must match the number of columns of A, which is "GBd".",
n, GB_NCOLS (A)))) ;
}
}
else
{
if (n != GB_NROWS(A))
{
return (GB_ERROR (GrB_DIMENSION_MISMATCH, (GB_LOG,
"w=reduce(A): length of w is "GBd";\n"
"it must match the number of rows of A, which is "GBd".",
n, GB_NROWS (A)))) ;
}
}
// quick return if an empty mask is complemented
GB_RETURN_IF_QUICK_MASK (C, C_replace, M, Mask_comp) ;
//--------------------------------------------------------------------------
// delete any lingering zombies and assemble any pending tuples
//--------------------------------------------------------------------------
// GB_WAIT (C) ;
GB_WAIT (M) ;
GB_WAIT (A) ;
ASSERT (!GB_PENDING (A)) ; ASSERT (!GB_ZOMBIES (A)) ;
//--------------------------------------------------------------------------
// handle the CSR/CSC format of A
//--------------------------------------------------------------------------
// the result vector T is in CSC format
if (!(A->is_csc))
{
A_transpose = !A_transpose ;
}
//--------------------------------------------------------------------------
// T = reduce (A) or reduce (A')
//--------------------------------------------------------------------------
// T is created below so that it can be typecasted to a GrB_Vector when
// done: non-hypersparse n-by-1 matrix in CSC format.
// T = reduce_to_vector (A) or reduce_to_vector (A'), which is T = sum (A')
// or sum (A), in MATLAB notation, except where where 'sum' is any
// associative operator.
// By default, T(i) = op (A (i,:)) is a vector whose length is the same as
// the number of rows of A. T(i) is the reduction of all entries in the
// ith row of A. If A_transpose is true, the T is computed as if A were
// transposed first, and thus its length is equal to the number of vectors
// of the input matrix A. The use of A_transpose is the opposite of
// MATLAB, since sum(A) in MATLAB sums up the columns of A, and sum(A')
// sums up the rows of A..
// T is an n-by-1 GrB_Matrix that represents the vector. It is computed
// as a GrB_Matrix so it can be passed to GB_accum_mask without
// typecasting.
ASSERT (n == (A_transpose) ? A->vdim : A->vlen) ;
//--------------------------------------------------------------------------
// scalar workspace
//--------------------------------------------------------------------------
size_t asize = A->type->size ;
GB_Type_code acode = A->type->code ;
const int64_t *restrict Ai = A->i ;
const GB_void *restrict Ax = A->x ;
int64_t anvec = A->nvec ;
int64_t anz = GB_NNZ (A) ;
zsize = reduce->ztype->size ;
GB_Type_code zcode = reduce->ztype->code ;
//--------------------------------------------------------------------------
// determine the number of threads to use
//--------------------------------------------------------------------------
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
int nthreads = GB_nthreads (anz + anvec, chunk, nthreads_max) ;
//--------------------------------------------------------------------------
// T = reduce(A) or reduce(A')
//--------------------------------------------------------------------------
GxB_binary_function freduce = reduce->function ;
GB_cast_function cast_A_to_Z = GB_cast_factory (zcode, acode) ;
bool nocasting = (A->type == reduce->ztype) ;
if (A_transpose)
{
//----------------------------------------------------------------------
// T = reduce(A'), where T(j) = reduce (A (:,j))
//----------------------------------------------------------------------
// Each vector A(:,j) is reduced to the scalar T(j)
//----------------------------------------------------------------------
// allocate T, including T->p, T->i, and T->x. T is not hypersparse.
//----------------------------------------------------------------------
// since T is a GrB_Vector, it is CSC and not hypersparse
GB_CREATE (&T, ttype, n, 1, GB_Ap_calloc, true,
GB_FORCE_NONHYPER, GB_HYPER_DEFAULT, 1, anvec, true, Context) ;
GB_OK (info) ;
ASSERT (GB_VECTOR_OK (T)) ;
T->p [0] = 0 ;
T->p [1] = anvec ;
int64_t *restrict Ti = T->i ;
GB_void *restrict Tx = T->x ;
T->nvec_nonempty = (anvec > 0) ? 1 : 0 ;
T->magic = GB_MAGIC ;
//----------------------------------------------------------------------
// symbolic phase
//----------------------------------------------------------------------
// Construct the pattern of T. The kth vector in A creates one entry
// in T, but it is flagged as a zombie if it is empty.
int64_t nzombies = 0 ;
const int64_t *restrict Ah = A->h ;
const int64_t *restrict Ap = A->p ;
int nth = GB_nthreads (anvec, chunk, nthreads_max) ;
#pragma omp parallel for num_threads(nth) schedule(static) \
reduction(+:nzombies)
for (int64_t k = 0 ; k < anvec ; k++)
{
// if A(:,j) is empty, then the entry in T becomes a zombie
int64_t j = (Ah == NULL) ? k : Ah [k] ;
int64_t jnz = Ap [k+1] - Ap [k] ;
if (jnz == 0)
{
// A(:,j) is empty: T(j) is a zombie
Ti [k] = GB_FLIP (j) ;
nzombies++ ;
}
else
{
// A(:,j) has at least one entry; T(j) is live
Ti [k] = j ;
}
}
if (A->nvec_nonempty < 0)
{
A->nvec_nonempty = anvec - nzombies ;
}
ASSERT (A->nvec_nonempty == (anvec - nzombies)) ;
T->nzombies = nzombies ;
//----------------------------------------------------------------------
// slice the entries of A for the numeric phase
//----------------------------------------------------------------------
// Task tid does entries pstart_slice [tid] to pstart_slice [tid+1]-1
// and vectors kfirst_slice [tid] to klast_slice [tid]. The first and
// last vectors may be shared with prior slices and subsequent slices.
ntasks = (nthreads == 1) ? 1 : (8 * nthreads) ;
ntasks = GB_IMIN (ntasks, anz) ;
ntasks = GB_IMAX (ntasks, 1) ;
GB_MALLOC_MEMORY (Wfirst_space, ntasks, zsize) ;
GB_MALLOC_MEMORY (Wlast_space, ntasks, zsize) ;
if (Wfirst_space == NULL || Wlast_space == NULL ||
!GB_ek_slice (&pstart_slice, &kfirst_slice, &klast_slice, A, ntasks))
{
// out of memory
GB_FREE_ALL ;
return (GB_OUT_OF_MEMORY) ;
}
//----------------------------------------------------------------------
// numeric phase: launch the switch factory
//----------------------------------------------------------------------
bool done = false ;
#ifndef GBCOMPACT
#define GB_red(opname,aname) GB_red_eachvec_ ## opname ## aname
#define GB_RED_WORKER(opname,aname,atype) \
{ \
info = GB_red (opname, aname) ((atype *) Tx, A, \
kfirst_slice, klast_slice, pstart_slice, \
Wfirst_space, Wlast_space, ntasks, nthreads) ; \
done = (info != GrB_NO_VALUE) ; \
} \
break ;
if (nocasting)
{
// controlled by opcode and typecode. No typecasting is done.
GB_Opcode opcode = reduce->opcode ;
GB_Type_code typecode = acode ;
ASSERT (typecode <= GB_UDT_code) ;
#include "GB_red_factory.c"
}
#endif
//----------------------------------------------------------------------
// generic worker: with typecasting
//----------------------------------------------------------------------
if (!done)
{
#define GB_ATYPE GB_void
#define GB_CTYPE GB_void
// ztype s ;
#define GB_SCALAR(s) \
GB_void s [GB_PGI(zsize)]
// ztype s = (ztype) Ax [p], with typecast
#define GB_CAST_ARRAY_TO_SCALAR(s,Ax,p) \
cast_A_to_Z (s, Ax +((p)*asize), zsize) ; \
// s += (ztype) Ax [p], with typecast
#define GB_ADD_CAST_ARRAY_TO_SCALAR(s, Ax, p) \
GB_void awork [GB_PGI(zsize)] ; \
cast_A_to_Z (awork, Ax +((p)*asize), zsize) ; \
freduce (s, s, awork) ;
// W [k] = s, no typecast
#define GB_COPY_SCALAR_TO_ARRAY(W,k,s) \
memcpy (W +((k)*zsize), s, zsize) ;
// W [k] = S [i], no typecast
#define GB_COPY_ARRAY_TO_ARRAY(W,k,S,i) \
memcpy (W +((k)*zsize), S +((i)*zsize), zsize) ;
// W [k] += S [i], no typecast
#define GB_ADD_ARRAY_TO_ARRAY(W,k,S,i) \
freduce (W +((k)*zsize), W +((k)*zsize), S +((i)*zsize)) ;
// W [k] += s, no typecast
#define GB_ADD_SCALAR_TO_ARRAY(W,k,s) \
freduce (W +((k)*zsize), W +((k)*zsize), s) ;
// break if terminal value reached
#define GB_BREAK_IF_TERMINAL(t) \
if (terminal != NULL) \
{ \
if (memcmp (t, terminal, zsize) == 0) break ; \
}
#include "GB_reduce_each_vector.c"
}
//----------------------------------------------------------------------
// wrapup: delete any zombies
//----------------------------------------------------------------------
ASSERT_OK (GB_check (T, "T before wait", GB_FLIP (GB0)));
if (nzombies > 0)
{
ASSERT (GB_VECTOR_OK (T)) ;
ASSERT (!GB_PENDING (T)) ;
ASSERT (GB_ZOMBIES (T)) ;
GB_OK (GB_wait (T, Context)) ;
}
ASSERT_OK (GB_check (T, "T output = reduce_each_vector (A)", GB0)) ;
}
else
{
//----------------------------------------------------------------------
// T = reduce(A), where T(i) = reduce (A (i,:))
//----------------------------------------------------------------------
//----------------------------------------------------------------------
// select the method
//----------------------------------------------------------------------
// When A_transpose is false (after flipping it to account for the
// CSR/CSC format), n is A->vlen, the vector length of A. This is
// the number of rows of a CSC matrix, or the # of columns of a CSR
// matrix. The matrix A itself requires O(vdim+anz) memory if
// non-hypersparse and O(anz) if hypersparse. This does not depend on
// A->vlen. So if the vector length is really huge (when anz << n),
// the bucket method would fail. Thus, the qsort method, below, is
// used when A is very sparse.
if (GB_CHOOSE_QSORT_INSTEAD_OF_BUCKET (anz, n))
{
//------------------------------------------------------------------
// qsort method
//------------------------------------------------------------------
// memory usage is O(anz) and time is O(anz*log(anz)). This is
// more efficient than the bucket method, below, when A is very
// hypersparse. The time and memory complexity does not depend
// on n.
// since T is a GrB_Vector, it is not hypersparse
GB_NEW (&T, ttype, n, 1, GB_Ap_null, true, GB_FORCE_NONHYPER,
GB_HYPER_DEFAULT, 1, Context) ;
GB_OK (info) ;
// GB_build treats Ai and Ax as read-only; they must not be modified
GB_OK (GB_build
(
T, // construct result in the T vector
(GrB_Index *) Ai, // indices inside the vector
NULL, // vector indices (none)
Ax, // values, of size anz
anz, // number of tuples
reduce, // reduction operator
acode, // type code of the Ax array
false, // the input is a vector
false, // indices do not need to be checked
Context
)) ;
ASSERT (T->nvec_nonempty == GB_nvec_nonempty (T, NULL)) ;
}
else
{
//------------------------------------------------------------------
// bucket method
//------------------------------------------------------------------
// Determine number of threads to use for constructing the buckets.
// Each thread requires O(n) workspace, so this method does not
// scale well when there are many threads compared to anz. Total
// workspace is O(n*ntasks), so limit the # of threads used so that
// at most anz workspace is used. Each thread takes a single task.
ntasks = (n > 0) ? (anz / n) : 1 ;
ntasks = GB_IMIN (ntasks, nthreads) ;
ntasks = GB_IMAX (ntasks, 1) ;
int nth = ntasks ; // one thread per task
//------------------------------------------------------------------
// slice the entries for each thread
//------------------------------------------------------------------
// Thread tid does entries pstart_slice [tid] to
// pstart_slice [tid+1]-1. No need to compute kfirst or klast.
GB_MALLOC_MEMORY (pstart_slice, ntasks+1, sizeof (int64_t)) ;
if (pstart_slice == NULL)
{
// out of memory
GB_FREE_ALL ;
return (GB_OUT_OF_MEMORY) ;
}
GB_eslice (pstart_slice, anz, ntasks) ;
//------------------------------------------------------------------
// sum across each index: T(i) = reduce (A (i,:))
//------------------------------------------------------------------
// Early exit cannot be exploited; ignore the terminal value.
#undef GB_red
#define GB_red(opname,aname) GB_red_eachindex_ ## opname ## aname
#undef GB_RED_WORKER
#define GB_RED_WORKER(opname,aname,atype) \
{ \
info = GB_red (opname, aname) (&T, ttype, A, pstart_slice, \
ntasks, nthreads, Context) ; \
done = (info != GrB_NO_VALUE) ; \
} \
break ;
bool done = false ;
//------------------------------------------------------------------
// launch the switch factory
//------------------------------------------------------------------
#ifndef GBCOMPACT
if (nocasting)
{
// controlled by opcode and typecode. No typecasting
GB_Opcode opcode = reduce->opcode ;
GB_Type_code typecode = acode ;
ASSERT (typecode <= GB_UDT_code) ;
#include "GB_red_factory.c"
if (! (info == GrB_SUCCESS || info == GrB_NO_VALUE))
{
// out of memory
GB_FREE_ALL ;
return (info) ;
}
}
#endif
//------------------------------------------------------------------
// generic worker
//------------------------------------------------------------------
if (!done)
{
#include "GB_reduce_each_index.c"
}
}
ASSERT_OK (GB_check (T, "T output for T = reduce_each_index (A)", GB0));
}
//--------------------------------------------------------------------------
// C<M> = accum (C,T): accumulate the results into C via the mask
//--------------------------------------------------------------------------
GB_FREE_WORK ;
return (GB_accum_mask (C, M, NULL, accum, &T, C_replace, Mask_comp,
Context)) ;
}
|
GB_binop__iseq_uint16.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__iseq_uint16)
// A.*B function (eWiseMult): GB (_AemultB_08__iseq_uint16)
// A.*B function (eWiseMult): GB (_AemultB_02__iseq_uint16)
// A.*B function (eWiseMult): GB (_AemultB_04__iseq_uint16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__iseq_uint16)
// A*D function (colscale): GB (_AxD__iseq_uint16)
// D*A function (rowscale): GB (_DxB__iseq_uint16)
// C+=B function (dense accum): GB (_Cdense_accumB__iseq_uint16)
// C+=b function (dense accum): GB (_Cdense_accumb__iseq_uint16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__iseq_uint16)
// C=scalar+B GB (_bind1st__iseq_uint16)
// C=scalar+B' GB (_bind1st_tran__iseq_uint16)
// C=A+scalar GB (_bind2nd__iseq_uint16)
// C=A'+scalar GB (_bind2nd_tran__iseq_uint16)
// C type: uint16_t
// A type: uint16_t
// A pattern? 0
// B type: uint16_t
// B pattern? 0
// BinaryOp: cij = (aij == bij)
#define GB_ATYPE \
uint16_t
#define GB_BTYPE \
uint16_t
#define GB_CTYPE \
uint16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint16_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint16_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x == y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISEQ || GxB_NO_UINT16 || GxB_NO_ISEQ_UINT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; the "+" here is the ISEQ
// operator, so cij = (aij == bij).  The loop lives in the included template.
void GB (_Cdense_ewise3_noaccum__iseq_uint16)
(
    GrB_Matrix C,           // output, already allocated and dense
    const GrB_Matrix A,     // first input, dense
    const GrB_Matrix B,     // second input, dense
    const int nthreads      // number of OpenMP threads to use
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C with the ISEQ
// operator.  Work is split per the precomputed slicing of B's entries.
GrB_Info GB (_Cdense_accumB__iseq_uint16)
(
    GrB_Matrix C,           // in/out, dense
    const GrB_Matrix B,     // input, sparse
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    // this operator/type combination was compiled out via GB_control.h
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a single scalar b into every entry of a dense matrix C.
GrB_Info GB (_Cdense_accumb__iseq_uint16)
(
    GrB_Matrix C,               // in/out, dense
    const GB_void *p_bwork,     // pointer to the scalar b, of type uint16_t
    const int nthreads          // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint16_t
        uint16_t bwork = (*((uint16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable — the block above always returns.  Kept
    // byte-for-byte because this file is auto-generated.
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D, column scale by the diagonal matrix D: cij = (aij == djj).
GrB_Info GB (_AxD__iseq_uint16)
(
    GrB_Matrix C,           // output
    const GrB_Matrix A,     // input matrix
    const GrB_Matrix D,     // diagonal scaling matrix
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B, row scale by the diagonal matrix D: cij = (dii == bij).
GrB_Info GB (_DxB__iseq_uint16)
(
    GrB_Matrix C,           // output
    const GrB_Matrix D,     // diagonal scaling matrix
    const GrB_Matrix B,     // input matrix
    int nthreads            // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B, C<M> = A+B, or C<!M> = A+B, with cij = (aij == bij).
GrB_Info GB (_AaddB__iseq_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,                     // optional mask (may be NULL)
    const bool Mask_struct,                 // use structure of M, not values
    const bool Mask_comp,                   // use complemented mask
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,               // true: eWiseUnion with fill values
    const GB_void *alpha_scalar_in,         // fill for A, used if is_eWiseUnion
    const GB_void *beta_scalar_in,          // fill for B, used if is_eWiseUnion
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    uint16_t alpha_scalar ;
    uint16_t beta_scalar ;
    if (is_eWiseUnion)
    {
        // eWiseUnion substitutes alpha/beta where A or B has no entry
        alpha_scalar = (*((uint16_t *) alpha_scalar_in)) ;
        beta_scalar = (*((uint16_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult method 08: C = A.*B (masked or not) where C is sparse/hypersparse;
// cij = (aij == bij) for entries in the intersection of A and B.
GrB_Info GB (_AemultB_08__iseq_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,                     // optional mask (may be NULL)
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B when A is sparse/hyper, B is bitmap/full.
GrB_Info GB (_AemultB_02__iseq_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,                     // optional mask (may be NULL)
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,                      // if true, apply op as f(y,x)
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    // ISEQ is commutative, so GB_BINOP_FLIP is 0 and this branch is taken.
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 04: C<M> = A.*B with M sparse/hyper, A and B bitmap/full.
GrB_Info GB (_AemultB_04__iseq_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,                     // mask, sparse or hypersparse
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B, C<M> = A.*B, or C<!M> = A.*B where C is bitmap.
GrB_Info GB (_AemultB_bitmap__iseq_uint16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,                     // optional mask (may be NULL)
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x == Bx [p]) for every entry present per the bitmap Bb.
GrB_Info GB (_bind1st__iseq_uint16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,     // the bound first argument x
    const GB_void *Bx_input,    // values of B
    const int8_t *restrict Bb,  // bitmap of B (NULL if B is full)
    int64_t bnz,                // number of entries to process
    int nthreads                // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t x = (*((uint16_t *) x_input)) ;
    uint16_t *Bx = (uint16_t *) Bx_input ;
    uint16_t *Cx = (uint16_t *) Cx_output ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // only entries present in the bitmap are computed
        if (GBB (Bb, p))
        {
            uint16_t bval = GBX (Bx, p, false) ;
            Cx [p] = (x == bval) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] == y) for every entry present per the bitmap Ab.
GrB_Info GB (_bind2nd__iseq_uint16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,    // values of A
    const GB_void *y_input,     // the bound second argument y
    const int8_t *restrict Ab,  // bitmap of A (NULL if A is full)
    int64_t anz,                // number of entries to process
    int nthreads                // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t y = (*((uint16_t *) y_input)) ;
    uint16_t *Ax = (uint16_t *) Ax_input ;
    uint16_t *Cx = (uint16_t *) Cx_output ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // only entries present in the bitmap are computed
        if (GBB (Ab, p))
        {
            uint16_t aval = GBX (Ax, p, false) ;
            Cx [p] = (aval == y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x == aij) ; \
}
// C = op (x, A'): transpose A and apply cij = (x == aij), using the
// GB_CAST_OP macro defined just above this function.
GrB_Info GB (_bind1st_tran__iseq_uint16)
(
    GrB_Matrix C,
    const GB_void *x_input,                 // the bound first argument x
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t x = (*((const uint16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code following this function
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij == y) ; \
}
// C = op (A', y): transpose A and apply cij = (aij == y), using the
// GB_CAST_OP macro defined just above this function.
GrB_Info GB (_bind2nd_tran__iseq_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,                 // the bound second argument y
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t y = (*((const uint16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
8354.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4000. */
#include "atax.h"
/* Array initialization. */
static
void init_array (int nx, int ny,
                 DATA_TYPE POLYBENCH_2D(A,NX,NY,nx,ny),
                 DATA_TYPE POLYBENCH_1D(x,NY,ny))
{
  /* Fill x with multiples of pi and A with a deterministic pattern so the
     benchmark's output is reproducible. */
  int row, col;
  for (col = 0; col < ny; col++) {
    x[col] = col * M_PI;
  }
  for (row = 0; row < nx; row++) {
    for (col = 0; col < ny; col++) {
      A[row][col] = ((DATA_TYPE) row * (col + 1)) / nx;
    }
  }
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
static
void print_array(int nx,
                 DATA_TYPE POLYBENCH_1D(y,NX,nx))
{
  /* Dump the live-out vector y to stderr, breaking the line after every
     20th value (indices 0, 20, 40, ...), exactly as the DCE checker expects. */
  int k;
  for (k = 0; k < nx; k++) {
    fprintf (stderr, DATA_PRINTF_MODIFIER, y[k]);
    if (k % 20 == 0) {
      fprintf (stderr, "\n");
    }
  }
  fprintf (stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
/* Computes y = A' * (A * x): first tmp = A*x, then y accumulates A(i,:)*tmp[i].
   FIX: the y[j] accumulation is indexed by j while the parallel loop runs
   over i, so different threads update the same y[j] concurrently — a data
   race in the original code.  The update is now performed atomically; the
   kernel's result is unchanged for a single thread and now correct for any
   thread count. */
static
void kernel_atax(int nx, int ny,
                 DATA_TYPE POLYBENCH_2D(A,NX,NY,nx,ny),
                 DATA_TYPE POLYBENCH_1D(x,NY,ny),
                 DATA_TYPE POLYBENCH_1D(y,NY,ny),
                 DATA_TYPE POLYBENCH_1D(tmp,NX,nx))
{
  int i, j;
#pragma scop
  #pragma omp parallel num_threads(2)
  {
    #pragma omp for schedule(static, 16)
    for (i = 0; i < _PB_NY; i++)
      y[i] = 0;
    /* implicit barrier at the end of the omp for ensures y is fully
       zeroed before any thread starts accumulating into it */
    #pragma omp for private (j) schedule(static, 16)
    for (i = 0; i < _PB_NX; i++)
    {
      tmp[i] = 0;
      for (j = 0; j < _PB_NY; j++)
        tmp[i] = tmp[i] + A[i][j] * x[j];
      for (j = 0; j < _PB_NY; j++)
      {
        /* y[j] is shared across i-iterations running on different threads */
        #pragma omp atomic
        y[j] = y[j] + A[i][j] * tmp[i];
      }
    }
  }
#pragma endscop
}
/* Driver: allocate and initialize the arrays, time the atax kernel, and
   print the live-out vector y (which also prevents dead-code elimination). */
int main(int argc, char** argv)
{
  /* Retrieve problem size. */
  int nx = NX;
  int ny = NY;
  /* Variable declaration/allocation. */
  POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NX, NY, nx, ny);
  POLYBENCH_1D_ARRAY_DECL(x, DATA_TYPE, NY, ny);
  POLYBENCH_1D_ARRAY_DECL(y, DATA_TYPE, NY, ny);
  POLYBENCH_1D_ARRAY_DECL(tmp, DATA_TYPE, NX, nx);
  /* Initialize array(s). */
  init_array (nx, ny, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(x));
  /* Start timer. */
  polybench_start_instruments;
  /* Run kernel (the whole call is what gets timed). */
  kernel_atax (nx, ny,
               POLYBENCH_ARRAY(A),
               POLYBENCH_ARRAY(x),
               POLYBENCH_ARRAY(y),
               POLYBENCH_ARRAY(tmp));
  /* Stop and print timer. */
  polybench_stop_instruments;
  polybench_print_instruments;
  /* Prevent dead-code elimination. All live-out data must be printed
     by the function call in argument. */
  polybench_prevent_dce(print_array(nx, POLYBENCH_ARRAY(y)));
  /* Be clean. */
  POLYBENCH_FREE_ARRAY(A);
  POLYBENCH_FREE_ARRAY(x);
  POLYBENCH_FREE_ARRAY(y);
  POLYBENCH_FREE_ARRAY(tmp);
  return 0;
}
|
test.c | /*
============================================================================
Name : merge.c
Author : qwinpin
Version :
Copyright : Your copyright notice
Description : Hello World in C, Ansi-style
============================================================================
*/
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
/* qsort comparator for int, ascending.
   FIX: the original returned *(int*)a - *(int*)b, which overflows (undefined
   behavior) when the operands are far apart, e.g. INT_MIN vs INT_MAX, and
   can then report the wrong order.  The (x > y) - (x < y) idiom returns the
   required negative/zero/positive result without any overflow. */
int cmpfunc (const void * a, const void * b) {
    int x = *(const int *)a;
    int y = *(const int *)b;
    return (x > y) - (x < y);
}
/* Merge the two sorted runs a[start..mid] and a[mid+1..end] into the scratch
   array b, then copy the merged result back into a.  Stable: on ties the
   element from the left run is taken first. */
void merge(int *a, int *b, int start, int mid, int end){
    int left = start;      /* cursor in the left run  a[start..mid]   */
    int right = mid + 1;   /* cursor in the right run a[mid+1..end]   */
    int out = start;       /* cursor in the scratch array b           */
    while (left <= mid && right <= end) {
        if (a[left] <= a[right]) {
            b[out++] = a[left++];
        } else {
            b[out++] = a[right++];
        }
    }
    /* exactly one of the two runs still has elements; drain it */
    while (left <= mid) {
        b[out++] = a[left++];
    }
    while (right <= end) {
        b[out++] = a[right++];
    }
    /* copy the merged range back into a */
    while (start <= end) {
        a[start] = b[start];
        start++;
    }
}
/* Recursive top-down merge sort of a[start..end], using b as scratch space. */
void splitter(int *a, int *b, int start, int end){
    if (start >= end)
    {
        return;  /* zero or one element: already sorted */
    }
    int mid = (start + end) / 2;
    splitter(a, b, start, mid);      /* sort the left half   */
    splitter(a, b, mid + 1, end);    /* sort the right half  */
    merge(a, b, start, mid, end);    /* combine the two runs */
}
/* Interactive driver: fills an array with random ints and sorts it with one
   of three strategies (0 = single-threaded merge sort, 1 = chunked parallel
   merge sort, 2 = library qsort), then verifies the order and reports timing.
   FIXES vs the original:
   - the parallel phase used shared st/st2 across `omp parallel for`
     iterations (a data race that sorted wrong ranges) and a nested
     `#pragma omp parallel` that made a whole inner team sort the same range;
   - when max %% split != 0 the tail elements were never sorted or merged;
   - the verification loop skipped the last element (i < max - 1);
   - scanf/malloc results were unchecked. */
int main(){
    printf("hello\n");
    int max;
    printf("Enter array size\n");
    if (scanf("%d", &max) != 1 || max < 1){
        printf("Invalid array size\n");
        return 1;
    }
    int mp;
    printf("Use single/parallel/qsort? 0/1/2\n");
    if (scanf("%d", &mp) != 1){
        printf("Invalid mode\n");
        return 1;
    }
    int split = 4;
    int* a = (int*)malloc(max * sizeof(int));
    int* b = (int*)malloc(max * sizeof(int));
    if (a == NULL || b == NULL){
        printf("Out of memory\n");
        free(a);
        free(b);
        return 1;
    }
    int i, j;
    srand(time(NULL));
    for (i = 0; i < max; i++){
        a[i] = rand();
    }
    for (i = 0; i <= 10 && i < max; i++){
        printf("%d ", a[i]);
    }
    printf("\n");
    clock_t start = clock(), diff;
    if (mp == 1){
        printf("Parallel");
        /* sort `split` independent chunks in parallel; bounds are
           per-iteration locals so there is no sharing between threads,
           and the last chunk absorbs the remainder */
        #pragma omp parallel for
        for (j = 0; j < split; j++){
            int lo = j * (max / split);
            int hi = (j == split - 1) ? max - 1 : lo + max / split - 1;
            splitter(a, b, lo, hi);
        }
        /* sequentially merge the sorted chunks into a growing prefix;
           the final merge extends to max-1 to cover the remainder */
        int st = max / split;
        for (i = 0; i < split - 1; i++){
            int hi = (i == split - 2) ? max - 1 : st + max / split - 1;
            merge(a, b, 0, st - 1, hi);
            st = hi + 1;
        }
    }
    if (mp == 0){
        printf("Single");
        splitter(a, b, 0, max-1);
    }
    if (mp == 2){
        printf("Library");
        qsort(a, max, sizeof(int), cmpfunc);
    }
    diff = clock() - start;
    /* verify ascending order over the whole array (including the last pair) */
    for (i = 1; i < max; i++){
        if (a[i] < a[i-1]){
            printf("\nbad ");
            printf("%d - %d, %d - %d\n", i, a[i], i-1, a[i-1]);
        }
    }
    printf("\n");
    printf("It took %li sec %li milisec", diff / CLOCKS_PER_SEC, diff * 1000 / CLOCKS_PER_SEC % 1000);
    printf("\n%f\n", (double)diff / CLOCKS_PER_SEC * 1000);
    free(a);
    free(b);
    return 0;
}
/* Benchmark driver: times single-threaded merge sort, the chunked parallel
   merge sort (for 10..100 splits), and qsort over growing array sizes,
   writing per-size timings to three text files.
   FIXES vs the original: the parallel phase raced on the shared st/st2
   chunk bounds and spawned a redundant nested `#pragma omp parallel`
   around each splitter call; the tail elements were dropped when
   size %% split != 0; malloc/realloc results were unchecked. */
int main2(){
    int size = 1000;
    int size_max = 2000000;
    int size_step = 100000;
    int i, j;
    int* a;
    int* b;
    srand(time(NULL));
    FILE *f = fopen("single_process.txt", "w");
    if (f == NULL)
    {
        printf("Error opening file!\n");
        exit(1);
    }
    a = (int*)malloc(size * sizeof(int));
    b = (int*)malloc(size * sizeof(int));
    if (a == NULL || b == NULL){
        printf("Out of memory!\n");
        exit(1);
    }
    while (size < size_max){
        for (i = 0; i < size; i++){
            a[i] = rand();
        }
        clock_t start = clock(), diff;
        splitter(a, b, 0, size-1);
        diff = clock() - start;
        fprintf(f, "%f, ", (double)(diff) / CLOCKS_PER_SEC * 1000);
        size = size + size_step;
        a = (int*)realloc(a, size * sizeof(int));
        b = (int*)realloc(b, size * sizeof(int));
        if (a == NULL || b == NULL){
            printf("Out of memory!\n");
            exit(1);
        }
    }
    fclose(f);
    FILE *f2 = fopen("parallel_process.txt", "w");
    if (f2 == NULL)
    {
        printf("Error opening file!\n");
        exit(1);
    }
    int split = 10;
    while (split <= 100){
        printf("Splits num: %d\n", split);
        size = 1000;
        a = (int*)realloc(a, size * sizeof(int));
        b = (int*)realloc(b, size * sizeof(int));
        if (a == NULL || b == NULL){
            printf("Out of memory!\n");
            exit(1);
        }
        fprintf(f2, "\nSplits - %d:\n ", split);
        while (size < size_max){
            printf("Size %d\n", size);
            for (j = 0; j < size; j++){
                a[j] = rand();
            }
            clock_t start = clock(), diff;
            /* chunk bounds are per-iteration locals (no data race);
               the last chunk absorbs the remainder */
            #pragma omp parallel for
            for (j = 0; j < split; j++){
                int lo = j * (size / split);
                int hi = (j == split - 1) ? size - 1 : lo + size / split - 1;
                splitter(a, b, lo, hi);
            }
            /* merge sorted chunks into a growing prefix; last merge
               extends to size-1 to cover the remainder */
            int st = size / split;
            for (j = 0; j < split - 1; j++){
                int hi = (j == split - 2) ? size - 1 : st + size / split - 1;
                merge(a, b, 0, st - 1, hi);
                st = hi + 1;
            }
            diff = clock() - start;
            printf("%li", diff);
            fprintf(f2, "%f, ", (double)(diff) / CLOCKS_PER_SEC * 1000);
            size = size + size_step;
            a = (int*)realloc(a, size * sizeof(int));
            b = (int*)realloc(b, size * sizeof(int));
            if (a == NULL || b == NULL){
                printf("Out of memory!\n");
                exit(1);
            }
        }
        split = split + 10;
    }
    fclose(f2);
    FILE *f3 = fopen("qsort_process.txt", "w");
    if (f3 == NULL)
    {
        printf("Error opening file!\n");
        exit(1);
    }
    size = 100;
    a = (int*)realloc(a, size * sizeof(int));
    if (a == NULL){
        printf("Out of memory!\n");
        exit(1);
    }
    while (size < size_max){
        for (i = 0; i < size; i++){
            a[i] = rand();
        }
        clock_t start = clock(), diff;
        qsort(a, size, sizeof(int), cmpfunc);
        diff = clock() - start;
        fprintf(f3, "%f, ", (double)(diff) / CLOCKS_PER_SEC * 1000);
        size = size + size_step;
        a = (int*)realloc(a, size * sizeof(int));
        if (a == NULL){
            printf("Out of memory!\n");
            exit(1);
        }
    }
    fclose(f3);
    free(a);
    free(b);
    return 0;
}
|
silo-check.c | int main() {
double **u;
double **w;
unsigned long int _imopVarPre147;
void *_imopVarPre148;
_imopVarPre147 = 500 * sizeof(double *);
_imopVarPre148 = malloc(_imopVarPre147);
u = (double **) _imopVarPre148;
unsigned long int _imopVarPre151;
void *_imopVarPre152;
_imopVarPre151 = 500 * sizeof(double *);
_imopVarPre152 = malloc(_imopVarPre151);
w = (double **) _imopVarPre152;
int p;
for (p = 0; p < 500; p++) {
unsigned long int _imopVarPre155;
void *_imopVarPre156;
_imopVarPre155 = 500 * sizeof(double);
_imopVarPre156 = malloc(_imopVarPre155);
u[p] = (double *) _imopVarPre156;
unsigned long int _imopVarPre159;
void *_imopVarPre160;
_imopVarPre159 = 500 * sizeof(double);
_imopVarPre160 = malloc(_imopVarPre159);
w[p] = (double *) _imopVarPre160;
}
#pragma omp parallel
{
int i, j;
#pragma omp for nowait
for (i = 1; i < 500 - 1; i++) {
for (j = 1; j < 500 - 1; j++) {
w[i][j] =
(u[i - 1][j] + u[i + 1][j] + u[i][j - 1] + u[i][j + 1])
/ 4.0;
}
}
#pragma omp barrier
my_diff = 0.0;
#pragma omp for nowait
for (i = 1; i < 500 - 1; i++) {
for (j = 1; j < 500 - 1; j++) {
double _imopVarPre167;
double _imopVarPre168;
_imopVarPre167 = w[i][j] - u[i][j];
_imopVarPre168 = fabs(_imopVarPre167);
if (my_diff < _imopVarPre168) {
double _imopVarPre170;
double _imopVarPre171;
_imopVarPre170 = w[i][j] - u[i][j];
_imopVarPre171 = fabs(_imopVarPre170);
my_diff = _imopVarPre171;
}
}
}
}
}
|
problem.p4.c | //------------------------------------------------------------------------------------------------------------------------------
// Samuel Williams
// SWWilliams@lbl.gov
// Lawrence Berkeley National Lab
//------------------------------------------------------------------------------------------------------------------------------
void evaluateBeta(double x, double y, double z, double *B, double *Bx, double *By, double *Bz){
double Bmin = 1.0;
double Bmax = 10.0;
double c2 = (Bmax-Bmin)/2; // coefficients to affect this transition
double c1 = (Bmax+Bmin)/2;
double c3 = 10.0; // how sharply (B)eta transitions
double xcenter = 0.50;
double ycenter = 0.50;
double zcenter = 0.50;
// calculate distance from center of the domain (0.5,0.5,0.5)
double r2 = pow((x-xcenter),2) + pow((y-ycenter),2) + pow((z-zcenter),2);
double r2x = 2.0*(x-xcenter);
double r2y = 2.0*(y-ycenter);
double r2z = 2.0*(z-zcenter);
//double r2xx = 2.0;
//double r2yy = 2.0;
//double r2zz = 2.0;
double r = pow(r2,0.5);
double rx = 0.5*r2x*pow(r2,-0.5);
double ry = 0.5*r2y*pow(r2,-0.5);
double rz = 0.5*r2z*pow(r2,-0.5);
//double rxx = 0.5*r2xx*pow(r2,-0.5) - 0.25*r2x*r2x*pow(r2,-1.5);
//double ryy = 0.5*r2yy*pow(r2,-0.5) - 0.25*r2y*r2y*pow(r2,-1.5);
//double rzz = 0.5*r2zz*pow(r2,-0.5) - 0.25*r2z*r2z*pow(r2,-1.5);
//- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
*B = c1+c2*tanh( c3*(r-0.25) );
*Bx = c2*c3*rx*(1-pow(tanh( c3*(r-0.25) ),2));
*By = c2*c3*ry*(1-pow(tanh( c3*(r-0.25) ),2));
*Bz = c2*c3*rz*(1-pow(tanh( c3*(r-0.25) ),2));
}
//------------------------------------------------------------------------------------------------------------------------------
void evaluateU(double x, double y, double z, double *U, double *Ux, double *Uy, double *Uz, double *Uxx, double *Uyy, double *Uzz, int isPeriodic){
// should be continuous in u, u', and u''
// v(w) = w^4 - 2w^3 + w^2 + c
// u(x,y,z) = v(x)v(y)v(z)
// If Periodic, then the integral of the RHS should sum to zero.
// Setting shift=1/30 should ensure that the integrals of X, Y, or Z should sum to zero...
// That should(?) make the integrals of u,ux,uy,uz,uxx,uyy,uzz sum to zero and thus make the integral of f sum to zero
// If dirichlet, then w(0)=w(1) = 0.0
// Setting shift to 0 should ensure that U(x,y,z) = 0 on boundary
double shift = 0.0;if(isPeriodic)shift= -1.0/30.0;
double X = 1.0*pow(x,4) - 2.0*pow(x,3) + 1.0*pow(x,2) + shift;
double Y = 1.0*pow(y,4) - 2.0*pow(y,3) + 1.0*pow(y,2) + shift;
double Z = 1.0*pow(z,4) - 2.0*pow(z,3) + 1.0*pow(z,2) + shift;
double Xx = 4.0*pow(x,3) - 6.0*pow(x,2) + 2.0*x;
double Yy = 4.0*pow(y,3) - 6.0*pow(y,2) + 2.0*y;
double Zz = 4.0*pow(z,3) - 6.0*pow(z,2) + 2.0*z;
double Xxx = 12.0*pow(x,2) - 12.0*x + 2.0;
double Yyy = 12.0*pow(y,2) - 12.0*y + 2.0;
double Zzz = 12.0*pow(z,2) - 12.0*z + 2.0;
*U = X*Y*Z;
*Ux = Xx*Y*Z;
*Uy = X*Yy*Z;
*Uz = X*Y*Zz;
*Uxx = Xxx*Y*Z;
*Uyy = X*Yyy*Z;
*Uzz = X*Y*Zzz;
}
//------------------------------------------------------------------------------------------------------------------------------
// Discretize the model problem onto "level": fill the face-centered beta
// coefficients, the cell-centered alpha, and the RHS f = a*alpha*u -
// b*( grad(beta).grad(u) + beta*laplacian(u) ), using the analytic u and
// beta from evaluateU()/evaluateBeta().
void initialize_problem(level_type * level, double hLevel, double a, double b){
  level->h = hLevel;
  int box;
  for(box=0;box<level->num_my_boxes;box++){
    int i,j,k;
    const int jStride = level->my_boxes[box].jStride;
    const int kStride = level->my_boxes[box].kStride;
    const int  ghosts = level->my_boxes[box].ghosts;
    // boxes are cubic: all three dims read the same .dim field
    const int   dim_i = level->my_boxes[box].dim;
    const int   dim_j = level->my_boxes[box].dim;
    const int   dim_k = level->my_boxes[box].dim;
    #ifdef _OPENMP
    #pragma omp parallel for private(k,j,i) collapse(3)
    #endif
    for(k=0;k<=dim_k;k++){ // include high face
    for(j=0;j<=dim_j;j++){ // include high face
    for(i=0;i<=dim_i;i++){ // include high face
      //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
      // linear index into the ghosted box; x/y/z are global cell centers
      int ijk = (i+ghosts) + (j+ghosts)*jStride + (k+ghosts)*kStride;
      double x = hLevel*( (double)(i+level->my_boxes[box].low.i) + 0.5 ); // +0.5 to get to the center of cell
      double y = hLevel*( (double)(j+level->my_boxes[box].low.j) + 0.5 );
      double z = hLevel*( (double)(k+level->my_boxes[box].low.k) + 0.5 );
      double A,B,Bx,By,Bz,Bi,Bj,Bk;
      double U,Ux,Uy,Uz,Uxx,Uyy,Uzz;
      //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
      // constant-coefficient defaults (used unless the variable-coefficient
      // stencil is compiled in below)
      A  = 1.0;
      B  = 1.0;
      Bx = 0.0;
      By = 0.0;
      Bz = 0.0;
      Bi = 1.0;
      Bj = 1.0;
      Bk = 1.0;
      #ifdef STENCIL_VARIABLE_COEFFICIENT // variable coefficient problem...
      // face-centered betas are sampled half a cell back along each axis;
      // note the first three calls clobber Bx/By/Bz — only the last
      // (cell-centered) call's derivatives are used in F below
      evaluateBeta(x-hLevel*0.5,y           ,z           ,&Bi,&Bx,&By,&Bz); // face-centered value of Beta for beta_i
      evaluateBeta(x           ,y-hLevel*0.5,z           ,&Bj,&Bx,&By,&Bz); // face-centered value of Beta for beta_j
      evaluateBeta(x           ,y           ,z-hLevel*0.5,&Bk,&Bx,&By,&Bz); // face-centered value of Beta for beta_k
      evaluateBeta(x           ,y           ,z           ,&B ,&Bx,&By,&Bz); // cell-centered value of Beta
      #endif
      //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
      evaluateU(x,y,z,&U,&Ux,&Uy,&Uz,&Uxx,&Uyy,&Uzz, (level->boundary_condition.type == BC_PERIODIC) );
      double F = a*A*U - b*( (Bx*Ux + By*Uy + Bz*Uz) + B*(Uxx + Uyy + Uzz) );
      //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
      level->my_boxes[box].vectors[VECTOR_BETA_I][ijk] = Bi;
      level->my_boxes[box].vectors[VECTOR_BETA_J][ijk] = Bj;
      level->my_boxes[box].vectors[VECTOR_BETA_K][ijk] = Bk;
      #ifdef VECTOR_ALPHA
      level->my_boxes[box].vectors[VECTOR_ALPHA ][ijk] = A;
      #endif
      //level->my_boxes[box].vectors[VECTOR_UTRUE ][ijk] = U; // obviated by Richardson analysis
      level->my_boxes[box].vectors[VECTOR_F     ][ijk] = F;
      //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    }}}
  }
}
//------------------------------------------------------------------------------------------------------------------------------
|
7z_fmt_plug.c | /*
* 7-Zip cracker patch for JtR. Hacked together during June of 2013 by Dhiru
* Kholia <dhiru at openwall.com>. Unicode support and other fixes by magnum.
*
* This software is Copyright (c) 2013 Dhiru Kholia <dhiru at openwall.com>
* and Copyright (c) 2013-2017 magnum, and it is hereby released to the general
* public under the following terms:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*/
/*
* We've seen one single sample where we could not trust the padding check
* (early rejection). To be able to crack such hashes, define this to 0.
* This hits performance in some cases.
*/
#define TRUST_PADDING 0
#if FMT_EXTERNS_H
extern struct fmt_main fmt_sevenzip;
#elif FMT_REGISTERS_H
john_register_one(&fmt_sevenzip);
#else
#include <string.h>
#include <errno.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "arch.h"
#if !ARCH_LITTLE_ENDIAN
#undef SIMD_COEF_32
#undef SIMD_PARA_SHA256
#endif
#include "johnswap.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "aes.h"
#include "sha2.h"
#include "crc32.h"
#include "unicode.h"
#include "dyna_salt.h"
#include "lzma/LzmaDec.h"
#include "lzma/Lzma2Dec.h"
#define FORMAT_LABEL "7z"
#define FORMAT_NAME "7-Zip"
#define FORMAT_TAG "$7z$"
#define TAG_LENGTH (sizeof(FORMAT_TAG)-1)
#define BENCHMARK_COMMENT " (512K iterations)"
#define BENCHMARK_LENGTH 0
#define BINARY_SIZE 0
#define BINARY_ALIGN 1
#define SALT_SIZE sizeof(struct custom_salt*)
#define SALT_ALIGN sizeof(struct custom_salt*)
#ifndef OMP_SCALE
#define OMP_SCALE 1 // tuned on core i7
#endif
#ifdef SIMD_COEF_32
#include "simd-intrinsics.h"
#define NBKEYS (SIMD_COEF_32*SIMD_PARA_SHA256)
#define GETPOS(i,idx) ( (idx&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3))*SIMD_COEF_32 + (3-((i)&3)) + (unsigned int)idx/SIMD_COEF_32*SHA_BUF_SIZ*4*SIMD_COEF_32 )
#define HASH_IDX_IN(idx) (((unsigned int)idx&(SIMD_COEF_32-1))+(unsigned int)idx/SIMD_COEF_32*SHA_BUF_SIZ*SIMD_COEF_32)
#define HASH_IDX_OUT(idx) (((unsigned int)idx&(SIMD_COEF_32-1))+(unsigned int)idx/SIMD_COEF_32*8*SIMD_COEF_32)
#define ALGORITHM_NAME "SHA256 " SHA256_ALGORITHM_NAME " AES"
#define PLAINTEXT_LENGTH 28
#define MIN_KEYS_PER_CRYPT NBKEYS
#define MAX_KEYS_PER_CRYPT NBKEYS
#else
#define ALGORITHM_NAME "SHA256 32/" ARCH_BITS_STR " AES"
#define PLAINTEXT_LENGTH 125
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
#include "memdbg.h"
static struct fmt_tests sevenzip_tests[] = {
/* CRC checks passes for this hash (4 bytes of padding) */
{"$7z$128$19$0$1122$8$a264c94f2cd72bec0000000000000000$725883103$112$108$64749c0963e20c74602379ca740165b9511204619859d1914819bc427b7e5f0f8fc67f53a0b53c114f6fcf4542a28e4a9d3914b4bc76baaa616d6a7ec9efc3f051cb330b682691193e6fa48159208329460c3025fb273232b82450645f2c12a9ea38b53a2331a1d0858813c8bf25a831", "openwall"},
/* LZMA before CRC (9 bytes of padding) */
{"$7z$1$19$0$1122$8$732b59fd26896e410000000000000000$2955316379$192$183$7544a3a7ec3eb99a33d80e57907e28fb8d0e140ec85123cf90740900429136dcc8ba0692b7e356a4d4e30062da546a66b92ec04c64c0e85b22e3c9a823abef0b57e8d7b8564760611442ecceb2ca723033766d9f7c848e5d234ca6c7863a2683f38d4605322320765938049305655f7fb0ad44d8781fec1bf7a2cb3843f269c6aca757e509577b5592b60b8977577c20aef4f990d2cb665de948004f16da9bf5507bf27b60805f16a9fcc4983208297d3affc4455ca44f9947221216f58c337f$232$5d00000100", "password"},
/* CRC checks passes for this hash (no padding) */
{"$7z$0$19$0$1122$8$d1f50227759415890000000000000000$1412385885$112$112$5e5b8b734adf52a64c541a5a5369023d7cccb78bd910c0092535dfb013a5df84ac692c5311d2e7bbdc580f5b867f7b5dd43830f7b4f37e41c7277e228fb92a6dd854a31646ad117654182253706dae0c069d3f4ce46121d52b6f20741a0bb39fc61113ce14d22f9184adafd6b5333fb1", "password"},
/* This requires LZMA (no padding) */
{"$7z$1$19$0$1122$8$5fdbec1569ff58060000000000000000$2465353234$112$112$58ba7606aafc7918e3db7f6e0920f410f61f01e9c1533c40850992fee4c5e5215bc6b4ea145313d0ac065b8ec5b47d9fb895bb7f97609be46107d71e219544cfd24b52c2ecd65477f72c466915dcd71b80782b1ac46678ab7f437fd9f7b8e9d9fad54281d252de2a7ae386a65fc69eda$176$5d00000100", "password"},
/* Length checks */
{"$7z$128$19$0$1122$8$94fb9024fdd3e6c40000000000000000$3965424295$112$99$1127828817ff126bc45ff3c5225d9d0c5d00a52094909674e6ed3dc431546d9a672738f2fa07556340d604d2efd2901b9d2ac2c0686c25af9c520c137b16c50c54df8703fd0b0606fa721ad70aafb9c4e3b288ef49864e6034021969b4ce11e3b8e269a92090ccf593c6a0da06262116", ""},
{"$7z$128$19$0$1122$8$6fd059d516d5490f0000000000000000$460747259$112$99$af163eb5532c557efca78fbb448aa04f348cd258c94233e6669f4e5025f220274c244d4f2347a7512571d9b6015a1e1a90e281983b743da957437b33092eddb55a5bc76f3ab6c7dbabb001578d1043285f5fa791fd94dd9779b461e44cbfe869f891007335b766774ccee3813ec8cd57", "&"},
{"$7z$128$19$0$1122$8$6d4a12af68d83bfe0000000000000000$993697592$112$99$7c308faa36b667599ee4418435ab621884c5c115ee3b70be454fe99236422f4f2d5cd9c8fcfbe6b6b0805ee602ce8488a08f7ea14a4f5c0c060fc685bff187720a402b23a5cfe3c9c5a5ae07f91209031b8f9804ac10459e15a0158031f6c58e507401ec6e1e6de8f64d94201159432b", "&'"},
{"$7z$128$19$0$1122$8$7527d758a59181830000000000000000$3917710544$112$99$61a9ca9e835bd0f2dc474b34d5d89bcf8cd1bb071a984ee1dcf224174a60bcee140fcf2fde8927fe4f3f4eb4a2cc39faff73f1898ae25cc92bd02939f4317ebb173bf3b6f01eef183163ddd533ad5c076f87341bd8b86d8460c68fc390aa8df89fc4076bdfd24e157f6c07e105c07612", "&'("},
{"$7z$128$19$0$1122$8$68928bade860a2b80000000000000000$3235890186$112$99$4b685a569c3aed78d217bae9ec64fa06b614df55c1cb0d160563d87efe38813accb38dd7037f86cebc91751c2488769c7398dfefaf491c024f2d640dcb388a56404cd5ac475ba16b5f8206fa45d5923b3a0c8dd0f24460ccee0d93bea03ad58b8a8db502a55ba1775560b3d194f342f7", "&'()"},
{"$7z$128$19$0$1122$8$81931b9ba0b069820000000000000000$3094344848$112$99$fdbb2622143d25b13992b1467ce9edce4e3df8ca07535735b76e8abcb0791e384a1d5547483e19c3bd6e5a0742d29c403cfc8b3a003b285e80b350ea9157600eb91c49b329903de9ec9b17d1c95b0e136b579e165a6e80550464fa99830bfd9ee58fc14516b614ff9f84ec80e6880a36", "&'()*"},
{"$7z$128$19$0$1122$8$ccf696913989510d0000000000000000$1238556212$112$99$647264fbc665e73ecfe3ef7055fef0d91cb86833d6df08b2f7a3c1c89cf7cdaa09a802c8bfb2e5c6b55143a315df74d841b349fc8b43613d0f87cc90325fd56fc17ee08df7ce76cdc9cda61bd4d5632e20af3db16e921c755174f291c0aa6581844def4547380e2dd4a574435d17e1e8", "&'()*+"},
{"$7z$128$19$0$1122$8$d618bd3ec8bafd800000000000000000$1349785$112$99$6514e2e7468e6f0ed63796cfc0588ac2d75f024c4a0fa03778bd252d316d03e48a08ffcc0011725ad4f867e9a9666630dff4f352c59bcbadb94b9d0e2c42d653b80f480005ce868a0b1a075b2e00abd743de0867d69cdc8b56c7f9770537d50e6bb11eb0d2d7d8b6af5dd8ecb50ab553", "&'()*+,"},
{"$7z$128$19$0$1122$8$1c1586d191f190890000000000000000$642253888$112$99$f55cf9ab802b10a83471abe9319711ae79906cd6921365167c389470a3a8a72b0d877379daae2c24ea2258e8586f12d5036aff9ddc8e26861467b0843ffb72e4410c2be76ec111d37f875c81b244ed172f1f4765a220d830a9615787e9d07f8582146556e9c566b64897a47d18a82b36", "&'()*+,-"},
#if DEBUG
{"$7z$128$19$0$1122$8$0df03cbdbc73e22a0000000000000000$3194757927$112$99$df53e9d8b4e02cf2962ad87912021508a36910c399a7abc4a3a5423fa2184816af7172418eb4763924ec8b099b7ca95abdc6faac9aaa6e181ffa60b7e8bdb2bf576536ca69152e3b6b97302c796bbc9dec78db6ba7a4a58e68f8ee28f27dea26bd4f848dc3a3315e97e1463b5c171ce5", "&'()*+,-."},
{"$7z$128$19$0$1122$8$7785351cf9fe5dfa0000000000000000$1304801610$112$99$7b35280384726da8521fee0786ef43e0aa621394a6f015b65cbd7f1329f43c4543b8a451a0007c03a3ce3f61e639c54ede3e580600b113777822b6d562390d14ed236e5bac3d3af63ae23015148a95e7ccbc9eea653b52c606ca09ec51fd2b0c4cfc2b760fccc1fe0ccdd9ee3fcb8129", "&'()*+,-./"},
{"$7z$128$19$0$1122$8$70eb7f4b821cf5310000000000000000$3381356868$112$99$c26db2cb89df1237f323d92044726d03cfc7ba83115e789243c3b2570ae674d8356a23e004b103638b1ea9fe6ff5db844a1ddcaaed8a71a8d8e343f73868b4acafd34d493345439b0e0be87d2cf52eb4cceaafcff0dfaf9cf25080693ede267460320e1282b869a5f0b6c8789e769640", "&'()*+,-./0"},
{"$7z$128$19$0$1122$8$2ac0f1307794d8e10000000000000000$2871514580$112$99$4783d91fa72c377310654e961120e71ecdd27ec2e67366e83291daefcea03514ca9ecea031fcbd25c0759c1f242219e673cee093ef361664f18dacf85ca0620fd7092477ceeff7c548df0a475ce93278a564fe4ddb4ee2e4695cbe417a792e822204390ca5a530208a8ed51bc01f79e6", "&'()*+,-./01"},
{"$7z$128$19$0$1122$8$5bc4988c71cba8b70000000000000000$2815498089$112$99$0e4368dde66925e2bfac9a450291f8f817beaa891f08c4d2735d20b3147df581e2f3c53abfe2b0971186ac39280eb354ca5989f9043ad0288302d0ac59a3c8fa99d26c9619b81d22996f24eec1dba361afdd5e50060c2599a40a00c83c4ee0bc4ebe6e3126a64a743af95d9b22ee5867", "&'()*+,-./012"},
{"$7z$128$19$0$1122$8$33ab0ad513b7d6910000000000000000$107430285$112$99$f9f1195a4210eadc5b23f046f81c8cfaec3b90d8b6b67893f10bd9bedd0d859d0695bca5ce315cecbc2910dce27e4c1a1416675d841901c8d84846360b1919ebcba91143713c6b755758d3db64d39344da18222341818220cc43f3ee3a91cbc288f1aafe377b53def310d3b83d32aee3", "&'()*+,-./0123"},
{"$7z$128$19$0$1122$8$dd490a165c1b90f90000000000000000$2897354864$112$99$51efe41b67875503acebe2e199cb542a279520b468a61ba67b54612e317a84e95879a34eaad82124798f32c19f9c0786e8faaac768da5f6b2c91e3ba9f97a03a992c18b5b9b21a5f2b67ae9daeef37ec115f44bfb8b10ac3cb7862b6c024413a2ee801aa674df05e8b56bd8654f279f5", "&'()*+,-./01234"},
{"$7z$128$19$0$1122$8$9077cb191a5969b40000000000000000$3637063426$112$99$1e74746c59bdfe6b3f3d957493c9d5b92ba358f97e19d30e20443cb2fbac0501e07a162344ac7cf7cfa727c70a2bcf52593accc5c2c070c2331863ac76da5ad2f5de374292a87c6af67ab561f9cf71ae472ed1267d481c250f5b4d82d0ec0b2b8531db1fe4637c3f4e3a08de1b9b5418", "&'()*+,-./012345"},
{"$7z$128$19$0$1122$8$adc090d27b0343d30000000000000000$1147570982$112$99$ac14b9dc3751cfe6c1c719ceef3d73946fff2b0f924e06cd3177883df770e5505551bcf5598277801f46584a4f41530f50007c776d2bb91fd160148042275dfe4e420ff72244409f59c687a5bb2d0fc1bb29138689094fe40bb0f22785c63c631cd05abf4f7f3c9b6832e192e103d2f1", "&'()*+,-./0123456"},
{"$7z$128$19$0$1122$8$8dee69dc35517a2a0000000000000000$87427823$112$99$ea36cf8b577a0b5f31115f8550987f05f174b347a8a6433a08c013ecd816c8ecaad163c62db9bae6c57ace3c2a6ce0b36f78ad4723328cc022906400eed55e0e3685a5e8e6b369df780ee72f3d25ccd49d7f40d013052e080723dd4c0b1c75302c884ea956e3b6fd27261eb8c49dea51", "&'()*+,-./01234567"},
{"$7z$128$19$0$1122$8$200ce603d6f355f10000000000000000$3012105149$112$99$0ae42342f52172ad921178a25df3666e34e5a217d0afb3655088806f821d374bf522c197e59b131dbc574d4c936472f59f8892f69e47724ea52ecc5dc7d3ed734c557c9698a6f01519039714c065ad25008003c93cb7f694ee07267d5fcdebab5d149d5404023a0112faec2264d33ff6", "&'()*+,-./012345678"},
{"$7z$128$19$0$1122$8$a5007fc77fa5cc0b0000000000000000$1082728565$112$99$32c404c9633e9c61b76556e169695248008c51ca8f7f0f79c4a271ac6eb1d905a2622132f2f6988f9f3f5e375c592ec63d92d7b183b5801b149595ed440b23a083633de9f1cb5b6ac3238b7523b23141e686e6cbe9d4d3a28fc6489e902c17aeff6cd4cb516bef5cd5c6def78cb88ad4", "&'()*+,-./0123456789"},
{"$7z$128$19$0$1122$8$fd531c4e580be9a60000000000000000$1843420503$112$99$704289830b1add1c8ee6fd622ecf5b8da01988580bdb52f6269cc61c21838849d3a04299eaee15e0cae0eff9f6c3c82f71e434b3aa1c0ca824b90438c1c983130218acd128d9186e5dc2d19a8db602a0382cb60dadb4641b46fe532b799d29a4b882beaa9217f48ddccc99578617f8a0", "&'()*+,-./0123456789:"},
{"$7z$128$19$0$1122$8$7f94a95f71c1b0df0000000000000000$141406606$112$99$1a510a6fda9788b4f4b2274ea929044c00b61b23946bc417ead90ad64dcc9a55378f9ab74f7d693a5dcf455c00f82f6c2a885b664f4ab10c9969026714ce2773030f1c5872ca3948cd612e21b321826c2a561104d57a3ba2055f03aa9cc264821544ec4bccc41f4ac76aab97accb8f9c", "&'()*+,-./0123456789:;"},
{"$7z$128$19$0$1122$8$e24e93c7a9ebde080000000000000000$718561925$112$99$580bf36388526c932c22e3227b51774b6963a9c5b96fc8e2ac70a4302864fa88f50e7c00d9a79e0bca0f07a236e51200dc23435b7680e6fa99b19d790ac093af615a972f8b232686c21279234a2582f9714c5a1a2d326084158eba3e81b4f8ad40784d84baa8ddbed19f1c6603156d2c", "&'()*+,-./0123456789:;<"},
{"$7z$128$19$0$1122$8$6fbd519735b131710000000000000000$1248418560$112$99$cc9e3c97073d7fd37f04d4e6983b386e3ac00f6292dedb0f566dccf22cdbbb55fee8669edade383e96aa0a740e2b42aa7fddbe5831cac10828c624ee03a1a256c6e777c3d714c55296cb815c509a252b9426fe8d4566c944efe3fac5ea94910e55a390aef2c729a031e832c406049810", "&'()*+,-./0123456789:;<="},
{"$7z$128$19$0$1122$8$3ce1b899fc03d9c30000000000000000$1452122600$112$99$d4be60d5ab390713c7189f0dd808227c01f15f71fcf4bbccce6cb9238d6418c115eff59784d96ff8944575710a5799c7bcb761e8f1bfb7646a0e8fac3728ba4cca44fb82e5dd9f87bb26828566af64374b512fa094d35af8d743bded88b6257ec98a99b50dd225d4608b283bf035ac08", "&'()*+,-./0123456789:;<=>"},
{"$7z$128$19$0$1122$8$656e2285aabed25b0000000000000000$3885982465$112$99$77f2871e556e7f5278a9e896e91cd386ca8935128957d31fdce0603ea0e71c08b908a4c2d9f2d279757ced848be9482067c9d7935c88e5233aaa94a101d29908f7f015646758029d2078d25d0886bb9f0cdc0dd5136d72e90ceeea678564b199866dd8c9e5fe927102ee2dcf1cd4167f", "&'()*+,-./0123456789:;<=>?"},
{"$7z$128$19$0$1122$8$44ffefa48fa5a5b00000000000000000$1011653568$112$99$5d2504a1eb819218b9ad552e377d37e811ffccb64a554f404d982d209edfafb893b679cc881bbcbc606e67ffa055f712d7f140b554769511bc00321765830ea7c5db810fa2000ae7f4250b74aa61d881db66ae6f30e4c8e71887960c117b268d9934b8b5d52d4abdcb42b0e4ff40b805", "&'()*+,-./0123456789:;<=>?@"},
{"$7z$128$19$0$1122$8$b6e089dd0c52b6b80000000000000000$1229766981$112$99$49a8334d64d9cc7d710fe3b9c35f5d7cb0ec44d5db8a90966fbee93f85fdeeeca859c55519addb20c4628c9204dd24d1169b34dc53a2a685440fae7ed6748c172a8e9dcc42c8dffe60196818ad17a6f9314fcfd4d97cab3c18cf279df344e00fd04eaff32f29cbfcdb6832cfb69fe351", "&'()*+,-./0123456789:;<=>?@A"},
#endif /* DEBUG */
{NULL}
};
static UTF16 (*saved_key)[PLAINTEXT_LENGTH + 1]; // per-candidate password, UTF-16
static int *saved_len;                 // per-candidate byte length of saved_key entry
static int *cracked;                   // per-candidate result flag set by crypt_all()
static int new_keys;                   // nonzero -> KDF must be rerun (keys or cost changed)
static int max_kpc;                    // max_keys_per_crypt + 1 (extra slot for vector tail)
static unsigned char (*master)[32];    // per-candidate derived AES-256 key
#ifdef SIMD_COEF_32
static uint32_t (*vec_in)[2][NBKEYS*16]; // double-buffered SHA-256 input blocks per vector batch
static uint32_t (*vec_out)[NBKEYS*8];    // SHA-256 state per vector batch
static int *indices;                     // candidate indices grouped by password length
#endif
// Variable-size salt (dyna_salt): fixed header followed by "length" bytes of
// encrypted data in the flexible-ish "data" tail (allocated oversize).
static struct custom_salt {
	dyna_salt dsalt;
	size_t length;     /* used in decryption */
	size_t unpacksize; /* used in padding check */
	size_t crc_len;    /* used in CRC calculation */
	int NumCyclesPower;  // KDF cost: 2^NumCyclesPower SHA-256 rounds
	int SaltSize;
	int ivSize;
	int type;            // 0=none, 1=LZMA, 2=LZMA2, 128=truncated data
	unsigned char iv[16];
	unsigned char salt[16];
	unsigned int crc;
	unsigned char props[LZMA_PROPS_SIZE];
	unsigned char data[1];
} *cur_salt;
/*
 * Format init: scale the keys-per-crypt parameters for OpenMP, allocate the
 * per-candidate buffers, prime the CRC32 machinery and cap the plaintext
 * length for UTF-8 target encodings.
 */
static void init(struct fmt_main *self)
{
	CRC32_t crc;

#if defined (_OPENMP)
	int threads = omp_get_max_threads();

	/* min scales by thread count alone; max additionally by OMP_SCALE */
	self->params.min_keys_per_crypt *= threads;
	threads *= OMP_SCALE;
	self->params.max_keys_per_crypt *= threads;
#endif
	/* one extra slot to handle the tail of the vector buffer */
	max_kpc = self->params.max_keys_per_crypt + 1;

	cracked   = mem_calloc(max_kpc, sizeof(*cracked));
	saved_len = mem_calloc(max_kpc, sizeof(*saved_len));
	saved_key = mem_calloc(max_kpc, sizeof(*saved_key));
#ifdef SIMD_COEF_32
	vec_in  = mem_calloc_align(self->params.max_keys_per_crypt,
	                           sizeof(*vec_in), MEM_ALIGN_CACHE);
	vec_out = mem_calloc_align(self->params.max_keys_per_crypt,
	                           sizeof(*vec_out), MEM_ALIGN_CACHE);
#endif
	/* presumably primes the global CRC table; local handle is discarded —
	   verify against crc32.c */
	CRC32_Init(&crc);

	if (options.target_enc == UTF_8)
		self->params.plaintext_length = MIN(125, 3 * PLAINTEXT_LENGTH);
}
/* Release every buffer allocated by init()/crypt_all() (MEM_FREE NULL-safe). */
static void done(void)
{
#ifdef SIMD_COEF_32
	MEM_FREE(indices);
	MEM_FREE(vec_out);
	MEM_FREE(vec_in);
#endif
	MEM_FREE(master);
	MEM_FREE(saved_len);
	MEM_FREE(saved_key);
	MEM_FREE(cracked);
}
/*
 * Syntactic validation of a "$7z$..." ciphertext line. Walks the
 * '$'-separated fields on a strdup'd copy (strtokm mutates its input) and
 * rejects anything malformed. Returns 1 if well-formed, 0 otherwise.
 * Field order: type $ NumCyclesPower $ saltlen $ salt $ ivlen $ iv $ crc $
 * datalen $ unpacksize $ data [ $ crclen $ coder-props ] (last two only for
 * LZMA/LZMA2 types).
 * NOTE(review): the salt field itself is accepted without a hex check
 * (unlike iv/data) — presumably because 7z archives in practice use an
 * empty salt; confirm before tightening.
 */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *ctcopy, *keeptr, *p;
	int type, len, NumCyclesPower;
	if (strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH) != 0)
		return 0;
	ctcopy = strdup(ciphertext);
	keeptr = ctcopy;
	ctcopy += TAG_LENGTH;
	if ((p = strtokm(ctcopy, "$")) == NULL)
		goto err;
	if (strlen(p) > 3 || !isdec(p))
		goto err;
	type = atoi(p);
	if (strlen(p) == 0 || type < 0 || type > 128) /* Compression type */
		goto err;
	if (type > 2 && type != 128) /* none, LZMA or LZMA2 */
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL) /* NumCyclesPower */
		goto err;
	if (strlen(p) > 2)
		goto err;
	if (!isdec(p))
		goto err;
	NumCyclesPower = atoi(p);
	/* KDF cost bound: 2^1 .. 2^24 SHA-256 rounds */
	if (NumCyclesPower > 24 || NumCyclesPower < 1)
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL) /* salt length */
		goto err;
	if (!isdec(p))
		goto err;
	len = atoi(p);
	if (len > 16) /* salt length */
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL) /* salt */
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL) /* iv length */
		goto err;
	if (strlen(p) > 2)
		goto err;
	if (!isdec(p))
		goto err;
	len = atoi(p);
	if (len > 16) /* iv length */
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL) /* iv */
		goto err;
	if (!ishexlc(p))
		goto err;
	/* the iv field may be longer than ivlen only if the excess is all-zero padding */
	if (strlen(p) / 2 > len && strcmp(p+len*2, "0000000000000000"))
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL) /* crc */
		goto err;
	/* CRC may have been printed as signed or unsigned decimal */
	if (!isdecu(p) && !isdec_negok(p))
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL) /* data length */
		goto err;
	if (!isdec(p))
		goto err;
	len = atoi(p);
	if ((p = strtokm(NULL, "$")) == NULL) /* unpacksize */
		goto err;
	if (!isdec(p)) /* no way to validate, other than atoi() works for it */
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL) /* data */
		goto err;
	if (strlen(p) / 2 != len) /* validates data_len atoi() */
		goto err;
	if (!ishexlc(p))
		goto err;
	/* LZMA/LZMA2 entries carry two extra fields */
	if (type && type != 128) {
		if ((p = strtokm(NULL, "$")) == NULL) /* CRC len */
			goto err;
		if (!isdec(p))
			goto err;
		if ((p = strtokm(NULL, "$")) == NULL) /* Coder props */
			goto err;
		if (!ishexlc(p))
			goto err;
		if (type == 1 && strlen(p) != 10)
			goto err;
		else if (type == 2 && strlen(p) != 2)
			goto err;
	}
	MEM_FREE(keeptr);
	return 1;
err:
	MEM_FREE(keeptr);
	return 0;
}
/*
 * Build a dynamic-size salt (dyna_salt) from a valid()-checked ciphertext.
 * Returns a pointer to a static slot that holds the struct custom_salt*;
 * the salt blob itself is heap-allocated and freed by the dyna_salt layer
 * (salt_alloc_needs_free = 1).
 */
static void *get_salt(char *ciphertext)
{
	struct custom_salt cs;
	struct custom_salt *psalt;
	static void *ptr;
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;
	int i;
	char *p;
	if (!ptr)
		ptr = mem_alloc_tiny(sizeof(struct custom_salt*),
		                     sizeof(struct custom_salt*));
	memset(&cs, 0, sizeof(cs));
	ctcopy += TAG_LENGTH;
	p = strtokm(ctcopy, "$");
	cs.type = atoi(p);
	p = strtokm(NULL, "$");
	cs.NumCyclesPower = atoi(p);
	p = strtokm(NULL, "$");
	cs.SaltSize = atoi(p);
	p = strtokm(NULL, "$"); /* salt */
	/* NOTE(review): the salt hex is skipped, never copied into cs.salt —
	 * presumably because real 7z archives use SaltSize==0; if a nonzero
	 * SaltSize ever occurs, the KDF would hash zero bytes. Confirm. */
	p = strtokm(NULL, "$");
	cs.ivSize = atoi(p);
	p = strtokm(NULL, "$"); /* iv */
	for (i = 0; i < cs.ivSize; i++)
		cs.iv[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	p = strtokm(NULL, "$"); /* crc */
	cs.crc = atou(p); /* unsigned function */
	p = strtokm(NULL, "$");
	cs.length = atoll(p);
	/* Use the checked allocator like the rest of this file; the previous
	 * bare malloc() was unchecked and would NULL-deref on OOM. */
	psalt = mem_alloc(sizeof(struct custom_salt) + cs.length - 1);
	memcpy(psalt, &cs, sizeof(cs));
	p = strtokm(NULL, "$");
	psalt->unpacksize = atoll(p);
	p = strtokm(NULL, "$"); /* data */
	for (i = 0; i < psalt->length; i++)
		psalt->data[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	if (cs.type && cs.type != 128) {
		p = strtokm(NULL, "$"); /* CRC length */
		psalt->crc_len = atoi(p);
		p = strtokm(NULL, "$"); /* Coder properties */
		for (i = 0; p[i * 2] ; i++)
			psalt->props[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
				+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	}
	MEM_FREE(keeptr);
	/* dyna_salt compares from "length" through the variable data tail */
	psalt->dsalt.salt_cmp_offset = SALT_CMP_OFF(struct custom_salt, length);
	psalt->dsalt.salt_cmp_size = SALT_CMP_SIZE(struct custom_salt, length, data, psalt->length);
	psalt->dsalt.salt_alloc_needs_free = 1;
	memcpy(ptr, &psalt, sizeof(void*));
	return ptr;
}
/*
 * Install the current salt. The KDF output depends only on the password and
 * NumCyclesPower, so keys are flagged stale only when the cost changes.
 */
static void set_salt(void *salt)
{
	static int last_power;
	struct custom_salt **pp = (struct custom_salt**)salt;

	cur_salt = *pp;
	if (cur_salt->NumCyclesPower != last_power) {
		last_power = cur_salt->NumCyclesPower;
		new_keys = 1;
	}
}
/*
 * Deterministic salt ordering so that inter-salt session restore is stable:
 * primary key NumCyclesPower, then the raw salt bytes, then the IV bytes.
 */
static int salt_compare(const void *x, const void *y)
{
	const struct custom_salt *a = *((struct custom_salt**)x);
	const struct custom_salt *b = *((struct custom_salt**)y);
	int diff;

	diff = a->NumCyclesPower - b->NumCyclesPower;
	if (diff)
		return diff;
	diff = memcmp(a->salt, b->salt, 16);
	if (diff)
		return diff;
	return memcmp(a->iv, b->iv, 16);
}
/* ISzAlloc callbacks for the LZMA/LZMA2 decoders, routed through JtR's
 * allocator. Fix: the statement semicolon belongs inside the body —
 * "{ MEM_FREE(address) };" fails to compile if MEM_FREE is a do{...}while(0)
 * macro and otherwise leaves a stray file-scope semicolon. */
static void *SzAlloc(void *p, size_t size) { return mem_alloc(size); }
static void SzFree(void *p, void *address) { MEM_FREE(address); }
/*
 * Verify one derived AES-256 key against cur_salt.
 * Pipeline: (optional) early padding check on the last AES block ->
 * full/partial AES-CBC decryption -> (optional) full padding check ->
 * (optional) LZMA/LZMA2 decompression -> CRC-32 comparison.
 * Returns 1 on match, 0 otherwise.
 */
static int sevenzip_decrypt(unsigned char *derived_key)
{
	unsigned char *out = NULL;
	AES_KEY akey;
	unsigned char iv[16];
	union {
		unsigned char crcc[4];
		unsigned int crci;
	} _crc_out;
	unsigned char *crc_out = _crc_out.crcc;
	unsigned int ccrc;
	CRC32_t crc;
	int i;
	int nbytes, pad_size;
	size_t crc_len = cur_salt->unpacksize;
	/* upper bound on the ciphertext we must decrypt to recover crc_len
	   plaintext bytes (LZMA compression-ratio heuristic, rounded to a
	   16-byte AES block) */
	size_t aes_len = cur_salt->crc_len ?
		(cur_salt->crc_len * 11 + 150) / 160 * 16 : crc_len;
	/* number of zero padding bytes the final block must carry */
	pad_size = nbytes = cur_salt->length - cur_salt->unpacksize;
	/*
	 * Early rejection (only decrypt last 16 bytes). We don't seem to
	 * be able to trust this, see #2532, so we only do it for truncated
	 * hashes (it's the only thing we can do!).
	 */
	if ((cur_salt->type == 0x80 || TRUST_PADDING) &&
	    pad_size > 0 && cur_salt->length >= 32) {
		uint8_t buf[16];
		/* CBC: previous ciphertext block serves as the IV here */
		memcpy(iv, cur_salt->data + cur_salt->length - 32, 16);
		AES_set_decrypt_key(derived_key, 256, &akey);
		AES_cbc_encrypt(cur_salt->data + cur_salt->length - 16, buf,
		                16, &akey, iv, AES_DECRYPT);
		/* padding must be all zero bytes, scanned from the tail */
		i = 15;
		while (nbytes > 0) {
			if (buf[i] != 0)
				return 0;
			nbytes--;
			i--;
		}
		if (cur_salt->type == 0x80) /* We only have truncated data */
			return 1;
	}
	/* Complete decryption, or partial if possible */
	aes_len = nbytes ? cur_salt->length : MIN(aes_len, cur_salt->length);
	out = mem_alloc(aes_len);
	memcpy(iv, cur_salt->iv, 16);
	AES_set_decrypt_key(derived_key, 256, &akey);
	AES_cbc_encrypt(cur_salt->data, out, aes_len, &akey, iv, AES_DECRYPT);
	/* Padding check unless we already did the quick one */
	if (TRUST_PADDING && nbytes) {
		i = cur_salt->length - 1;
		while (nbytes > 0) {
			if (out[i] != 0)
				goto exit_bad;
			nbytes--;
			i--;
		}
	}
	if (cur_salt->type == 0x80) /* We only have truncated data */
		goto exit_good;
	/* Optional decompression before CRC */
	if (cur_salt->type == 1) {
		/* LZMA1: needs the 5-byte coder props from the hash */
		ISzAlloc st_alloc = {SzAlloc, SzFree};
		ELzmaStatus status;
		size_t in_size = aes_len;
		uint8_t *new_out;
		SRes rc;
		size_t out_size = cur_salt->crc_len;
		new_out = mem_alloc(out_size);
		if ((rc = LzmaDecode(new_out, &out_size, out, &in_size,
		                     cur_salt->props, LZMA_PROPS_SIZE,
		                     LZMA_FINISH_ANY, &status,
		                     &st_alloc)) == SZ_OK &&
		    out_size == cur_salt->crc_len) {
			MEM_FREE(out);
			out = new_out;
			crc_len = cur_salt->crc_len;
		} else {
			MEM_FREE(new_out);
			goto exit_bad;
		}
	}
	else if (cur_salt->type == 2) {
		/* LZMA2: a single props byte */
		Byte prop = cur_salt->props[0];
		ISzAlloc st_alloc = {SzAlloc, SzFree};
		ELzmaStatus status;
		size_t in_size = aes_len;
		uint8_t *new_out;
		SRes rc;
		size_t out_size = cur_salt->crc_len;
		new_out = mem_alloc(out_size);
		if ((rc = Lzma2Decode((Byte*)new_out, &out_size, out, &in_size,
		                      prop, LZMA_FINISH_ANY, &status,
		                      &st_alloc)) == SZ_OK &&
		    out_size == cur_salt->crc_len) {
			MEM_FREE(out);
			out = new_out;
			crc_len = cur_salt->crc_len;
		} else {
			MEM_FREE(new_out);
			goto exit_bad;
		}
	}
	/* CRC test */
	CRC32_Init(&crc);
	CRC32_Update(&crc, out, crc_len);
	CRC32_Final(crc_out, crc);
	ccrc = _crc_out.crci; /* computed CRC */
#if !ARCH_LITTLE_ENDIAN
	ccrc = JOHNSWAP(ccrc);
#endif
	if (ccrc == cur_salt->crc)
		goto exit_good;
exit_bad:
	MEM_FREE(out);
	return 0;
exit_good:
	MEM_FREE(out);
	return 1;
}
#ifdef SIMD_COEF_32
/*
 * Vectorized 7-Zip KDF: derive 32-byte master keys for NBKEYS candidates at
 * once. All candidates in indices[0..NBKEYS-1] must share the same password
 * length (crypt_all groups them that way). Each round hashes
 * password || 64-bit LE round counter into a double-buffered pair of SHA-256
 * input blocks; full 64-byte blocks are flushed to SIMDSHA256body as they
 * fill. NOTE(review): unlike the scalar path, no salt bytes are mixed in —
 * presumably SaltSize is always 0 for 7z; confirm.
 */
static void sevenzip_kdf(int buf_idx, int *indices, unsigned char *master)
{
	int i, j;
	long long round, rounds = (long long) 1 << cur_salt->NumCyclesPower;
	uint32_t (*buf_in)[NBKEYS*16] = vec_in[buf_idx];
	uint32_t *buf_out = vec_out[buf_idx];
	int pw_len = saved_len[indices[0]];
	int tot_len = (pw_len + 8)*rounds; // total message length in bytes
	int acc_len = 0;                   // bytes written so far
#if !ARCH_LITTLE_ENDIAN
	/* big-endian hosts keep an explicit little-endian counter */
	unsigned char temp[8] = { 0,0,0,0,0,0,0,0 };
#endif
	int cur_buf = 0; // which of the two 64-byte staging blocks is filling
	int fst_blk = 1; // first block uses SHA-256 IV, later ones reload state
	// it's assumed rounds is divisible by 64
	for (round = 0; round < rounds; ++round) {
		// copy password to vector buffer
		for (i = 0; i < NBKEYS; ++i) {
			UTF16 *buf = saved_key[indices[i]];
			for (j = 0; j < pw_len; ++j) {
				int len = acc_len + j;
				// (len & 64)>>6 selects the staging block; GETPOS maps a
				// byte offset to its interleaved SIMD-lane position
				char *in = (char*)buf_in[(len & 64)>>6];
				in[GETPOS(len%64, i)] = ((char*)buf)[j];
			}
			// append the 8-byte little-endian round counter
			for (j = 0; j < 8; ++j) {
				int len = acc_len + pw_len + j;
				char *in = (char*)buf_in[(len & 64)>>6];
#if ARCH_LITTLE_ENDIAN
				in[GETPOS(len%64, i)] = ((char*)&round)[j];
#else
				in[GETPOS(len%64, i)] = temp[j];
#endif
			}
		}
#if !ARCH_LITTLE_ENDIAN
		/* increment the little-endian counter with carry */
		for (j = 0; j < 8; j++)
			if (++(temp[j]) != 0)
				break;
#endif
		acc_len += (pw_len + 8);
		// swap out and compute digest on the filled buffer
		if ((acc_len & 64) != (cur_buf << 6)) {
			if (fst_blk)
				SIMDSHA256body(buf_in[cur_buf], buf_out, NULL, SSEi_MIXED_IN);
			else
				SIMDSHA256body(buf_in[cur_buf], buf_out, buf_out, SSEi_MIXED_IN | SSEi_RELOAD);
			fst_blk = 0;
			cur_buf = 1 - cur_buf;
		}
	}
	// padding: final all-padding block (0x80 marker + 64-bit bit length)
	memset(buf_in[0], 0, sizeof(buf_in[0]));
	for (i = 0; i < NBKEYS; ++i) {
		buf_in[0][HASH_IDX_IN(i)] = (0x80U << 24);
		buf_in[0][HASH_IDX_IN(i) + 15*SIMD_COEF_32] = tot_len*8;
	}
	SIMDSHA256body(buf_in[0], buf_out, buf_out, SSEi_MIXED_IN | SSEi_RELOAD);
	// copy out result, de-interleaving lanes and byte-swapping to LE
	for (i = 0; i < NBKEYS; ++i) {
		uint32_t *m = (uint32_t*)&master[i*32];
		for (j = 0; j < 32/4; ++j)
			m[j] = JOHNSWAP(buf_out[HASH_IDX_OUT(i) + j*SIMD_COEF_32]);
	}
}
#else
/*
 * Scalar 7-Zip KDF: master = SHA-256 over 2^NumCyclesPower rounds of
 * (salt || UTF-16 password || 64-bit little-endian round counter).
 */
static void sevenzip_kdf(int index, unsigned char *master)
{
	long long total = (long long) 1 << cur_salt->NumCyclesPower;
	long long rnd;
#if !ARCH_LITTLE_ENDIAN
	int k;
	unsigned char ctr_le[8] = { 0,0,0,0,0,0,0,0 };
#endif
	SHA256_CTX ctx;

	SHA256_Init(&ctx);
	for (rnd = 0; rnd < total; rnd++) {
		if (cur_salt->SaltSize)
			SHA256_Update(&ctx, cur_salt->salt, cur_salt->SaltSize);
		SHA256_Update(&ctx, (char*)saved_key[index], saved_len[index]);
#if ARCH_LITTLE_ENDIAN
		/* on little-endian hosts the in-memory counter already has the
		   required byte order */
		SHA256_Update(&ctx, (char*)&rnd, 8);
#else
		/* big-endian hosts maintain an explicit LE counter with carry */
		SHA256_Update(&ctx, ctr_le, 8);
		for (k = 0; k < 8; k++)
			if (++(ctr_le[k]) != 0)
				break;
#endif
	}
	SHA256_Final(master, &ctx);
}
#endif
/*
 * Derive keys (only when passwords changed) and attempt decryption for
 * all candidates.  In the SIMD build, candidates are first grouped by
 * password length so each vector batch shares one length; each group is
 * padded up to a multiple of NBKEYS with the dummy index `count`.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;
#ifdef SIMD_COEF_32
	/* static: the grouping must survive later calls with new_keys == 0 */
	static int tot_todo;
	int len;

	/* Tricky formula, see GitHub #1692 :-) */
	if (!indices)
		indices = mem_alloc((max_kpc + MIN(PLAINTEXT_LENGTH + 1, max_kpc) *
		                    (NBKEYS - 1)) * sizeof(int));
	if (!master)
		master = mem_alloc((max_kpc + MIN(PLAINTEXT_LENGTH + 1, max_kpc) *
		                   (NBKEYS - 1)) * sizeof(*master));
#else
	if (!master)
		master = mem_alloc(max_kpc * sizeof(*master));
#endif
#ifdef SIMD_COEF_32
	if (new_keys) {
		// sort passwords by length
		tot_todo = 0;
		for (len = 0; len <= PLAINTEXT_LENGTH*2; len += 2) {
			for (index = 0; index < count; ++index) {
				if (saved_len[index] == len)
					indices[tot_todo++] = index;
			}
			/* pad the group to a full vector width with index `count`;
			   NOTE(review): padded lanes later write cracked[count] —
			   assumes cracked[] has a spare slot; verify its allocation */
			while (tot_todo % NBKEYS)
				indices[tot_todo++] = count;
		}
	}
#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < tot_todo; index += NBKEYS)
	{
		int j;

		if (new_keys)
			sevenzip_kdf(index/NBKEYS, indices + index, master[index]);

		/* do decryption and checks */
		for (j = 0; j < NBKEYS; ++j) {
			cracked[indices[index + j]] = sevenzip_decrypt(master[index + j]);
		}
	}
#else
#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
	{
		/* derive key */
		if (new_keys)
			sevenzip_kdf(index, master[index]);

		/* do decryption and checks */
		cracked[index] = sevenzip_decrypt(master[index]);
	}
#endif // SIMD_COEF_32
	new_keys = 0;
	return count;
}
/* Report whether any candidate in this batch decrypted successfully. */
static int cmp_all(void *binary, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		if (cracked[i])
			return 1;
	}
	return 0;
}
/* Per-candidate check: cracked[] was already filled by crypt_all. */
static int cmp_one(void *binary, int index)
{
	int hit = cracked[index];

	return hit;
}
/* The decryption check in crypt_all is definitive; nothing left to verify. */
static int cmp_exact(char *source, int index)
{
	(void) source;
	(void) index;
	return 1;
}
/*
 * Store a candidate password, converted to UTF-16LE (--encoding aware).
 * saved_len[] is kept in bytes (UTF-16 code units * 2).
 */
static void sevenzip_set_key(char *key, int index)
{
	/* Convert key to utf-16-le format (--encoding aware) */
	int len;

	len = enc_to_utf16(saved_key[index], PLAINTEXT_LENGTH, (UTF8*)key, strlen(key));
	if (len <= 0) {
		/* presumably -len is the source offset where conversion stopped;
		   truncate the 8-bit key there to match — confirm against
		   enc_to_utf16's contract */
		key[-len] = 0; // match truncation
		len = strlen16(saved_key[index]);
	}
	len *= 2; /* stored length is in bytes, not UTF-16 units */
	saved_len[index] = len;
	new_keys = 1;
}
/* Return the stored UTF-16 candidate converted back to the active encoding. */
static char *get_key(int index)
{
	return (char*)utf16_to_enc(saved_key[index]);
}
/*
 * Tunable cost 1: number of KDF rounds for this salt.
 *
 * The shift is done on an unsigned 64-bit value: the KDF itself computes
 * rounds as (long long)1 << NumCyclesPower, and `1 << 31` on a 32-bit
 * signed int would be undefined behavior.  The result is truncated to
 * unsigned int exactly as before for the cost-reporting API.
 */
static unsigned int iteration_count(void *salt)
{
	struct custom_salt *my_salt;

	my_salt = *((struct custom_salt **)salt);
	return (unsigned int)(1ULL << my_salt->NumCyclesPower);
}
/* Tunable cost 2: padding bytes = stored length minus unpacked size. */
static unsigned int padding_size(void *salt)
{
	struct custom_salt *my_salt = *((struct custom_salt **)salt);

	return my_salt->length - my_salt->unpacksize;
}
/* Tunable cost 3: the archive's compression type field. */
static unsigned int compression_type(void *salt)
{
	struct custom_salt *my_salt = *((struct custom_salt **)salt);

	return my_salt->type;
}
/* John the Ripper format descriptor wiring the 7-Zip implementation
 * above into the core (fmt_params, then fmt_methods). */
struct fmt_main fmt_sevenzip = {
	{ /* fmt_params */
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_UNICODE | FMT_UTF8 | FMT_DYNA_SALT | FMT_HUGE_INPUT,
		{ /* tunable cost names, parallel to the accessors below */
			"iteration count",
			"padding size",
			"compression type",
		},
		{ FORMAT_TAG },
		sevenzip_tests
	}, { /* fmt_methods */
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		fmt_default_binary,
		get_salt,
		{ /* tunable cost accessors */
			iteration_count,
			padding_size,
			compression_type,
		},
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		fmt_default_salt_hash,
		salt_compare,
		set_salt,
		sevenzip_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
task-dependency.c | /*
* task-dependency.c -- Archer testcase
*/
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
//
// See tools/archer/LICENSE.txt for details.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// RUN: %libarcher-compile-and-run-race | FileCheck %s
// RUN: %libarcher-compile-and-run-race-noserial | FileCheck %s
// REQUIRES: tsan
#include "ompt/ompt-signal.h"
#include <omp.h>
#include <stdio.h>
#include <unistd.h>
int main(int argc, char *argv[]) { // Archer testcase: intentional task data race on `var`
  int var = 0, a = 0; // var: raced-on data; a: OMPT signal/wait counter
#pragma omp parallel num_threads(2) shared(var, a)
#pragma omp master
  { // all three tasks are created by the master thread
#pragma omp task shared(var, a) depend(out : var) // task 1: ordered writer of var
    {
      OMPT_SIGNAL(a);
      var++;
    }
#pragma omp task shared(a) depend(in : var) // task 2: keeps the dependence chain alive
    {
      OMPT_SIGNAL(a);
      OMPT_WAIT(a, 3);
    }
#pragma omp task shared(var) // depend(in: var) is missing here!
    {
      var++; // races with task 1's var++ (no ordering dependence)
      OMPT_SIGNAL(a);
    }
    // Give other thread time to steal the task.
    OMPT_WAIT(a, 2);
  }
  int error = (var != 2); // both increments happen, race or not
  fprintf(stderr, "DONE\n");
  return error;
}
// CHECK: WARNING: ThreadSanitizer: data race
// CHECK-NEXT: {{(Write|Read)}} of size 4
// CHECK-NEXT: #0 {{.*}}task-dependency.c:41
// CHECK: Previous write of size 4
// CHECK-NEXT: #0 {{.*}}task-dependency.c:30
// CHECK: DONE
// CHECK: ThreadSanitizer: reported 1 warnings
|
zip_fmt_plug.c | /*
* ZIP cracker patch for JtR. Hacked together during June of 2011
* by Dhiru Kholia <dhiru.kholia at gmail.com> for GSoC.
*
* This software is Copyright (c) 2011, Dhiru Kholia <dhiru.kholia at gmail.com>,
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without modification,
* are permitted.
*
* http://www.winzip.com/aes_info.htm (There is a 1 in 65,536 chance that an
* incorrect password will yield a matching verification value; therefore, a
* matching verification value cannot be absolutely relied on to indicate a
* correct password.). The alternative is to implement/use a full unzip engine.
*
* This format significantly improved, Summer of 2014, JimF. Changed the signature
* to the $zip2$, and added logic to properly make this format work. Now there is no
* false positives any more. Now it properly cracks the passwords. There is
* an hmac-sha1 'key' that is also processed (and the decryption key), in the pbkdf2
* call. Now we use this hmac-sha1 key, process the compressed and encrypted buffer,
* compare to a 10 byte checksum (which is now the binary blob), and we KNOW that we
* have cracked or not cracked the key. The $zip$ was broken before, so that signature
* has simply been retired as DOA. This format is now much like the pkzip format.
* it may have all data contained within the hash string, OR it may have some, and
* have a file pointer on where to get the rest of the data.
*
* optimizations still that can be done.
* 1. decrypt and inflate some data for really large buffers, BEFORE doing the
* hmac-sha1 call. The inflate algorithm is pretty self checking for 'valid'
* data, so a few hundred bytes of checking and we are 99.999% sure we have the
* right password, before starting an expensive hmac (for instance if the zip blob
* was 50mb).
* 2. Put in the 'file magic' logic we have for pkzip. There is a place holder for it,
* but the logic has not been added.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_zip;
#elif FMT_REGISTERS_H
john_register_one(&fmt_zip);
#else
#include <string.h>
#include <assert.h>
#include <errno.h>
#include <ctype.h>
#include "arch.h"
#include "crc32.h"
#include "misc.h"
#include "params.h"
#include "common.h"
#include "formats.h"
#include "johnswap.h"
#include "memory.h"
#include "pkzip.h"
#include "pbkdf2_hmac_sha1.h"
#include "dyna_salt.h"
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 1 // Tuned on core i7
#endif
static int omp_t = 1;
#endif
#include "hmac_sha.h"
#include "memdbg.h"
#define KEY_LENGTH(mode) (8 * ((mode) & 3) + 8)
#define SALT_LENGTH(mode) (4 * ((mode) & 3) + 4)
/* Dynamic-length salt record (dyna_salt): fixed head plus the encrypted
 * data blob allocated inline past the struct. */
typedef struct my_salt_t {
	dyna_salt dsalt;               /* JtR dyna_salt linkage (cmp offset/size) */
	uint32_t comp_len;             /* length of datablob in bytes */
	struct {
		uint16_t type : 4;     /* from the hash; get_salt sets 1 to mark unusable */
		uint16_t mode : 4;     /* AES strength: 1/2/3 = 128/192/256 bit */
	} v;
	unsigned char passverify[2];   /* 2-byte quick password-verify value */
	unsigned char salt[SALT_LENGTH(3)];
	//uint64_t data_key; // MSB of md5(data blob). We lookup using this.
	unsigned char datablob[1];     /* variable length; real size is comp_len */
} my_salt;
#define FORMAT_LABEL "ZIP"
#define FORMAT_NAME "WinZip"
#ifdef SIMD_COEF_32
#define ALGORITHM_NAME "PBKDF2-SHA1 " SHA1_ALGORITHM_NAME
#else
#define ALGORITHM_NAME "PBKDF2-SHA1 32/" ARCH_BITS_STR
#endif
#define PLAINTEXT_LENGTH 125
#define BINARY_ALIGN sizeof(ARCH_WORD_32)
#define SALT_SIZE sizeof(my_salt*)
#define SALT_ALIGN sizeof(my_salt*)
#ifdef SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1
#define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static unsigned char (*crypt_key)[((WINZIP_BINARY_SIZE+3)/4)*4];
static my_salt *saved_salt;
// filename:$zip2$*Ty*Mo*Ma*Sa*Va*Le*DF*Au*$/zip2$
// Ty = type (0) and ignored.
// Mo = mode (1 2 3 for 128/192/256 bit
// Ma = magic (file magic). This is reserved for now. See pkzip_fmt_plug.c or zip2john.c for information.
// For now, this must be a '0'
// Sa = salt(hex). 8, 12 or 16 bytes of salt (depends on mode)
// Va = Verification bytes(hex) (2 byte quick checker)
// Le = real compr len (hex) length of compressed/encrypted data (field DF)
// DF = compressed data DF can be L*2 hex bytes, and if so, then it is the ENTIRE file blob written 'inline'.
// However, if the data blob is too long, then a .zip ZIPDATA_FILE_PTR_RECORD structure will be the 'contents' of DF
// Au = Authentication code (hex) a 10 byte hex value that is the hmac-sha1 of data over D. This is the binary() value
// ZIPDATA_FILE_PTR_RECORD (this can be the 'DF' of this above hash line.
// *ZFILE*Fn*Oh*Ob* (Note, the leading and trailing * are the * that 'wrap' the DF object.
// ZFILE This is the literal string ZFILE
// Fn This is the name of the .zip file. NOTE the user will need to keep the .zip file in proper locations (same as
// was seen when running zip2john. If the file is removed, this hash line will no longer be valid.
// Oh Offset to the zip central header record for this blob.
// Ob Offset to the start of the blob data
/* One-time format init: scale the key budget by thread count (and the
 * OMP_SCALE tuning factor), then allocate per-candidate buffers. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_key));
	crypt_key = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*crypt_key));
}
/* Release the buffers allocated in init(), in reverse order. */
static void done(void)
{
	MEM_FREE(crypt_key);
	MEM_FREE(saved_key);
}
/*
 * Parse a "$zip2$*..." ciphertext into a dynamically sized my_salt.
 * The data blob is either inline hex, or a "ZFILE" record pointing back
 * into the original .zip file, in which case the bytes are re-read from
 * disk here.  On any ZFILE failure the salt is kept but marked unusable
 * (v.type = 1) so crypt_all can skip it.
 * Returns a pointer to a static slot holding the my_salt pointer
 * (SALT_SIZE is sizeof(my_salt*)).
 */
static void *get_salt(char *ciphertext)
{
	int i;
	my_salt salt, *psalt;
	static unsigned char *ptr;
	/* extract data from "ciphertext" */
	c8 *copy_mem = strdup(ciphertext); /* NOTE(review): strdup result unchecked */
	c8 *cp, *p;

	if (!ptr) ptr = mem_alloc_tiny(sizeof(my_salt*),sizeof(my_salt*));
	p = copy_mem + WINZIP_TAG_LENGTH+1; /* skip over "$zip2$*" */
	memset(&salt, 0, sizeof(salt));
	cp = strtokm(p, "*"); // type
	salt.v.type = atoi((const char*)cp);
	cp = strtokm(NULL, "*"); // mode
	salt.v.mode = atoi((const char*)cp);
	cp = strtokm(NULL, "*"); // file_magic enum (ignored)
	cp = strtokm(NULL, "*"); // salt
	for (i = 0; i < SALT_LENGTH(salt.v.mode); i++)
		salt.salt[i] = (atoi16[ARCH_INDEX(cp[i<<1])]<<4) | atoi16[ARCH_INDEX(cp[(i<<1)+1])];
	cp = strtokm(NULL, "*"); // validator
	salt.passverify[0] = (atoi16[ARCH_INDEX(cp[0])]<<4) | atoi16[ARCH_INDEX(cp[1])];
	salt.passverify[1] = (atoi16[ARCH_INDEX(cp[2])]<<4) | atoi16[ARCH_INDEX(cp[3])];
	cp = strtokm(NULL, "*"); // data len
	sscanf((const char *)cp, "%x", &salt.comp_len);

	// later we will store the data blob in our own static data structure, and place the 64 bit LSB of the
	// MD5 of the data blob into a field in the salt. For the first POC I store the entire blob and just
	// make sure all my test data is small enough to fit.

	cp = strtokm(NULL, "*"); // data blob

	// Ok, now create the allocated salt record we are going to return back to John, using the dynamic
	// sized data buffer.
	psalt = (my_salt*)mem_calloc(1, sizeof(my_salt) + salt.comp_len);
	psalt->v.type = salt.v.type;
	psalt->v.mode = salt.v.mode;
	psalt->comp_len = salt.comp_len;
	psalt->dsalt.salt_alloc_needs_free = 1; // we used mem_calloc, so JtR CAN free our pointer when done with them.
	memcpy(psalt->salt, salt.salt, sizeof(salt.salt));
	psalt->passverify[0] = salt.passverify[0];
	psalt->passverify[1] = salt.passverify[1];

	// set the JtR core linkage stuff for this dyna_salt
	psalt->dsalt.salt_cmp_offset = SALT_CMP_OFF(my_salt, comp_len);
	psalt->dsalt.salt_cmp_size = SALT_CMP_SIZE(my_salt, comp_len, datablob, psalt->comp_len);

	if (strcmp((const char*)cp, "ZFILE")) {
		/* inline hex data blob */
		for (i = 0; i < psalt->comp_len; i++)
			psalt->datablob[i] = (atoi16[ARCH_INDEX(cp[i<<1])]<<4) | atoi16[ARCH_INDEX(cp[(i<<1)+1])];
	} else {
		/* blob lives in the .zip: fields are *ZFILE*name*header_off*data_off* */
		c8 *Fn, *Oh, *Ob;
		long len;
		uint32_t id;
		FILE *fp;

		Fn = strtokm(NULL, "*");
		Oh = strtokm(NULL, "*");
		Ob = strtokm(NULL, "*");

		fp = fopen((const char*)Fn, "rb");
		if (!fp) {
			psalt->v.type = 1; // this will tell the format to 'skip' this salt, it is garbage
			goto Bail;
		}
		/* NOTE(review): %lx formally wants unsigned long*, len is long — confirm */
		sscanf((const char*)Oh, "%lx", &len);
		if (fseek(fp, len, SEEK_SET)) {
			fclose(fp);
			psalt->v.type = 1;
			goto Bail;
		}
		id = fget32LE(fp);
		if (id != 0x04034b50U) { /* local file header magic "PK\3\4" */
			fclose(fp);
			psalt->v.type = 1;
			goto Bail;
		}
		sscanf((const char*)Ob, "%lx", &len);
		if (fseek(fp, len, SEEK_SET)) {
			fclose(fp);
			psalt->v.type = 1;
			goto Bail;
		}
		if (fread(psalt->datablob, 1, psalt->comp_len, fp) != psalt->comp_len) {
			fclose(fp);
			psalt->v.type = 1;
			goto Bail;
		}
		fclose(fp);
	}
Bail:;
	MEM_FREE(copy_mem);

	memcpy(ptr, &psalt, sizeof(my_salt*));
	return (void*)ptr;
}
static void set_salt(void *salt)
{
saved_salt = *((my_salt**)salt);
}
static void set_key(char *key, int index)
{
int saved_len = strlen(key);
if (saved_len > PLAINTEXT_LENGTH)
saved_len = PLAINTEXT_LENGTH;
memcpy(saved_key[index], key, saved_len);
saved_key[index][saved_len] = 0;
}
/* Return the stored plaintext candidate (already NUL-terminated). */
static char *get_key(int index)
{
	return saved_key[index];
}
/*
 * Main work loop: PBKDF2-SHA1 each candidate, quick-check the 2-byte
 * password-verify value, and only for survivors derive the full AES
 * key and HMAC-SHA1 the entire data blob; the truncated HMAC is what
 * binary() is compared against.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	int count = *pcount;
	int index;

	if (saved_salt->v.type) {
		// This salt passed valid() but failed get_salt().
		// Should never happen.
		/* NOTE(review): crypt_key rows are padded to a multiple of 4
		   bytes; count*WINZIP_BINARY_SIZE may not cover every row if
		   WINZIP_BINARY_SIZE itself is not a multiple of 4 — confirm,
		   or clear count*sizeof(*crypt_key) instead */
		memset(crypt_key, 0, count * WINZIP_BINARY_SIZE);
		return count;
	}
#ifdef _OPENMP
#pragma omp parallel for default(none) private(index) shared(count, saved_key, saved_salt, crypt_key)
#endif
	for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT) {
#ifdef SIMD_COEF_32
		unsigned char pwd_ver[64*MAX_KEYS_PER_CRYPT];
		int lens[MAX_KEYS_PER_CRYPT], i;
		int something_hit = 0, hits[MAX_KEYS_PER_CRYPT] = {0};
		unsigned char *pin[MAX_KEYS_PER_CRYPT], *pout[MAX_KEYS_PER_CRYPT];

		for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
			lens[i] = strlen(saved_key[i+index]);
			pin[i] = (unsigned char*)saved_key[i+index];
			pout[i] = &pwd_ver[i*(2+2*KEY_LENGTH(saved_salt->v.mode))];
		}
		/* cheap pass: derive only the 2 verify bytes that sit after the
		   2*KEY_LENGTH key material in the PBKDF2 output stream */
		pbkdf2_sha1_sse((const unsigned char **)pin, lens, saved_salt->salt,
		                SALT_LENGTH(saved_salt->v.mode), KEYING_ITERATIONS,
		                pout, 2, 2*KEY_LENGTH(saved_salt->v.mode));
		for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i)
			if (!memcmp(pout[i], saved_salt->passverify, 2))
				something_hit = hits[i] = 1;
		if (something_hit) {
			/* expensive pass for the whole vector; HMAC only survivors */
			pbkdf2_sha1_sse((const unsigned char **)pin, lens,
			                saved_salt->salt,
			                SALT_LENGTH(saved_salt->v.mode),
			                KEYING_ITERATIONS, pout,
			                KEY_LENGTH(saved_salt->v.mode),
			                KEY_LENGTH(saved_salt->v.mode));
			for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
				if (hits[i]) {
					hmac_sha1(pout[i], KEY_LENGTH(saved_salt->v.mode),
					          (const unsigned char*)saved_salt->datablob,
					          saved_salt->comp_len, crypt_key[index+i],
					          WINZIP_BINARY_SIZE);
				}
				else
					memset(crypt_key[index+i], 0, WINZIP_BINARY_SIZE);
			}
		} else {
			for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i)
				memset(crypt_key[index+i], 0, WINZIP_BINARY_SIZE);
		}
#else
		/* union only to give pwd_ver word alignment */
		union {
			unsigned char pwd_ver[64];
			ARCH_WORD_32 w;
		} x;
		unsigned char *pwd_ver = x.pwd_ver;

		pbkdf2_sha1((unsigned char *)saved_key[index], strlen(saved_key[index]),
		            saved_salt->salt, SALT_LENGTH(saved_salt->v.mode),
		            KEYING_ITERATIONS, pwd_ver, 2,
		            2*KEY_LENGTH(saved_salt->v.mode));
		if (!memcmp(pwd_ver, saved_salt->passverify, 2)) {
			pbkdf2_sha1((unsigned char *)saved_key[index],
			            strlen(saved_key[index]), saved_salt->salt,
			            SALT_LENGTH(saved_salt->v.mode), KEYING_ITERATIONS,
			            pwd_ver, KEY_LENGTH(saved_salt->v.mode),
			            KEY_LENGTH(saved_salt->v.mode));
			hmac_sha1(pwd_ver, KEY_LENGTH(saved_salt->v.mode),
			          (const unsigned char*)saved_salt->datablob,
			          saved_salt->comp_len, crypt_key[index],
			          WINZIP_BINARY_SIZE);
		}
		else
			memset(crypt_key[index], 0, WINZIP_BINARY_SIZE);
#endif
	}
	return count;
}
static int cmp_all(void *binary, int count)
{
int i;
for (i = 0; i < count; i++)
if (((ARCH_WORD_32*)&(crypt_key[i]))[0] == ((ARCH_WORD_32*)binary)[0])
return 1;
return 0;
}
static int cmp_one(void *binary, int index)
{
return (((ARCH_WORD_32*)&(crypt_key[index]))[0] == ((ARCH_WORD_32*)binary)[0]);
}
/* Full-width comparison against the authentication code from the hash. */
static int cmp_exact(char *source, int index)
{
	void *bin = winzip_common_binary(source);

	return memcmp(bin, crypt_key[index], sizeof(crypt_key[index])) == 0;
}
/* John the Ripper format descriptor wiring the WinZip AES implementation
 * above into the core (fmt_params, then fmt_methods). */
struct fmt_main fmt_zip = {
	{ /* fmt_params */
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		WINZIP_BENCHMARK_COMMENT,
		WINZIP_BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		4, // WINZIP_BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_DYNA_SALT,
		{ NULL },
		{ WINZIP_FORMAT_TAG },
		winzip_common_tests
	}, { /* fmt_methods */
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		winzip_common_valid,
		winzip_common_split,
		winzip_common_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash /* Not usable with $SOURCE_HASH$ */
		},
		fmt_default_dyna_salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash /* Not usable with $SOURCE_HASH$ */
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
GB_unaryop__ainv_int32_int64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_int32_int64
// op(A') function: GB_tran__ainv_int32_int64
// C type: int32_t
// A type: int64_t
// cast: int32_t cij = (int32_t) aij
// unaryop: cij = -aij
#define GB_ATYPE \
int64_t
#define GB_CTYPE \
int32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, x) \
int32_t z = (int32_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_INT32 || GxB_NO_INT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
/* Cx [p] = (int32_t) (-Ax [p]) for 0 <= p < anz, parallelized over
 * nthreads with a static schedule.  Returns GrB_NO_VALUE when this
 * operator is compiled out via GB_DISABLE.  Auto-generated file: the
 * loop body must stay in terms of the GB_* macros above. */
GrB_Info GB_unop__ainv_int32_int64
(
    int32_t *restrict Cx,
    const int64_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
/* C = op (cast (A')): transpose, typecast, and apply the unary operator.
 * The actual loops come from the GB_unaryop_transpose.c template,
 * instantiated with the GB_* macros defined above. */
GrB_Info GB_tran__ainv_int32_int64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
#endif
}
#endif
|
test.c | #include <stdio.h>
#include "../utilities/check.h"
#define N 100
/*
 * Offloading sanity test: the device fills a[0..N) through a collapsed
 * simd loop, the host computes the same values into aa[], then the two
 * are compared.  Returns the mismatch count (0 on success); bails out
 * early after more than 10 errors.
 */
int main()
{
	check_offloading();

	int a[N], aa[N];
	int i, error = 0;

	// initialize
	for(i=0; i<N; i++)
		aa[i] = a[i] = -1;

	// offload
	#pragma omp target map(tofrom: a[0:100])
	{
		int k, l;
		/* k*4+l enumerates 0..N-1 exactly once (N is divisible by 4) */
		#pragma omp simd collapse(2)
		for(k=0; k<N/4; k++)
			for(l=0; l<4; l++)
				a[k*4+l] = k*4+l;
	}

	// host
	for(i=0; i<N; i++)
		aa[i] = i;

	// check
	for(i=0; i<N; i++) {
		if (a[i] != aa[i])
			printf("%d: a %d != %d (error %d)\n", i, a[i], aa[i], ++error);
		if (error > 10) {
			printf("abort\n");
			return 0; // NOTE(review): early abort exits with status 0 — confirm intended
		}
	}

	// report
	printf("done with %d errors\n", error);
	return error;
}
|
3d25pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/*
 * Compute *result = *x - *y for struct timeval values.
 * Returns 1 if the difference is negative, otherwise 0.
 * NOTE: *y is used as scratch and may be modified — same contract as
 * the classic glibc manual example this follows.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
	/* Borrow from the seconds field when x has fewer microseconds. */
	if (x->tv_usec < y->tv_usec) {
		int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;

		y->tv_usec -= 1000000 * borrow;
		y->tv_sec += borrow;
	}
	/* Carry into seconds when the microsecond difference is too large. */
	if (x->tv_usec - y->tv_usec > 1000000) {
		int carry = (x->tv_usec - y->tv_usec) / 1000000;

		y->tv_usec += 1000000 * carry;
		y->tv_sec -= carry;
	}

	/* tv_usec of the result is now certainly positive. */
	result->tv_sec = x->tv_sec - y->tv_sec;
	result->tv_usec = x->tv_usec - y->tv_usec;

	return x->tv_sec < y->tv_sec;
}
/*
 * Driver for the time-tiled order-2, 3D 25-point stencil.
 * Usage: prog Nx Ny Nz [Nt]; 8 is added to each spatial size for the
 * order-4 halo.  Runs the kernel TESTS times and reports each time.
 *
 * NOTE(review): Nx/Ny/Nz/Nt are read uninitialized when fewer
 * command-line arguments are supplied — the code assumes a fully
 * specified command line.
 */
int main(int argc, char *argv[])
{
  int t, i, j, k, test;
  int Nx, Ny, Nz, Nt;

  if (argc > 3) {
    Nx = atoi(argv[1])+8;
    Ny = atoi(argv[2])+8;
    Nz = atoi(argv[3])+8;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  /* A holds two time planes (Jacobi-style double buffering). */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  /* NOTE(review): this first roc2 allocation is immediately overwritten
     below and leaked. */
  double ***roc2 = (double ***) malloc(sizeof(double**));

  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);
  roc2 = (double ***) malloc(sizeof(double**)*Nz);
  for(i=0; i<Nz; i++){
    A[0][i] = (double**) malloc(sizeof(double*)*Ny);
    A[1][i] = (double**) malloc(sizeof(double*)*Ny);
    roc2[i] = (double**) malloc(sizeof(double*)*Ny);
    for(j=0;j<Ny;j++){
      A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
      A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
      roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 4;
  tile_size[1] = 4;
  tile_size[2] = 4;
  tile_size[3] = 1024;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;

  // initialize variables
  //
  srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
        roc2[i][j][k] = 2.0 * (rand() % BASE);
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  /* 25-point stencil coefficients (center plus 4 shells). */
  const double coef0 = -0.28472;
  const double coef1 = 0.16000;
  const double coef2 = -0.02000;
  const double coef3 = 0.00254;
  const double coef4 = -0.00018;

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
/* Copyright (C) 1991-2014 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.
   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.
   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>. */
/* This header is separate from features.h so that the compiler can
   include it implicitly at the start of every compilation.  It must
   not itself include <features.h> or any other header that includes
   <features.h> because the implicit include comes before any feature
   test macros that may be defined in a source file before it first
   explicitly includes a system header.  GCC knows the name of this
   header in order to preinclude it. */
/* glibc's intent is to support the IEC 559 math functionality, real
   and complex.  If the GCC (4.9 and later) predefined macros
   specifying compiler intent are available, use them to determine
   whether the overall intent is to support these features; otherwise,
   presume an older compiler has intent to support these features and
   define these macros by default. */
/* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
   Unicode 6.0. */
/* We do not support C11 <threads.h>.  */
    /* PLUTO/CLooG-generated time-tiled loop nest: t1 walks time tiles,
       t2..t4 walk space tiles, t5 is time inside a tile, t6..t8 are the
       z/y/x points.  Do not hand-edit the bounds. */
    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;
    /* Start of CLooG code */
    if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
      for (t1=-1;t1<=2*Nt-2;t1++) {
        lbp=ceild(t1+2,2);
        ubp=min(floord(4*Nt+Nz-9,4),floord(2*t1+Nz-4,4));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
        for (t2=lbp;t2<=ubp;t2++) {
          for (t3=max(ceild(t1+2,2),ceild(4*t2-Nz+9,4));t3<=min(min(floord(4*Nt+Ny-9,4),floord(2*t1+Ny-3,4)),floord(4*t2+Ny-9,4));t3++) {
            for (t4=max(max(ceild(t1-508,512),ceild(4*t2-Nz-1011,1024)),ceild(4*t3-Ny-1011,1024));t4<=min(min(min(floord(4*Nt+Nx-9,1024),floord(2*t1+Nx-3,1024)),floord(4*t2+Nx-9,1024)),floord(4*t3+Nx-9,1024));t4++) {
              for (t5=max(max(max(ceild(t1,2),ceild(4*t2-Nz+5,4)),ceild(4*t3-Ny+5,4)),ceild(1024*t4-Nx+5,4));t5<=floord(t1+1,2);t5++) {
                for (t6=max(4*t2,-4*t1+4*t2+8*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+8*t5),4*t5+Nz-5);t6++) {
                  for (t7=4*t3;t7<=min(4*t3+3,4*t5+Ny-5);t7++) {
                    lbv=max(1024*t4,4*t5+4);
                    ubv=min(1024*t4+1023,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
                    for (t8=lbv;t8<=ubv;t8++) {
                      A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((2.0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) - A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (roc2[ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (((((coef0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef1 * (((((A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef2 * (((((A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef3 * (((((A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef4 * (((((A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])))));;
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
    /* End of CLooG code */
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  PRINT_RESULTS(4, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
      free(roc2[i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
    free(roc2[i]);
  }
  free(A[0]);
  free(A[1]);
  free(roc2);
  return 0;
}
|
openmp_common.c | // RUN: %clang_cc1 -verify -fopenmp -ferror-limit 100 -o - %s
/* Malformed directives at file scope must be diagnosed. */
#pragma omp // expected-error {{expected an OpenMP directive}}
#pragma omp unknown_directive // expected-error {{expected an OpenMP directive}}

/* The same malformed directives inside a function body. */
void foo() {
#pragma omp // expected-error {{expected an OpenMP directive}}
#pragma omp unknown_directive // expected-error {{expected an OpenMP directive}}
}

/* A loop directive is not allowed at struct-member scope. */
typedef struct S {
#pragma omp parallel for private(j) schedule(static) if (tree1->totleaf > 1024) // expected-error {{unexpected OpenMP directive '#pragma omp parallel for'}}
} St;
|
pr66199-1.c | /* PR middle-end/66199 */
/* { dg-do run } */
int u[1024], v[1024], w[1024];
/* u[i] = v[i] + w[i] over [a, b); returns the iterator's final value. */
__attribute__((noinline, noclone)) long
f1 (long a, long b)
{
  long i;
#pragma omp parallel for simd default(none) firstprivate (a, b) shared(u, v, w)
  for (i = a; i < b; i++)
    u[i] = v[i] + w[i];
  return i;
}
/* As f1, but exercising linear(d), linear(c:5) and lastprivate(e):
   after the loop, c has advanced by 5 per iteration and e holds the
   value written by the last iteration.  Returns d + c + e. */
__attribute__((noinline, noclone)) long
f2 (long a, long b, long c)
{
  long d, e;
#pragma omp parallel for simd default(none) firstprivate (a, b) shared(u, v, w) linear(d) linear(c:5) lastprivate(e)
  for (d = a; d < b; d++)
    {
      u[d] = v[d] + w[d];
      c += 5;
      e = c;
    }
  return d + c + e;
}
/* Collapsed two-level loop with explicit lastprivate(d1, d2); returns
   the post-loop iterator values d1 + d2 (b1 + b2 when the loops run). */
__attribute__((noinline, noclone)) long
f3 (long a1, long b1, long a2, long b2)
{
  long d1, d2;
#pragma omp parallel for simd default(none) firstprivate (a1, b1, a2, b2) shared(u, v, w) lastprivate(d1, d2) collapse(2)
  for (d1 = a1; d1 < b1; d1++)
    for (d2 = a2; d2 < b2; d2++)
      u[d1 * 32 + d2] = v[d1 * 32 + d2] + w[d1 * 32 + d2];
  return d1 + d2;
}
/* Same as f3 but WITHOUT lastprivate: relies on the collapsed loop
   iterators being usable after the region — the PR middle-end/66199
   case this test was written for. */
__attribute__((noinline, noclone)) long
f4 (long a1, long b1, long a2, long b2)
{
  long d1, d2;
#pragma omp parallel for simd default(none) firstprivate (a1, b1, a2, b2) shared(u, v, w) collapse(2)
  for (d1 = a1; d1 < b1; d1++)
    for (d2 = a2; d2 < b2; d2++)
      u[d1 * 32 + d2] = v[d1 * 32 + d2] + w[d1 * 32 + d2];
  return d1 + d2;
}
/* Check each variant's return value against the expected post-loop
   iterator/linear values; abort on any mismatch. */
int
main ()
{
  long r1 = f1 (0, 1024);
  long r2 = f2 (0, 1024, 17);
  long r3 = f3 (0, 32, 0, 32);
  long r4 = f4 (0, 32, 0, 32);

  if (r1 != 1024
      || r2 != 1024 + 2 * (17 + 5 * 1024)
      || r3 != 64
      || r4 != 64)
    __builtin_abort ();
  return 0;
}
|
GB_unaryop__identity_int16_uint8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_int16_uint8
// op(A') function: GB_tran__identity_int16_uint8
// C type: int16_t
// A type: uint8_t
// cast: int16_t cij = (int16_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint8_t
#define GB_CTYPE \
int16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
int16_t z = (int16_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT16 || GxB_NO_UINT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__identity_int16_uint8
(
    int16_t *restrict Cx,
    const uint8_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Each of the anz entries is independent, so the apply is a flat
    // statically scheduled parallel loop over the input.
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t k = 0 ; k < anz ; k++)
    {
        // Cx [k] = (int16_t) Ax [k]
        GB_CAST_OP (k, k) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Transpose + typecast + apply.  The loop body lives entirely in the
// included template GB_unaryop_transpose.c, specialized by the GB_* macros
// defined earlier in this file.
GrB_Info GB_tran__identity_int16_uint8
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
// this operator/type combination was compiled out; the caller falls back
// to the generic kernel
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unaryop__minv_uint32_bool.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_uint32_bool
// op(A') function: GB_tran__minv_uint32_bool
// C type: uint32_t
// A type: bool
// cast: uint32_t cij = (uint32_t) aij
// unaryop: cij = GB_IMINV_UNSIGNED (aij, 32)
#define GB_ATYPE \
bool
#define GB_CTYPE \
uint32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_UNSIGNED (x, 32) ;
// casting
#define GB_CASTING(z, x) \
uint32_t z = (uint32_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_UINT32 || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__minv_uint32_bool
(
    uint32_t *restrict Cx,
    const bool *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Entries are independent: apply the operator to every input entry in
    // a statically scheduled parallel loop.
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t k = 0 ; k < anz ; k++)
    {
        // Cx [k] = GB_IMINV_UNSIGNED ((uint32_t) Ax [k], 32)
        GB_CAST_OP (k, k) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Transpose + typecast + apply.  The loop body lives entirely in the
// included template GB_unaryop_transpose.c, specialized by the GB_* macros
// defined earlier in this file.
// Rowcounts is declared int64_t *restrict * to stay consistent with the
// sibling generated kernels (e.g. GB_tran__identity_int16_uint8) and the
// prototypes in GB_unaryop__include.h; the plain int64_t ** spelling here
// was an inconsistency in this generated file.
GrB_Info GB_tran__minv_uint32_bool
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
// this operator/type combination was compiled out; the caller falls back
// to the generic kernel
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
test.c |
#include <omp.h>
#include <stdio.h>
#include <sys/time.h>
/* NOTE(review): an initializer list of {3} sets only element 0; the rest
   are zero-initialized.  Confirm whether "all 3s / all 4s" was intended;
   behavior is kept as-is here. */
float a[2048*32] = {3};
float b[2048*32] = {4};

/* Micro-benchmark: repeatedly add b into a (100000 sweeps over the whole
   array, vectorized with "omp simd") and print the elapsed wall-clock
   time of the computation alone. */
int main() {
    const int count = 2048 * 32;
    struct timeval start, end;

    gettimeofday(&start, NULL);
    for (int j = 0; j < 100000; j++) {
        #pragma omp simd
        for (int i = 0; i < count; i++) {
            a[i] = a[i] + b[i];
        }
    }
    /* Stop the clock before doing any I/O, so the measurement covers only
       the computation (the original printed "Here" before taking the end
       timestamp, which polluted the timing). */
    gettimeofday(&end, NULL);

    printf("Here\n");  /* newline added so the marker actually flushes */
    double delta = ((end.tv_sec - start.tv_sec) * 1000000u +
                    end.tv_usec - start.tv_usec) / 1.e6;
    printf("%f s\n", delta);
    return 0;
}
|
Sema.h | //===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Sema class, which performs semantic analysis and
// builds ASTs.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_SEMA_SEMA_H
#define LLVM_CLANG_SEMA_SEMA_H
#include "clang/AST/Attr.h"
#include "clang/AST/Availability.h"
#include "clang/AST/ComparisonCategories.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/LocInfoType.h"
#include "clang/AST/MangleNumberingContext.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/TypeOrdering.h"
#include "clang/Basic/ExpressionTraits.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/PragmaKinds.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TemplateKinds.h"
#include "clang/Basic/TypeTraits.h"
#include "clang/Sema/AnalysisBasedWarnings.h"
#include "clang/Sema/CleanupInfo.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/ExternalSemaSource.h"
#include "clang/Sema/IdentifierResolver.h"
#include "clang/Sema/ObjCMethodList.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/TypoCorrection.h"
#include "clang/Sema/Weak.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/TinyPtrVector.h"
#include <deque>
#include <memory>
#include <string>
#include <vector>
namespace llvm {
class APSInt;
template <typename ValueT> struct DenseMapInfo;
template <typename ValueT, typename ValueInfoT> class DenseSet;
class SmallBitVector;
struct InlineAsmIdentifierInfo;
}
namespace clang {
class ADLResult;
class ASTConsumer;
class ASTContext;
class ASTMutationListener;
class ASTReader;
class ASTWriter;
class ArrayType;
class ParsedAttr;
class BindingDecl;
class BlockDecl;
class CapturedDecl;
class CXXBasePath;
class CXXBasePaths;
class CXXBindTemporaryExpr;
typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath;
class CXXConstructorDecl;
class CXXConversionDecl;
class CXXDeleteExpr;
class CXXDestructorDecl;
class CXXFieldCollector;
class CXXMemberCallExpr;
class CXXMethodDecl;
class CXXScopeSpec;
class CXXTemporary;
class CXXTryStmt;
class CallExpr;
class ClassTemplateDecl;
class ClassTemplatePartialSpecializationDecl;
class ClassTemplateSpecializationDecl;
class VarTemplatePartialSpecializationDecl;
class CodeCompleteConsumer;
class CodeCompletionAllocator;
class CodeCompletionTUInfo;
class CodeCompletionResult;
class CoroutineBodyStmt;
class Decl;
class DeclAccessPair;
class DeclContext;
class DeclRefExpr;
class DeclaratorDecl;
class DeducedTemplateArgument;
class DependentDiagnostic;
class DesignatedInitExpr;
class Designation;
class EnableIfAttr;
class EnumConstantDecl;
class Expr;
class ExtVectorType;
class FormatAttr;
class FriendDecl;
class FunctionDecl;
class FunctionProtoType;
class FunctionTemplateDecl;
class ImplicitConversionSequence;
typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList;
class InitListExpr;
class InitializationKind;
class InitializationSequence;
class InitializedEntity;
class IntegerLiteral;
class LabelStmt;
class LambdaExpr;
class LangOptions;
class LocalInstantiationScope;
class LookupResult;
class MacroInfo;
typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath;
class ModuleLoader;
class MultiLevelTemplateArgumentList;
class NamedDecl;
class ObjCCategoryDecl;
class ObjCCategoryImplDecl;
class ObjCCompatibleAliasDecl;
class ObjCContainerDecl;
class ObjCImplDecl;
class ObjCImplementationDecl;
class ObjCInterfaceDecl;
class ObjCIvarDecl;
template <class T> class ObjCList;
class ObjCMessageExpr;
class ObjCMethodDecl;
class ObjCPropertyDecl;
class ObjCProtocolDecl;
class OMPThreadPrivateDecl;
class OMPRequiresDecl;
class OMPDeclareReductionDecl;
class OMPDeclareSimdDecl;
class OMPClause;
struct OMPVarListLocTy;
struct OverloadCandidate;
class OverloadCandidateSet;
class OverloadExpr;
class ParenListExpr;
class ParmVarDecl;
class Preprocessor;
class PseudoDestructorTypeStorage;
class PseudoObjectExpr;
class QualType;
class StandardConversionSequence;
class Stmt;
class StringLiteral;
class SwitchStmt;
class TemplateArgument;
class TemplateArgumentList;
class TemplateArgumentLoc;
class TemplateDecl;
class TemplateInstantiationCallback;
class TemplateParameterList;
class TemplatePartialOrderingContext;
class TemplateTemplateParmDecl;
class Token;
class TypeAliasDecl;
class TypedefDecl;
class TypedefNameDecl;
class TypeLoc;
class TypoCorrectionConsumer;
class UnqualifiedId;
class UnresolvedLookupExpr;
class UnresolvedMemberExpr;
class UnresolvedSetImpl;
class UnresolvedSetIterator;
class UsingDecl;
class UsingShadowDecl;
class ValueDecl;
class VarDecl;
class VarTemplateSpecializationDecl;
class VisibilityAttr;
class VisibleDeclConsumer;
class IndirectFieldDecl;
struct DeductionFailureInfo;
class TemplateSpecCandidateSet;
namespace sema {
class AccessedEntity;
class BlockScopeInfo;
class Capture;
class CapturedRegionScopeInfo;
class CapturingScopeInfo;
class CompoundScopeInfo;
class DelayedDiagnostic;
class DelayedDiagnosticPool;
class FunctionScopeInfo;
class LambdaScopeInfo;
class PossiblyUnreachableDiag;
class SemaPPCallbacks;
class TemplateDeductionInfo;
}
namespace threadSafety {
class BeforeSet;
void threadSafetyCleanup(BeforeSet* Cache);
}
// FIXME: No way to easily map from TemplateTypeParmTypes to
// TemplateTypeParmDecls, so we have this horrible PointerUnion.
typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>,
SourceLocation> UnexpandedParameterPack;
/// Describes whether we've seen any nullability information for the given
/// file.
struct FileNullability {
/// The first pointer declarator (of any pointer kind) in the file that does
/// not have a corresponding nullability annotation.
SourceLocation PointerLoc;
/// The end location for the first pointer declarator in the file. Used for
/// placing fix-its.
SourceLocation PointerEndLoc;
/// Which kind of pointer declarator we saw.
/// NOTE(review): stored as a raw integer; the enumeration it encodes is not
/// visible in this header -- confirm against the code that sets it.
uint8_t PointerKind;
/// Whether we saw any type nullability annotations in the given file.
bool SawTypeNullability = false;
};
/// A mapping from file IDs to a record of whether we've seen nullability
/// information in that file.
class FileNullabilityMap {
/// A mapping from file IDs to the nullability information for each file ID.
llvm::DenseMap<FileID, FileNullability> Map;
/// A single-element cache based on the file ID.
struct {
FileID File;
FileNullability Nullability;
} Cache;
public:
/// Look up (creating if needed) the nullability record for \p file, going
/// through a one-entry cache of the most recently accessed file.
FileNullability &operator[](FileID file) {
// Only refill the cache when the request is for a different file than
// the previous lookup.
if (file != Cache.File) {
// Write the currently cached entry back into the map first (unless the
// cache has never held a valid file).
if (!Cache.File.isInvalid())
Map[Cache.File] = Cache.Nullability;
// Then pull the requested entry into the cache.
Cache.File = file;
Cache.Nullability = Map[file];
}
return Cache.Nullability;
}
};
/// Keeps track of expected type during expression parsing. The type is tied to
/// a particular token, all functions that update or consume the type take a
/// start location of the token they are looking at as a parameter. This allows
/// to avoid updating the type on hot paths in the parser.
class PreferredTypeBuilder {
public:
PreferredTypeBuilder() = default;
explicit PreferredTypeBuilder(QualType Type) : Type(Type) {}
void enterCondition(Sema &S, SourceLocation Tok);
void enterReturn(Sema &S, SourceLocation Tok);
void enterVariableInit(SourceLocation Tok, Decl *D);
/// Computing a type for the function argument may require running
/// overloading, so we postpone its computation until it is actually needed.
///
/// Clients should be very careful when using this function, as it stores a
/// function_ref, clients should make sure all calls to get() with the same
/// location happen while function_ref is alive.
void enterFunctionArgument(SourceLocation Tok,
llvm::function_ref<QualType()> ComputeType);
void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc);
void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind,
SourceLocation OpLoc);
void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op);
void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base);
void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS);
/// Handles all type casts, including C-style cast, C++ casts, etc.
void enterTypeCast(SourceLocation Tok, QualType CastType);
/// Get the expected type for the token at \p Tok; returns a null QualType
/// when no type was recorded for that exact token.
QualType get(SourceLocation Tok) const {
// The stored type applies only to the token it was recorded at.
if (Tok != ExpectedLoc)
return QualType();
if (!Type.isNull())
return Type;
// Fall back to the lazily-computed type, if one was registered.
if (ComputeType)
return ComputeType();
return QualType();
}
private:
/// Start position of a token for which we store expected type.
SourceLocation ExpectedLoc;
/// Expected type for a token starting at ExpectedLoc.
QualType Type;
/// A function to compute expected type at ExpectedLoc. It is only considered
/// if Type is null.
llvm::function_ref<QualType()> ComputeType;
};
/// Sema - This implements semantic analysis and AST building for C.
class Sema {
Sema(const Sema &) = delete;
void operator=(const Sema &) = delete;
///Source of additional semantic information.
ExternalSemaSource *ExternalSource;
///Whether Sema has generated a multiplexer and has to delete it.
bool isMultiplexExternalSource;
static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD);
bool isVisibleSlow(const NamedDecl *D);
/// Determine whether two declarations should be linked together, given that
/// the old declaration might not be visible and the new declaration might
/// not have external linkage.
bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old,
const NamedDecl *New) {
// A visible previous declaration always links with the new one; only a
// hidden one needs the linkage check.
if (!isVisible(Old)) {
// See comment in below overload for why it's safe to compute the linkage
// of the new declaration here.
if (!New->isExternallyDeclarable())
return false;
assert(Old->isExternallyDeclarable() &&
"should not have found a non-externally-declarable previous decl");
}
return true;
}
bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New);
void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem,
QualType ResultTy,
ArrayRef<QualType> Args);
public:
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef OpaquePtr<QualType> TypeTy;
OpenCLOptions OpenCLFeatures;
FPOptions FPFeatures;
const LangOptions &LangOpts;
Preprocessor &PP;
ASTContext &Context;
ASTConsumer &Consumer;
DiagnosticsEngine &Diags;
SourceManager &SourceMgr;
/// Flag indicating whether or not to collect detailed statistics.
bool CollectStats;
/// Code-completion consumer.
CodeCompleteConsumer *CodeCompleter;
/// CurContext - This is the current declaration context of parsing.
DeclContext *CurContext;
/// Generally null except when we temporarily switch decl contexts,
/// like in \see ActOnObjCTemporaryExitContainerContext.
DeclContext *OriginalLexicalContext;
/// VAListTagName - The declaration name corresponding to __va_list_tag.
/// This is used as part of a hack to omit that class from ADL results.
DeclarationName VAListTagName;
bool MSStructPragmaOn; // True when \#pragma ms_struct on
/// Controls member pointer representation format under the MS ABI.
LangOptions::PragmaMSPointersToMembersKind
MSPointerToMemberRepresentationMethod;
/// Stack of active SEH __finally scopes. Can be empty.
SmallVector<Scope*, 2> CurrentSEHFinally;
/// Source location for newly created implicit MSInheritanceAttrs
SourceLocation ImplicitMSInheritanceAttrLoc;
/// pragma clang section kind
enum PragmaClangSectionKind {
PCSK_Invalid = 0,
PCSK_BSS = 1,
PCSK_Data = 2,
PCSK_Rodata = 3,
PCSK_Text = 4
};
enum PragmaClangSectionAction {
PCSA_Set = 0,
PCSA_Clear = 1
};
struct PragmaClangSection {
std::string SectionName;
bool Valid = false;
SourceLocation PragmaLocation;
void Act(SourceLocation PragmaLocation,
PragmaClangSectionAction Action,
StringLiteral* Name);
};
PragmaClangSection PragmaClangBSSSection;
PragmaClangSection PragmaClangDataSection;
PragmaClangSection PragmaClangRodataSection;
PragmaClangSection PragmaClangTextSection;
enum PragmaMsStackAction {
PSK_Reset = 0x0, // #pragma ()
PSK_Set = 0x1, // #pragma (value)
PSK_Push = 0x2, // #pragma (push[, id])
PSK_Pop = 0x4, // #pragma (pop[, id])
PSK_Show = 0x8, // #pragma (show) -- only for "pack"!
PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value)
PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value)
};
/// Generic stack used to implement MSVC-compatible push/pop #pragmas
/// (e.g. pack, vtordisp, and the segment pragmas). Each named push records
/// a Slot that a later pop with the same label restores.
template<typename ValueType>
struct PragmaStack {
/// One saved entry: the label it was pushed under, the saved value, and
/// the locations of the pragma and of the push itself.
struct Slot {
llvm::StringRef StackSlotLabel;
ValueType Value;
SourceLocation PragmaLocation;
SourceLocation PragmaPushLocation;
Slot(llvm::StringRef StackSlotLabel, ValueType Value,
SourceLocation PragmaLocation, SourceLocation PragmaPushLocation)
: StackSlotLabel(StackSlotLabel), Value(Value),
PragmaLocation(PragmaLocation),
PragmaPushLocation(PragmaPushLocation) {}
};
/// Perform one pragma action (set/push/pop/reset/...) on this stack;
/// defined out of line.
void Act(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
ValueType Value);
// MSVC seems to add artificial slots to #pragma stacks on entering a C++
// method body to restore the stacks on exit, so it works like this:
//
// struct S {
// #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>)
// void Method {}
// #pragma <name>(pop, InternalPragmaSlot)
// };
//
// It works even with #pragma vtordisp, although MSVC doesn't support
// #pragma vtordisp(push [, id], n)
// syntax.
//
// Push / pop a named sentinel slot.
void SentinelAction(PragmaMsStackAction Action, StringRef Label) {
assert((Action == PSK_Push || Action == PSK_Pop) &&
"Can only push / pop #pragma stack sentinels!");
Act(CurrentPragmaLocation, Action, Label, CurrentValue);
}
// Constructors.
explicit PragmaStack(const ValueType &Default)
: DefaultValue(Default), CurrentValue(Default) {}
/// True when the current value differs from the stack's default.
bool hasValue() const { return CurrentValue != DefaultValue; }
SmallVector<Slot, 2> Stack;
ValueType DefaultValue; // Value used for PSK_Reset action.
ValueType CurrentValue;
SourceLocation CurrentPragmaLocation;
};
// FIXME: We should serialize / deserialize these if they occur in a PCH (but
// we shouldn't do so if they're in a module).
/// Whether to insert vtordisps prior to virtual bases in the Microsoft
/// C++ ABI. Possible values are 0, 1, and 2, which mean:
///
/// 0: Suppress all vtordisps
/// 1: Insert vtordisps in the presence of vbase overrides and non-trivial
/// structors
/// 2: Always insert vtordisps to support RTTI on partially constructed
/// objects
PragmaStack<MSVtorDispAttr::Mode> VtorDispStack;
// #pragma pack.
// Sentinel to represent when the stack is set to mac68k alignment.
static const unsigned kMac68kAlignmentSentinel = ~0U;
PragmaStack<unsigned> PackStack;
// The current #pragma pack values and locations at each #include.
struct PackIncludeState {
unsigned CurrentValue;
SourceLocation CurrentPragmaLocation;
bool HasNonDefaultValue, ShouldWarnOnInclude;
};
SmallVector<PackIncludeState, 8> PackIncludeStack;
// Segment #pragmas.
PragmaStack<StringLiteral *> DataSegStack;
PragmaStack<StringLiteral *> BSSSegStack;
PragmaStack<StringLiteral *> ConstSegStack;
PragmaStack<StringLiteral *> CodeSegStack;
// RAII object to push / pop sentinel slots for all MS #pragma stacks.
// Actions should be performed only if we enter / exit a C++ method body.
class PragmaStackSentinelRAII {
public:
PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct);
~PragmaStackSentinelRAII();
private:
Sema &S;
StringRef SlotLabel;
bool ShouldAct;
};
/// A mapping that describes the nullability we've seen in each header file.
FileNullabilityMap NullabilityMap;
/// Last section used with #pragma init_seg.
StringLiteral *CurInitSeg;
SourceLocation CurInitSegLoc;
/// VisContext - Manages the stack for \#pragma GCC visibility.
void *VisContext; // Really a "PragmaVisStack*"
/// This an attribute introduced by \#pragma clang attribute.
struct PragmaAttributeEntry {
SourceLocation Loc;
ParsedAttr *Attribute;
SmallVector<attr::SubjectMatchRule, 4> MatchRules;
bool IsUsed;
};
/// A push'd group of PragmaAttributeEntries.
struct PragmaAttributeGroup {
/// The location of the push attribute.
SourceLocation Loc;
/// The namespace of this push group.
const IdentifierInfo *Namespace;
SmallVector<PragmaAttributeEntry, 2> Entries;
};
SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack;
/// The declaration that is currently receiving an attribute from the
/// #pragma attribute stack.
const Decl *PragmaAttributeCurrentTargetDecl;
/// This represents the last location of a "#pragma clang optimize off"
/// directive if such a directive has not been closed by an "on" yet. If
/// optimizations are currently "on", this is set to an invalid location.
SourceLocation OptimizeOffPragmaLocation;
/// Flag indicating if Sema is building a recovery call expression.
///
/// This flag is used to avoid building recovery call expressions
/// if Sema is already doing so, which would cause infinite recursions.
bool IsBuildingRecoveryCallExpr;
/// Used to control the generation of ExprWithCleanups.
CleanupInfo Cleanup;
/// ExprCleanupObjects - This is the stack of objects requiring
/// cleanup that are created by the current full expression. The
/// element type here is ExprWithCleanups::Object.
SmallVector<BlockDecl*, 8> ExprCleanupObjects;
/// Store a list of either DeclRefExprs or MemberExprs
/// that contain a reference to a variable (constant) that may or may not
/// be odr-used in this Expr, and we won't know until all lvalue-to-rvalue
/// and discarded value conversions have been applied to all subexpressions
/// of the enclosing full expression. This is cleared at the end of each
/// full expression.
llvm::SmallPtrSet<Expr*, 2> MaybeODRUseExprs;
std::unique_ptr<sema::FunctionScopeInfo> PreallocatedFunctionScope;
/// Stack containing information about each of the nested
/// function, block, and method scopes that are currently active.
SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes;
typedef LazyVector<TypedefNameDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadExtVectorDecls, 2, 2>
ExtVectorDeclsType;
/// ExtVectorDecls - This is a list all the extended vector types. This allows
/// us to associate a raw vector type with one of the ext_vector type names.
/// This is only necessary for issuing pretty diagnostics.
ExtVectorDeclsType ExtVectorDecls;
/// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes.
std::unique_ptr<CXXFieldCollector> FieldCollector;
typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType;
/// Set containing all declared private fields that are not used.
NamedDeclSetType UnusedPrivateFields;
/// Set containing all typedefs that are likely unused.
llvm::SmallSetVector<const TypedefNameDecl *, 4>
UnusedLocalTypedefNameCandidates;
/// Delete-expressions to be analyzed at the end of translation unit
///
/// This list contains class members, and locations of delete-expressions
/// that could not be proven as to whether they mismatch with new-expression
/// used in initializer of the field.
typedef std::pair<SourceLocation, bool> DeleteExprLoc;
typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs;
llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs;
typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy;
/// PureVirtualClassDiagSet - a set of class declarations which we have
/// emitted a list of pure virtual functions. Used to prevent emitting the
/// same list more than once.
std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet;
/// ParsingInitForAutoVars - a set of declarations with auto types for which
/// we are currently parsing the initializer.
llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars;
/// Look for a locally scoped extern "C" declaration by the given name.
NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name);
typedef LazyVector<VarDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadTentativeDefinitions, 2, 2>
TentativeDefinitionsType;
/// All the tentative definitions encountered in the TU.
TentativeDefinitionsType TentativeDefinitions;
typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2>
UnusedFileScopedDeclsType;
/// The set of file scoped decls seen so far that have not been used
/// and must warn if not used. Only contains the first declaration.
UnusedFileScopedDeclsType UnusedFileScopedDecls;
typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadDelegatingConstructors, 2, 2>
DelegatingCtorDeclsType;
/// All the delegating constructors seen so far in the file, used for
/// cycle detection at the end of the TU.
DelegatingCtorDeclsType DelegatingCtorDecls;
/// All the overriding functions seen during a class definition
/// that had their exception spec checks delayed, plus the overridden
/// function.
SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2>
DelayedOverridingExceptionSpecChecks;
/// All the function redeclarations seen during a class definition that had
/// their exception spec checks delayed, plus the prior declaration they
/// should be checked against. Except during error recovery, the new decl
/// should always be a friend declaration, as that's the only valid way to
/// redeclare a special member before its class is complete.
SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2>
DelayedEquivalentExceptionSpecChecks;
/// All the members seen during a class definition which were both
/// explicitly defaulted and had explicitly-specified exception
/// specifications, along with the function type containing their
/// user-specified exception specification. Those exception specifications
/// were overridden with the default specifications, but we still need to
/// check whether they are compatible with the default specification, and
/// we can't do that until the nesting set of class definitions is complete.
SmallVector<std::pair<CXXMethodDecl*, const FunctionProtoType*>, 2>
DelayedDefaultedMemberExceptionSpecs;
typedef llvm::MapVector<const FunctionDecl *,
std::unique_ptr<LateParsedTemplate>>
LateParsedTemplateMapT;
LateParsedTemplateMapT LateParsedTemplateMap;
/// Callback to the parser to parse templated functions when needed.
typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT);
typedef void LateTemplateParserCleanupCB(void *P);
LateTemplateParserCB *LateTemplateParser;
LateTemplateParserCleanupCB *LateTemplateParserCleanup;
void *OpaqueParser;
void SetLateTemplateParser(LateTemplateParserCB *LTP,
LateTemplateParserCleanupCB *LTPCleanup,
void *P) {
LateTemplateParser = LTP;
LateTemplateParserCleanup = LTPCleanup;
OpaqueParser = P;
}
class DelayedDiagnostics;
/// Opaque token recording the pool that was active before a push; returned
/// by DelayedDiagnostics::push/pushUndelayed and consumed by the pop calls.
class DelayedDiagnosticsState {
sema::DelayedDiagnosticPool *SavedPool;
friend class Sema::DelayedDiagnostics;
};
typedef DelayedDiagnosticsState ParsingDeclState;
typedef DelayedDiagnosticsState ProcessingContextState;
/// A class which encapsulates the logic for delaying diagnostics
/// during parsing and other processing.
class DelayedDiagnostics {
/// The current pool of diagnostics into which delayed
/// diagnostics should go.
sema::DelayedDiagnosticPool *CurPool;
public:
DelayedDiagnostics() : CurPool(nullptr) {}
/// Adds a delayed diagnostic.
void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h
/// Determines whether diagnostics should be delayed.
bool shouldDelayDiagnostics() { return CurPool != nullptr; }
/// Returns the current delayed-diagnostics pool.
sema::DelayedDiagnosticPool *getCurrentPool() const {
return CurPool;
}
/// Enter a new scope. Access and deprecation diagnostics will be
/// collected in this pool.
DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = &pool;
return state;
}
/// Leave a delayed-diagnostic state that was previously pushed.
/// Do not emit any of the diagnostics. This is performed as part
/// of the bookkeeping of popping a pool "properly".
void popWithoutEmitting(DelayedDiagnosticsState state) {
CurPool = state.SavedPool;
}
/// Enter a new scope where access and deprecation diagnostics are
/// not delayed.
DelayedDiagnosticsState pushUndelayed() {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = nullptr;
return state;
}
/// Undo a previous pushUndelayed().
void popUndelayed(DelayedDiagnosticsState state) {
assert(CurPool == nullptr);
CurPool = state.SavedPool;
}
} DelayedDiagnostics;
/// A RAII object to temporarily push a declaration context.
class ContextRAII {
private:
Sema &S;
DeclContext *SavedContext;
ProcessingContextState SavedContextState;
QualType SavedCXXThisTypeOverride;
public:
/// Push \p ContextToPush as the current declaration context, also entering
/// an undelayed-diagnostics scope and (by default) clearing any CXXThis
/// type override.
ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true)
: S(S), SavedContext(S.CurContext),
SavedContextState(S.DelayedDiagnostics.pushUndelayed()),
SavedCXXThisTypeOverride(S.CXXThisTypeOverride)
{
assert(ContextToPush && "pushing null context");
S.CurContext = ContextToPush;
if (NewThisContext)
S.CXXThisTypeOverride = QualType();
}
/// Restore the saved state. Safe to call more than once: SavedContext is
/// nulled after the first call so later calls (and the destructor) no-op.
void pop() {
if (!SavedContext) return;
S.CurContext = SavedContext;
S.DelayedDiagnostics.popUndelayed(SavedContextState);
S.CXXThisTypeOverride = SavedCXXThisTypeOverride;
SavedContext = nullptr;
}
~ContextRAII() {
pop();
}
};
/// RAII object to handle the state changes required to synthesize
/// a function body.
class SynthesizedFunctionScope {
Sema &S;
Sema::ContextRAII SavedContext;
bool PushedCodeSynthesisContext = false;
public:
/// Enter \p DC (a FunctionDecl or ObjCMethodDecl), push a function scope
/// and a potentially-evaluated expression context, and mark the function
/// as about to get a body.
SynthesizedFunctionScope(Sema &S, DeclContext *DC)
: S(S), SavedContext(S, DC) {
S.PushFunctionScope();
S.PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
if (auto *FD = dyn_cast<FunctionDecl>(DC))
FD->setWillHaveBody(true);
else
assert(isa<ObjCMethodDecl>(DC));
}
/// Record a code-synthesis context note pointing at \p UseLoc; popped
/// again in the destructor. May be called at most once per scope.
void addContextNote(SourceLocation UseLoc) {
assert(!PushedCodeSynthesisContext);
Sema::CodeSynthesisContext Ctx;
Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction;
Ctx.PointOfInstantiation = UseLoc;
Ctx.Entity = cast<Decl>(S.CurContext);
S.pushCodeSynthesisContext(Ctx);
PushedCodeSynthesisContext = true;
}
/// Unwind everything pushed by the constructor (and addContextNote, if it
/// was called), in reverse order.
~SynthesizedFunctionScope() {
if (PushedCodeSynthesisContext)
S.popCodeSynthesisContext();
if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext))
FD->setWillHaveBody(false);
S.PopExpressionEvaluationContext();
S.PopFunctionScopeInfo();
}
};
/// WeakUndeclaredIdentifiers - Identifiers contained in
/// \#pragma weak before declared. rare. may alias another
/// identifier, declared or undeclared
llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers;
/// ExtnameUndeclaredIdentifiers - Identifiers contained in
/// \#pragma redefine_extname before declared. Used in Solaris system headers
/// to define functions that occur in multiple standards to call the version
/// in the currently selected standard.
llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers;
/// Load weak undeclared identifiers from the external source.
void LoadExternalWeakUndeclaredIdentifiers();
/// WeakTopLevelDecl - Translation-unit scoped declarations generated by
/// \#pragma weak during processing of other Decls.
/// I couldn't figure out a clean way to generate these in-line, so
/// we store them here and handle separately -- which is a hack.
/// It would be best to refactor this.
SmallVector<Decl*,2> WeakTopLevelDecl;
IdentifierResolver IdResolver;
/// Translation Unit Scope - useful to Objective-C actions that need
/// to lookup file scope declarations in the "ordinary" C decl namespace.
/// For example, user-defined classes, built-in "id" type, etc.
Scope *TUScope;
/// The C++ "std" namespace, where the standard library resides.
LazyDeclPtr StdNamespace;
/// The C++ "std::bad_alloc" class, which is defined by the C++
/// standard library.
LazyDeclPtr StdBadAlloc;
/// The C++ "std::align_val_t" enum class, which is defined by the C++
/// standard library.
LazyDeclPtr StdAlignValT;
/// The C++ "std::experimental" namespace, where the experimental parts
/// of the standard library reside.
NamespaceDecl *StdExperimentalNamespaceCache;
/// The C++ "std::initializer_list" template, which is defined in
/// \<initializer_list>.
ClassTemplateDecl *StdInitializerList;
/// The C++ "std::coroutine_traits" template, which is defined in
/// \<coroutine_traits>
ClassTemplateDecl *StdCoroutineTraitsCache;
/// The C++ "type_info" declaration, which is defined in \<typeinfo>.
RecordDecl *CXXTypeInfoDecl;
/// The MSVC "_GUID" struct, which is defined in MSVC header files.
RecordDecl *MSVCGuidDecl;
/// Caches identifiers/selectors for NSFoundation APIs.
std::unique_ptr<NSAPI> NSAPIObj;
/// The declaration of the Objective-C NSNumber class.
ObjCInterfaceDecl *NSNumberDecl;
/// The declaration of the Objective-C NSValue class.
ObjCInterfaceDecl *NSValueDecl;
/// Pointer to NSNumber type (NSNumber *).
QualType NSNumberPointer;
/// Pointer to NSValue type (NSValue *).
QualType NSValuePointer;
/// The Objective-C NSNumber methods used to create NSNumber literals.
ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods];
/// The declaration of the Objective-C NSString class.
ObjCInterfaceDecl *NSStringDecl;
/// Pointer to NSString type (NSString *).
QualType NSStringPointer;
/// The declaration of the stringWithUTF8String: method.
ObjCMethodDecl *StringWithUTF8StringMethod;
/// The declaration of the valueWithBytes:objCType: method.
ObjCMethodDecl *ValueWithBytesObjCTypeMethod;
/// The declaration of the Objective-C NSArray class.
ObjCInterfaceDecl *NSArrayDecl;
/// The declaration of the arrayWithObjects:count: method.
ObjCMethodDecl *ArrayWithObjectsMethod;
/// The declaration of the Objective-C NSDictionary class.
ObjCInterfaceDecl *NSDictionaryDecl;
/// The declaration of the dictionaryWithObjects:forKeys:count: method.
ObjCMethodDecl *DictionaryWithObjectsMethod;
/// id<NSCopying> type.
QualType QIDNSCopying;
/// will hold 'respondsToSelector:'
Selector RespondsToSelectorSel;
/// A flag to remember whether the implicit forms of operator new and delete
/// have been declared.
bool GlobalNewDeleteDeclared;
/// A flag to indicate that we're in a context that permits abstract
/// references to fields, such as operands of MS-style inline assembly.
/// NOTE(review): the original comment was left unfinished upstream
/// ("This is really a"); confirm the intended qualification.
bool AllowAbstractFieldReference;
/// Describes how the expressions currently being parsed are
/// evaluated at run-time, if at all.
///
/// One value is stored in each ExpressionEvaluationContextRecord pushed
/// onto ExprEvalContexts.
enum class ExpressionEvaluationContext {
/// The current expression and its subexpressions occur within an
/// unevaluated operand (C++11 [expr]p7), such as the subexpression of
/// \c sizeof, where the type of the expression may be significant but
/// no code will be generated to evaluate the value of the expression at
/// run time.
Unevaluated,
/// The current expression occurs within a braced-init-list within
/// an unevaluated operand. This is mostly like a regular unevaluated
/// context, except that we still instantiate constexpr functions that are
/// referenced here so that we can perform narrowing checks correctly.
UnevaluatedList,
/// The current expression occurs within a discarded statement.
/// This behaves largely similarly to an unevaluated operand in preventing
/// definitions from being required, but not in other ways.
DiscardedStatement,
/// The current expression occurs within an unevaluated
/// operand that unconditionally permits abstract references to
/// fields, such as a SIZE operator in MS-style inline assembly.
UnevaluatedAbstract,
/// The current context is "potentially evaluated" in C++11 terms,
/// but the expression is evaluated at compile-time (like the values of
/// cases in a switch statement).
ConstantEvaluated,
/// The current expression is potentially evaluated at run time,
/// which means that code may be generated to evaluate the value of the
/// expression at run time.
PotentiallyEvaluated,
/// The current expression is potentially evaluated, but any
/// declarations referenced inside that expression are only used if
/// in fact the current expression is used.
///
/// This value is used when parsing default function arguments, for which
/// we would like to provide diagnostics (e.g., passing non-POD arguments
/// through varargs) but do not want to mark declarations as "referenced"
/// until the default argument is used.
PotentiallyEvaluatedIfUsed
};
/// Data structure used to record current or nested
/// expression evaluation contexts.
struct ExpressionEvaluationContextRecord {
/// The expression evaluation context.
ExpressionEvaluationContext Context;
/// Whether the enclosing context needed a cleanup.
CleanupInfo ParentCleanup;
/// Whether we are in a decltype expression.
/// Initialized to false; set by callers that enter a decltype context.
bool IsDecltype;
/// The number of active cleanup objects when we entered
/// this expression evaluation context.
unsigned NumCleanupObjects;
/// The number of typos encountered during this expression evaluation
/// context (i.e. the number of TypoExprs created).
unsigned NumTypos;
// NOTE(review): presumably the enclosing context's maybe-ODR-used
// expressions, parked here while this context is active — confirm with
// the push/pop callers.
llvm::SmallPtrSet<Expr*, 2> SavedMaybeODRUseExprs;
/// The lambdas that are present within this context, if it
/// is indeed an unevaluated context.
SmallVector<LambdaExpr *, 2> Lambdas;
/// The declaration that provides context for lambda expressions
/// and block literals if the normal declaration context does not
/// suffice, e.g., in a default function argument.
Decl *ManglingContextDecl;
/// The context information used to mangle lambda expressions
/// and block literals within this context.
///
/// This mangling information is allocated lazily, since most contexts
/// do not have lambda expressions or block literals.
std::unique_ptr<MangleNumberingContext> MangleNumbering;
/// If we are processing a decltype type, a set of call expressions
/// for which we have deferred checking the completeness of the return type.
SmallVector<CallExpr *, 8> DelayedDecltypeCalls;
/// If we are processing a decltype type, a set of temporary binding
/// expressions for which we have deferred checking the destructor.
SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;
// Dereferences of 'noderef' pointers still awaiting an address-of check
// (diagnosed via WarnOnPendingNoDerefs).
llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs;
/// \brief Describes whether we are in an expression context which we have
/// to handle differently.
enum ExpressionKind {
EK_Decltype, EK_TemplateArgument, EK_Other
} ExprContext;
// Fix: IsDecltype was previously left uninitialized by this constructor,
// making any read before an explicit assignment undefined behavior.
ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
unsigned NumCleanupObjects,
CleanupInfo ParentCleanup,
Decl *ManglingContextDecl,
ExpressionKind ExprContext)
: Context(Context), ParentCleanup(ParentCleanup), IsDecltype(false),
NumCleanupObjects(NumCleanupObjects), NumTypos(0),
ManglingContextDecl(ManglingContextDecl), MangleNumbering(),
ExprContext(ExprContext) {}
/// Retrieve the mangling numbering context, used to consistently
/// number constructs like lambdas for mangling.
MangleNumberingContext &getMangleNumberingContext(ASTContext &Ctx);
/// True for all three unevaluated-operand flavors.
bool isUnevaluated() const {
return Context == ExpressionEvaluationContext::Unevaluated ||
Context == ExpressionEvaluationContext::UnevaluatedAbstract ||
Context == ExpressionEvaluationContext::UnevaluatedList;
}
bool isConstantEvaluated() const {
return Context == ExpressionEvaluationContext::ConstantEvaluated;
}
};
/// A stack of expression evaluation contexts.
SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;
/// Emit a warning for all pending noderef expressions that we recorded.
void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec);
/// Compute the mangling number context for a lambda expression or
/// block literal.
///
/// \param DC - The DeclContext containing the lambda expression or
/// block literal.
/// \param[out] ManglingContextDecl - Returns the ManglingContextDecl
/// associated with the context, if relevant.
MangleNumberingContext *getCurrentMangleNumberContext(
const DeclContext *DC,
Decl *&ManglingContextDecl);
/// SpecialMemberOverloadResult - The overloading result for a special member
/// function.
///
/// This is basically a wrapper around PointerIntPair. The lowest bits of the
/// integer are used to determine whether overload resolution succeeded.
class SpecialMemberOverloadResult {
public:
enum Kind {
NoMemberOrDeleted,
Ambiguous,
Success
};
private:
// The resolved method plus 2 tag bits holding a Kind enumerator.
llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;
public:
SpecialMemberOverloadResult() : Pair() {}
// Convenience: a deleted method maps to NoMemberOrDeleted, anything else
// to Success.
SpecialMemberOverloadResult(CXXMethodDecl *MD)
: Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {}
CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }
Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
void setKind(Kind K) { Pair.setInt(K); }
};
// FoldingSet entry wrapper: pairs an overload result with a precomputed
// FoldingSetNodeID so results can be cached in a FoldingSet.
class SpecialMemberOverloadResultEntry
: public llvm::FastFoldingSetNode,
public SpecialMemberOverloadResult {
public:
SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID)
: FastFoldingSetNode(ID)
{}
};
/// A cache of special member function overload resolution results
/// for C++ records.
llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache;
/// A cache of the flags available in enumerations with the flag_bits
/// attribute.
mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache;
/// The kind of translation unit we are processing.
///
/// When we're processing a complete translation unit, Sema will perform
/// end-of-translation-unit semantic tasks (such as creating
/// initializers for tentative definitions in C) once parsing has
/// completed. Modules and precompiled headers perform different kinds of
/// checks.
TranslationUnitKind TUKind;
llvm::BumpPtrAllocator BumpAlloc;
/// The number of SFINAE diagnostics that have been trapped.
unsigned NumSFINAEErrors;
typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>>
UnparsedDefaultArgInstantiationsMap;
/// A mapping from parameters with unparsed default arguments to the
/// set of instantiations of each parameter.
///
/// This mapping is a temporary data structure used when parsing
/// nested class templates or nested classes of class templates,
/// where we might end up instantiating an inner class before the
/// default arguments of its methods have been parsed.
UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;
// Contains the locations of the beginning of unparsed default
// argument locations.
llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs;
/// UndefinedInternals - all the used, undefined objects which require a
/// definition in this translation unit.
llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed;
/// Determine if VD, which must be a variable or function, is an external
/// symbol that nonetheless can't be referenced from outside this translation
/// unit because its type has no linkage and it's not extern "C".
bool isExternalWithNoLinkageType(ValueDecl *VD);
/// Obtain a sorted list of functions that are undefined but ODR-used.
void getUndefinedButUsed(
SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined);
/// Retrieves list of suspicious delete-expressions that will be checked at
/// the end of translation unit.
const llvm::MapVector<FieldDecl *, DeleteLocs> &
getMismatchingDeleteExpressions() const;
typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods;
typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool;
/// Method Pool - allows efficient lookup when typechecking messages to "id".
/// We need to maintain a list, since selectors can have differing signatures
/// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
/// of selectors are "overloaded").
/// At the head of the list it is recorded whether there were 0, 1, or >= 2
/// methods inside categories with a particular selector.
GlobalMethodPool MethodPool;
/// Method selectors used in a \@selector expression. Used for implementation
/// of -Wselector.
llvm::MapVector<Selector, SourceLocation> ReferencedSelectors;
/// Kinds of C++ special members.
enum CXXSpecialMember {
CXXDefaultConstructor,
CXXCopyConstructor,
CXXMoveConstructor,
CXXCopyAssignment,
CXXMoveAssignment,
CXXDestructor,
CXXInvalid
};
typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember>
SpecialMemberDecl;
/// The C++ special members which we are currently in the process of
/// declaring. If this process recursively triggers the declaration of the
/// same special member, we should act as if it is not yet declared.
llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;
/// The function definitions which were renamed as part of typo-correction
/// to match their respective declarations. We want to keep track of them
/// to ensure that we don't emit a "redefinition" error if we encounter a
/// correctly named definition after the renamed definition.
llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions;
/// Stack of types that correspond to the parameter entities that are
/// currently being copy-initialized. Can be empty.
llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes;
void ReadMethodPool(Selector Sel);
void updateOutOfDateSelector(Selector Sel);
/// Private Helper predicate to check for 'self'.
bool isSelfExpr(Expr *RExpr);
bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);
/// Cause the active diagnostic on the DiagnosticsEngine to be
/// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
/// should not be used elsewhere.
void EmitCurrentDiagnostic(unsigned DiagID);
/// Records and restores the FP_CONTRACT state on entry/exit of compound
/// statements.
class FPContractStateRAII {
public:
// Snapshot the current FP options on entry.
FPContractStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.FPFeatures) {}
// Restore them unconditionally on exit.
~FPContractStateRAII() { S.FPFeatures = OldFPFeaturesState; }
private:
Sema& S;
// FP options captured at construction time.
FPOptions OldFPFeaturesState;
};
void addImplicitTypedef(StringRef Name, QualType T);
public:
Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
TranslationUnitKind TUKind = TU_Complete,
CodeCompleteConsumer *CompletionConsumer = nullptr);
~Sema();
/// Perform initialization that occurs after the parser has been
/// initialized but before it parses anything.
void Initialize();
const LangOptions &getLangOpts() const { return LangOpts; }
OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
FPOptions &getFPOptions() { return FPFeatures; }
DiagnosticsEngine &getDiagnostics() const { return Diags; }
SourceManager &getSourceManager() const { return SourceMgr; }
Preprocessor &getPreprocessor() const { return PP; }
ASTContext &getASTContext() const { return Context; }
ASTConsumer &getASTConsumer() const { return Consumer; }
ASTMutationListener *getASTMutationListener() const;
ExternalSemaSource* getExternalSource() const { return ExternalSource; }
///Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
///
///\param[in] E - A non-null external sema source.
///
void addExternalSource(ExternalSemaSource *E);
void PrintStats() const;
/// Helper class that creates diagnostics with optional
/// template instantiation stacks.
///
/// This class provides a wrapper around the basic DiagnosticBuilder
/// class that emits diagnostics. SemaDiagnosticBuilder is
/// responsible for emitting the diagnostic (as DiagnosticBuilder
/// does) and, if the diagnostic comes from inside a template
/// instantiation, printing the template instantiation stack as
/// well.
class SemaDiagnosticBuilder : public DiagnosticBuilder {
Sema &SemaRef;
// Kept so the destructor can tell Sema which diagnostic to emit.
unsigned DiagID;
public:
SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
: DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { }
// This is a cunning lie. DiagnosticBuilder actually performs move
// construction in its copy constructor (but due to varied uses, it's not
// possible to conveniently express this as actual move construction). So
// the default copy ctor here is fine, because the base class disables the
// source anyway, so the user-defined ~SemaDiagnosticBuilder is a safe no-op
// in that case anyway.
SemaDiagnosticBuilder(const SemaDiagnosticBuilder&) = default;
~SemaDiagnosticBuilder() {
// If we aren't active, there is nothing to do.
if (!isActive()) return;
// Otherwise, we need to emit the diagnostic. First flush the underlying
// DiagnosticBuilder data, and clear the diagnostic builder itself so it
// won't emit the diagnostic in its own destructor.
//
// This seems wasteful, in that as written the DiagnosticBuilder dtor will
// do its own needless checks to see if the diagnostic needs to be
// emitted. However, because we take care to ensure that the builder
// objects never escape, a sufficiently smart compiler will be able to
// eliminate that code.
FlushCounts();
Clear();
// Dispatch to Sema to emit the diagnostic.
SemaRef.EmitCurrentDiagnostic(DiagID);
}
/// Teach operator<< to produce an object of the correct type.
template<typename T>
friend const SemaDiagnosticBuilder &operator<<(
const SemaDiagnosticBuilder &Diag, const T &Value) {
const DiagnosticBuilder &BaseDiag = Diag;
BaseDiag << Value;
return Diag;
}
};
/// Emit a diagnostic.
///
/// Wraps the DiagnosticBuilder in a SemaDiagnosticBuilder so that, per the
/// class comment above, instantiation-stack notes are attached on emission.
SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) {
// DB must be a named lvalue: SemaDiagnosticBuilder's ctor takes a
// non-const DiagnosticBuilder reference.
DiagnosticBuilder DB = Diags.Report(Loc, DiagID);
return SemaDiagnosticBuilder(DB, *this, DiagID);
}
/// Emit a partial diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD);
/// Build a partial diagnostic.
PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h
bool findMacroSpelling(SourceLocation &loc, StringRef name);
/// Get a string to suggest for zero-initialization of a type.
std::string
getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const;
std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;
/// Calls \c Lexer::getLocForEndOfToken()
SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);
/// Retrieve the module loader associated with the preprocessor.
ModuleLoader &getModuleLoader() const;
void emitAndClearUnusedLocalTypedefWarnings();
void ActOnStartOfTranslationUnit();
void ActOnEndOfTranslationUnit();
void CheckDelegatingCtorCycles();
Scope *getScopeForContext(DeclContext *Ctx);
void PushFunctionScope();
void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
sema::LambdaScopeInfo *PushLambdaScope();
/// This is used to inform Sema what the current TemplateParameterDepth
/// is during Parsing. Currently it is used to pass on the depth
/// when parsing generic lambda 'auto' parameters.
void RecordParsingTemplateParameterDepth(unsigned Depth);
void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD,
RecordDecl *RD,
CapturedRegionKind K);
void
PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr,
const Decl *D = nullptr,
const BlockExpr *blkExpr = nullptr);
/// Retrieve the innermost function scope, or null when no function scope
/// has been pushed.
sema::FunctionScopeInfo *getCurFunction() const {
  if (FunctionScopes.empty())
    return nullptr;
  return FunctionScopes.back();
}
sema::FunctionScopeInfo *getEnclosingFunction() const;
void setFunctionHasBranchIntoScope();
void setFunctionHasBranchProtectedScope();
void setFunctionHasIndirectGoto();
void PushCompoundScope(bool IsStmtExpr);
void PopCompoundScope();
sema::CompoundScopeInfo &getCurCompoundScope() const;
bool hasAnyUnrecoverableErrorsInThisFunction() const;
/// Retrieve the current block, if any.
sema::BlockScopeInfo *getCurBlock();
/// Retrieve the current lambda scope info, if any.
/// \param IgnoreNonLambdaCapturingScope true if should find the top-most
/// lambda scope info ignoring all inner capturing scopes that are not
/// lambda scopes.
sema::LambdaScopeInfo *
getCurLambda(bool IgnoreNonLambdaCapturingScope = false);
/// Retrieve the current generic lambda info, if any.
sema::LambdaScopeInfo *getCurGenericLambda();
/// Retrieve the current captured region, if any.
sema::CapturedRegionScopeInfo *getCurCapturedRegion();
/// WeakTopLevelDeclDecls - mutable access to the \#pragma weak-generated
/// Decls accumulated in WeakTopLevelDecl.
SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; }
void ActOnComment(SourceRange Comment);
//===--------------------------------------------------------------------===//
// Type Analysis / Processing: SemaType.cpp.
//
QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs,
const DeclSpec *DS = nullptr);
QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA,
const DeclSpec *DS = nullptr);
QualType BuildPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildReferenceType(QualType T, bool LValueRef,
SourceLocation Loc, DeclarationName Entity);
QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
Expr *ArraySize, unsigned Quals,
SourceRange Brackets, DeclarationName Entity);
QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc);
QualType BuildExtVectorType(QualType T, Expr *ArraySize,
SourceLocation AttrLoc);
QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace,
SourceLocation AttrLoc);
/// Same as above, but constructs the AddressSpace index if not provided.
QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace,
SourceLocation AttrLoc);
bool CheckFunctionReturnType(QualType T, SourceLocation Loc);
/// Build a function type.
///
/// This routine checks the function type according to C++ rules and
/// under the assumption that the result type and parameter types have
/// just been instantiated from a template. It therefore duplicates
/// some of the behavior of GetTypeForDeclarator, but in a much
/// simpler form that is only suitable for this narrow use case.
///
/// \param T The return type of the function.
///
/// \param ParamTypes The parameter types of the function. This array
/// will be modified to account for adjustments to the types of the
/// function parameters.
///
/// \param Loc The location of the entity whose type involves this
/// function type or, if there is no such entity, the location of the
/// type that will have function type.
///
/// \param Entity The name of the entity that involves the function
/// type, if known.
///
/// \param EPI Extra information about the function type. Usually this will
/// be taken from an existing function with the same prototype.
///
/// \returns A suitable function type, if there are no errors. The
/// unqualified type will always be a FunctionProtoType.
/// Otherwise, returns a NULL type.
QualType BuildFunctionType(QualType T,
MutableArrayRef<QualType> ParamTypes,
SourceLocation Loc, DeclarationName Entity,
const FunctionProtoType::ExtProtoInfo &EPI);
QualType BuildMemberPointerType(QualType T, QualType Class,
SourceLocation Loc,
DeclarationName Entity);
QualType BuildBlockPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildParenType(QualType T);
QualType BuildAtomicType(QualType T, SourceLocation Loc);
QualType BuildReadPipeType(QualType T,
SourceLocation Loc);
QualType BuildWritePipeType(QualType T,
SourceLocation Loc);
TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S);
TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy);
/// Package the given type and TSI into a ParsedType.
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo);
DeclarationNameInfo GetNameForDeclarator(Declarator &D);
DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name);
static QualType GetTypeFromParser(ParsedType Ty,
TypeSourceInfo **TInfo = nullptr);
CanThrowResult canThrow(const Expr *E);
const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc,
const FunctionProtoType *FPT);
void UpdateExceptionSpec(FunctionDecl *FD,
const FunctionProtoType::ExceptionSpecInfo &ESI);
bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range);
bool CheckDistantExceptionSpec(QualType T);
bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New);
bool CheckEquivalentExceptionSpec(
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool CheckEquivalentExceptionSpec(
const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool handlerCanCatch(QualType HandlerType, QualType ExceptionType);
bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID,
const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const FunctionProtoType *Superset,
SourceLocation SuperLoc,
const FunctionProtoType *Subset,
SourceLocation SubLoc);
bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const FunctionProtoType *Target,
SourceLocation TargetLoc,
const FunctionProtoType *Source,
SourceLocation SourceLoc);
TypeResult ActOnTypeName(Scope *S, Declarator &D);
/// The parser has parsed the context-sensitive type 'instancetype'
/// in an Objective-C message declaration. Return the appropriate type.
ParsedType ActOnObjCInstanceType(SourceLocation Loc);
/// Abstract interface used to diagnose incomplete types.
struct TypeDiagnoser {
  TypeDiagnoser() = default;

  /// Emit the diagnostic for the (incomplete) type T at Loc.
  virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;

  virtual ~TypeDiagnoser() = default;
};
// getPrintable - overload set that normalizes arbitrary argument values
// into types the diagnostic streaming operators can print; used by
// BoundTypeDiagnoser below.
static int getPrintable(int I) { return I; }
static unsigned getPrintable(unsigned I) { return I; }
static bool getPrintable(bool B) { return B; }
static const char * getPrintable(const char *S) { return S; }
static StringRef getPrintable(StringRef S) { return S; }
static const std::string &getPrintable(const std::string &S) { return S; }
static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
return II;
}
static DeclarationName getPrintable(DeclarationName N) { return N; }
static QualType getPrintable(QualType T) { return T; }
static SourceRange getPrintable(SourceRange R) { return R; }
// A lone SourceLocation is widened to a SourceRange for printing.
static SourceRange getPrintable(SourceLocation L) { return L; }
static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); }
static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();}
// TypeDiagnoser that carries a diagnostic ID plus extra arguments to
// stream into the diagnostic before the offending type.
template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser {
unsigned DiagID;
// Arguments are bound by reference: callers must keep them alive for the
// diagnoser's lifetime.
std::tuple<const Ts &...> Args;
// Stream every bound argument into DB, left to right.
template <std::size_t... Is>
void emit(const SemaDiagnosticBuilder &DB,
llvm::index_sequence<Is...>) const {
// Apply all tuple elements to the builder in order.
bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...};
(void)Dummy;
}
public:
BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args)
: TypeDiagnoser(), DiagID(DiagID), Args(Args...) {
assert(DiagID != 0 && "no diagnostic for type diagnoser");
}
void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID);
emit(DB, llvm::index_sequence_for<Ts...>());
// The type itself is always streamed last.
DB << T;
}
};
private:
/// Methods for marking which expressions involve dereferencing a pointer
/// marked with the 'noderef' attribute. Expressions are checked bottom up as
/// they are parsed, meaning that a noderef pointer may not be accessed. For
/// example, in `&*p` where `p` is a noderef pointer, we will first parse the
/// `*p`, but need to check that `address of` is called on it. This requires
/// keeping a container of all pending expressions and checking if the address
/// of them are eventually taken.
void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E);
void CheckAddressOfNoDeref(const Expr *E);
void CheckMemberAccessOfNoDeref(const MemberExpr *E);
bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
TypeDiagnoser *Diagnoser);
// Per-module parsing state kept on the ModuleScopes stack below.
struct ModuleScope {
clang::Module *Module = nullptr;
// Whether this scope is a module interface unit.
bool ModuleInterface = false;
// Visible-module set saved from the enclosing scope.
// NOTE(review): presumably restored when the scope is popped — confirm.
VisibleModuleSet OuterVisibleModules;
};
/// The modules we're currently parsing.
llvm::SmallVector<ModuleScope, 16> ModuleScopes;
/// Get the module whose scope we are currently within, or null when we
/// are not parsing inside any module.
Module *getCurrentModule() const {
  if (ModuleScopes.empty())
    return nullptr;
  return ModuleScopes.back().Module;
}
VisibleModuleSet VisibleModules;
public:
/// Get the module owning an entity.
/// Thin wrapper over Decl::getOwningModule().
Module *getOwningModule(Decl *Entity) { return Entity->getOwningModule(); }
/// Make a merged definition of an existing hidden definition \p ND
/// visible at the specified location.
void makeMergedDefinitionVisible(NamedDecl *ND);
bool isModuleVisible(const Module *M, bool ModulePrivate = false);
/// Determine whether a declaration is visible to name lookup.
bool isVisible(const NamedDecl *D) {
  // Fast path: a declaration that is not hidden is always visible.
  if (!D->isHidden())
    return true;
  // Hidden declarations need the full module-visibility computation.
  return isVisibleSlow(D);
}
/// Determine whether any declaration of an entity is visible.
///
/// \param Modules if non-null, receives the modules that would need to be
///        imported to make a declaration visible (slow path only).
bool
hasVisibleDeclaration(const NamedDecl *D,
                      llvm::SmallVectorImpl<Module *> *Modules = nullptr) {
  // The cheap check first; only fall back to the slow walk on failure.
  if (isVisible(D))
    return true;
  return hasVisibleDeclarationSlow(D, Modules);
}
bool hasVisibleDeclarationSlow(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules);
bool hasVisibleMergedDefinition(NamedDecl *Def);
bool hasMergedDefinitionInCurrentModule(NamedDecl *Def);
/// Determine if \p D and \p Suggested have a structurally compatible
/// layout as described in C11 6.2.7/1.
bool hasStructuralCompatLayout(Decl *D, Decl *Suggested);
/// Determine if \p D has a visible definition. If not, suggest a declaration
/// that should be made visible to expose the definition.
bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
bool OnlyNeedComplete = false);
// Convenience overload that discards the suggested declaration.
bool hasVisibleDefinition(const NamedDecl *D) {
NamedDecl *Hidden;
return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden);
}
/// Determine if the template parameter \p D has a visible default argument.
bool
hasVisibleDefaultArgument(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is an explicit
/// specialization declaration for a specialization of a template. (For a
/// member specialization, use hasVisibleMemberSpecialization.)
bool hasVisibleExplicitSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is a member
/// specialization declaration (as opposed to an instantiated declaration).
bool hasVisibleMemberSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if \p A and \p B are equivalent internal linkage declarations
/// from different modules, and thus an ambiguity error can be downgraded to
/// an extension warning.
bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A,
const NamedDecl *B);
void diagnoseEquivalentInternalLinkageDeclarations(
SourceLocation Loc, const NamedDecl *D,
ArrayRef<const NamedDecl *> Equiv);
bool isUsualDeallocationFunction(const CXXMethodDecl *FD);
bool isCompleteType(SourceLocation Loc, QualType T) {
  // A type is complete exactly when requiring completeness (with no
  // diagnoser, so nothing is reported) does not fail.
  bool Incomplete = RequireCompleteTypeImpl(Loc, T, nullptr);
  return !Incomplete;
}
bool RequireCompleteType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireCompleteType(SourceLocation Loc, QualType T,
unsigned DiagID);
// Variadic convenience overload: packages the diagnostic ID and its
// arguments into a BoundTypeDiagnoser and forwards to the
// TypeDiagnoser-based overload.
template <typename... Ts>
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID,
                         const Ts &...Args) {
  BoundTypeDiagnoser<Ts...> BoundDiagnoser(DiagID, Args...);
  return RequireCompleteType(Loc, T, BoundDiagnoser);
}
void completeExprArrayBound(Expr *E);
bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser);
bool RequireCompleteExprType(Expr *E, unsigned DiagID);
// Variadic convenience overload: packages the diagnostic ID and its
// arguments into a BoundTypeDiagnoser and forwards to the
// TypeDiagnoser-based overload.
template <typename... Ts>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) {
  BoundTypeDiagnoser<Ts...> BoundDiagnoser(DiagID, Args...);
  return RequireCompleteExprType(E, BoundDiagnoser);
}
bool RequireLiteralType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);
// Variadic convenience overload: packages the diagnostic ID and its
// arguments into a BoundTypeDiagnoser and forwards to the
// TypeDiagnoser-based overload.
template <typename... Ts>
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID,
                        const Ts &...Args) {
  BoundTypeDiagnoser<Ts...> BoundDiagnoser(DiagID, Args...);
  return RequireLiteralType(Loc, T, BoundDiagnoser);
}
QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
const CXXScopeSpec &SS, QualType T,
TagDecl *OwnedTagDecl = nullptr);
QualType BuildTypeofExprType(Expr *E, SourceLocation Loc);
/// If AsUnevaluated is false, E is treated as though it were an evaluated
/// context, such as when building a type for decltype(auto).
QualType BuildDecltypeType(Expr *E, SourceLocation Loc,
bool AsUnevaluated = true);
QualType BuildUnaryTransformType(QualType BaseType,
UnaryTransformType::UTTKind UKind,
SourceLocation Loc);
//===--------------------------------------------------------------------===//
// Symbol table / Decl tracking callbacks: SemaDecl.cpp.
//
/// Records whether (and how) the parser should skip the body of a
/// definition because an equivalent one has already been seen.
struct SkipBodyInfo {
  SkipBodyInfo() = default;
  /// True if the body should not be parsed.
  bool ShouldSkip = false;
  /// True if the new definition must first be checked for equivalence
  /// against \c Previous before its body may be skipped.
  bool CheckSameAsPrevious = false;
  /// The prior declaration this definition duplicates, if any.
  NamedDecl *Previous = nullptr;
  /// The newly-seen declaration, if any.
  NamedDecl *New = nullptr;
};
DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr);
void DiagnoseUseOfUnimplementedSelectors();
bool isSimpleTypeSpecifier(tok::TokenKind Kind) const;
ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec *SS = nullptr,
bool isClassName = false, bool HasTrailingDot = false,
ParsedType ObjectType = nullptr,
bool IsCtorOrDtorName = false,
bool WantNontrivialTypeSourceInfo = false,
bool IsClassTemplateDeductionContext = true,
IdentifierInfo **CorrectedII = nullptr);
TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S);
bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S);
void DiagnoseUnknownTypeName(IdentifierInfo *&II,
SourceLocation IILoc,
Scope *S,
CXXScopeSpec *SS,
ParsedType &SuggestedType,
bool IsTemplateName = false);
/// Attempt to behave like MSVC in situations where lookup of an unqualified
/// type name has failed in a dependent context. In these situations, we
/// automatically form a DependentTypeName that will retry lookup in a related
/// scope during instantiation.
ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II,
SourceLocation NameLoc,
bool IsTemplateTypeArg);
/// Describes the result of the name lookup and resolution performed
/// by \c ClassifyName().
enum NameClassificationKind {
  NC_Unknown,             // Lookup found nothing it could classify.
  NC_Error,               // Classification failed; an error was diagnosed.
  NC_Keyword,             // The name is a keyword (e.g. an ObjC keyword).
  NC_Type,                // The name denotes a type.
  NC_Expression,          // The name resolved to an expression.
  NC_NestedNameSpecifier, // The name is the start of a nested-name-specifier.
  NC_TypeTemplate,        // The name denotes a class/alias template.
  NC_VarTemplate,         // The name denotes a variable template.
  NC_FunctionTemplate     // The name denotes a function template.
};
class NameClassification {
NameClassificationKind Kind;
ExprResult Expr;
TemplateName Template;
ParsedType Type;
explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}
public:
NameClassification(ExprResult Expr) : Kind(NC_Expression), Expr(Expr) {}
NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}
NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {}
static NameClassification Error() {
return NameClassification(NC_Error);
}
static NameClassification Unknown() {
return NameClassification(NC_Unknown);
}
static NameClassification NestedNameSpecifier() {
return NameClassification(NC_NestedNameSpecifier);
}
static NameClassification TypeTemplate(TemplateName Name) {
NameClassification Result(NC_TypeTemplate);
Result.Template = Name;
return Result;
}
static NameClassification VarTemplate(TemplateName Name) {
NameClassification Result(NC_VarTemplate);
Result.Template = Name;
return Result;
}
static NameClassification FunctionTemplate(TemplateName Name) {
NameClassification Result(NC_FunctionTemplate);
Result.Template = Name;
return Result;
}
NameClassificationKind getKind() const { return Kind; }
ParsedType getType() const {
assert(Kind == NC_Type);
return Type;
}
ExprResult getExpression() const {
assert(Kind == NC_Expression);
return Expr;
}
TemplateName getTemplateName() const {
assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate ||
Kind == NC_VarTemplate);
return Template;
}
TemplateNameKind getTemplateNameKind() const {
switch (Kind) {
case NC_TypeTemplate:
return TNK_Type_template;
case NC_FunctionTemplate:
return TNK_Function_template;
case NC_VarTemplate:
return TNK_Var_template;
default:
llvm_unreachable("unsupported name classification.");
}
}
};
/// Perform name lookup on the given name, classifying it based on
/// the results of name lookup and the following token.
///
/// This routine is used by the parser to resolve identifiers and help direct
/// parsing. When the identifier cannot be found, this routine will attempt
/// to correct the typo and classify based on the resulting name.
///
/// \param S The scope in which we're performing name lookup.
///
/// \param SS The nested-name-specifier that precedes the name.
///
/// \param Name The identifier. If typo correction finds an alternative name,
/// this pointer parameter will be updated accordingly.
///
/// \param NameLoc The location of the identifier.
///
/// \param NextToken The token following the identifier. Used to help
/// disambiguate the name.
///
/// \param IsAddressOfOperand True if this name is the operand of a unary
/// address of ('&') expression, assuming it is classified as an
/// expression.
///
/// \param CCC The correction callback, if typo correction is desired.
NameClassification
ClassifyName(Scope *S, CXXScopeSpec &SS, IdentifierInfo *&Name,
SourceLocation NameLoc, const Token &NextToken,
bool IsAddressOfOperand,
std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr);
/// Describes the detailed kind of a template name. Used in diagnostics.
enum class TemplateNameKindForDiagnostics {
ClassTemplate,
FunctionTemplate,
VarTemplate,
AliasTemplate,
TemplateTemplateParam,
DependentTemplate
};
TemplateNameKindForDiagnostics
getTemplateNameKindForDiagnostics(TemplateName Name);
/// Determine whether it's plausible that E was intended to be a
/// template-name.
bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) {
  // Only meaningful in C++, and only for a valid expression.
  if (!getLangOpts().CPlusPlus || E.isInvalid())
    return false;
  Dependent = false;
  // Non-dependent references: plausibly a template-name unless template
  // arguments were already written explicitly.
  if (auto *Ref = dyn_cast<DeclRefExpr>(E.get()))
    return !Ref->hasExplicitTemplateArgs();
  if (auto *Member = dyn_cast<MemberExpr>(E.get()))
    return !Member->hasExplicitTemplateArgs();
  // Dependent references: same rule, but report the dependence.
  Dependent = true;
  if (auto *DepRef = dyn_cast<DependentScopeDeclRefExpr>(E.get()))
    return !DepRef->hasExplicitTemplateArgs();
  if (auto *DepMember = dyn_cast<CXXDependentScopeMemberExpr>(E.get()))
    return !DepMember->hasExplicitTemplateArgs();
  // Any additional cases recognized here should also be handled by
  // diagnoseExprIntendedAsTemplateName.
  return false;
}
void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName,
SourceLocation Less,
SourceLocation Greater);
Decl *ActOnDeclarator(Scope *S, Declarator &D);
NamedDecl *HandleDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParameterLists);
void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S);
bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info);
bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
DeclarationName Name, SourceLocation Loc,
bool IsTemplateId);
void
diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals,
SourceLocation FallbackLoc,
SourceLocation ConstQualLoc = SourceLocation(),
SourceLocation VolatileQualLoc = SourceLocation(),
SourceLocation RestrictQualLoc = SourceLocation(),
SourceLocation AtomicQualLoc = SourceLocation(),
SourceLocation UnalignedQualLoc = SourceLocation());
static bool adjustContextForLocalExternDecl(DeclContext *&DC);
void DiagnoseFunctionSpecifiers(const DeclSpec &DS);
NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D,
const LookupResult &R);
NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R);
void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl,
const LookupResult &R);
void CheckShadow(Scope *S, VarDecl *D);
/// Warn if 'E', which is an expression that is about to be modified, refers
/// to a shadowing declaration.
void CheckShadowingDeclModification(Expr *E, SourceLocation Loc);
void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI);
private:
/// Map of current shadowing declarations to shadowed declarations. Warn if
/// it looks like the user is trying to modify the shadowing declaration.
llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls;
public:
void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange);
void handleTagNumbering(const TagDecl *Tag, Scope *TagScope);
void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
TypedefNameDecl *NewTD);
void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D);
NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous);
NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D,
LookupResult &Previous, bool &Redeclaration);
NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope,
ArrayRef<BindingDecl *> Bindings = None);
NamedDecl *
ActOnDecompositionDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists);
// Returns true if the variable declaration is a redeclaration
bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous);
void CheckVariableDeclarationType(VarDecl *NewVD);
bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit,
Expr *&Init);
void CheckCompleteVariableDeclaration(VarDecl *VD);
void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD);
void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D);
NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope);
bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);
bool CheckConstexprFunctionDecl(const FunctionDecl *FD);
bool CheckConstexprFunctionBody(const FunctionDecl *FD, Stmt *Body);
void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD);
void FindHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
void NoteHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
// Returns true if the function declaration is a redeclaration
bool CheckFunctionDeclaration(Scope *S,
FunctionDecl *NewFD, LookupResult &Previous,
bool IsMemberSpecialization);
bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl);
bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD,
QualType NewT, QualType OldT);
void CheckMain(FunctionDecl *FD, const DeclSpec &D);
void CheckMSVCRTEntryPoint(FunctionDecl *FD);
Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD, bool IsDefinition);
Decl *ActOnParamDeclarator(Scope *S, Declarator &D);
ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC,
SourceLocation Loc,
QualType T);
ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc,
SourceLocation NameLoc, IdentifierInfo *Name,
QualType T, TypeSourceInfo *TSInfo,
StorageClass SC);
void ActOnParamDefaultArgument(Decl *param,
SourceLocation EqualLoc,
Expr *defarg);
void ActOnParamUnparsedDefaultArgument(Decl *param,
SourceLocation EqualLoc,
SourceLocation ArgLoc);
void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc);
bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
SourceLocation EqualLoc);
void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit);
void ActOnUninitializedDecl(Decl *dcl);
void ActOnInitializerError(Decl *Dcl);
void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc);
void ActOnCXXForRangeDecl(Decl *D);
StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
IdentifierInfo *Ident,
ParsedAttributes &Attrs,
SourceLocation AttrEnd);
void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc);
void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc);
void CheckStaticLocalForDllExport(VarDecl *VD);
void FinalizeDeclaration(Decl *D);
DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
ArrayRef<Decl *> Group);
DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group);
/// Should be called on all declarations that might have attached
/// documentation comments.
void ActOnDocumentableDecl(Decl *D);
void ActOnDocumentableDecls(ArrayRef<Decl *> Group);
void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
SourceLocation LocAfterDecls);
void CheckForFunctionRedefinition(
FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D,
SkipBodyInfo *SkipBody = nullptr);
void ActOnStartOfObjCMethodDef(Scope *S, Decl *D);
bool isObjCMethodDecl(Decl *D) {
  // Null is tolerated so callers may pass an unvalidated declaration.
  if (!D)
    return false;
  return isa<ObjCMethodDecl>(D);
}
/// Determine whether we can delay parsing the body of a function or
/// function template until it is used, assuming we don't care about emitting
/// code for that function.
///
/// This will be \c false if we may need the body of the function in the
/// middle of parsing an expression (where it's impractical to switch to
/// parsing a different function), for instance, if it's constexpr in C++11
/// or has an 'auto' return type in C++14. These cases are essentially bugs.
bool canDelayFunctionBody(const Declarator &D);
/// Determine whether we can skip parsing the body of a function
/// definition, assuming we don't care about analyzing its body or emitting
/// code for that function.
///
/// This will be \c false only if we may need the body of the function in
/// order to parse the rest of the program (for instance, if it is
/// \c constexpr in C++11 or has an 'auto' return type in C++14).
bool canSkipFunctionBody(Decl *D);
void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation);
Decl *ActOnSkippedFunctionBody(Decl *Decl);
void ActOnFinishInlineFunctionDef(FunctionDecl *D);
/// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an
/// attribute for which parsing is delayed.
void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs);
/// Diagnose any unused parameters in the given sequence of
/// ParmVarDecl pointers.
void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters);
/// Diagnose whether the size of parameters or return value of a
/// function or obj-c method definition is pass-by-value and larger than a
/// specified threshold.
void
DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters,
QualType ReturnTy, NamedDecl *D);
void DiagnoseInvalidJumps(Stmt *Body);
Decl *ActOnFileScopeAsmDecl(Expr *expr,
SourceLocation AsmLoc,
SourceLocation RParenLoc);
/// Handle a C++11 empty-declaration and attribute-declaration.
Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList,
SourceLocation SemiLoc);
/// The kind of module-declaration that introduces a module unit.
enum class ModuleDeclKind {
  Interface, ///< 'export module X;'
  Implementation, ///< 'module X;'
  Partition, ///< 'module partition X;'
};
/// The parser has processed a module-declaration that begins the definition
/// of a module interface or implementation.
DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc,
SourceLocation ModuleLoc, ModuleDeclKind MDK,
ModuleIdPath Path);
/// The parser has processed a module import declaration.
///
/// \param AtLoc The location of the '@' symbol, if any.
///
/// \param ImportLoc The location of the 'import' keyword.
///
/// \param Path The module access path.
DeclResult ActOnModuleImport(SourceLocation AtLoc, SourceLocation ImportLoc,
ModuleIdPath Path);
/// The parser has processed a module import translated from a
/// #include or similar preprocessing directive.
void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has entered a submodule.
void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has left a submodule.
void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod);
/// Create an implicit import of the given module at the given
/// source location, for error recovery, if possible.
///
/// This routine is typically used when an entity found by name lookup
/// is actually hidden within a module that we know about but the user
/// has forgotten to import.
void createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
Module *Mod);
/// Kinds of missing import. Note, the values of these enumerators correspond
/// to %select values in diagnostics.
enum class MissingImportKind {
  Declaration,             ///< A declaration of the entity is not visible.
  Definition,              ///< The entity's definition is not visible.
  DefaultArgument,         ///< A default argument is not visible.
  ExplicitSpecialization,  ///< An explicit specialization is not visible.
  PartialSpecialization    ///< A partial specialization is not visible.
};
/// Diagnose that the specified declaration needs to be visible but
/// isn't, and suggest a module import that would resolve the problem.
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
MissingImportKind MIK, bool Recover = true);
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
SourceLocation DeclLoc, ArrayRef<Module *> Modules,
MissingImportKind MIK, bool Recover);
Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc,
SourceLocation LBraceLoc);
Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl,
SourceLocation RBraceLoc);
/// We've found a use of a templated declaration that would trigger an
/// implicit instantiation. Check that any relevant explicit specializations
/// and partial specializations are visible, and diagnose if not.
void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec);
/// We've found a use of a template specialization that would select a
/// partial specialization. Check that the partial specialization is visible,
/// and diagnose if not.
void checkPartialSpecializationVisibility(SourceLocation Loc,
NamedDecl *Spec);
/// Retrieve a suitable printing policy for diagnostics.
PrintingPolicy getPrintingPolicy() const {
  // Defer to the static overload, supplying this semantic analyzer's AST
  // context and preprocessor.
  const ASTContext &Ctx = Context;
  return getPrintingPolicy(Ctx, PP);
}
/// Retrieve a suitable printing policy for diagnostics.
static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx,
const Preprocessor &PP);
/// Scope actions.
void ActOnPopScope(SourceLocation Loc, Scope *S);
void ActOnTranslationUnitScope(Scope *S);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
RecordDecl *&AnonRecord);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
MultiTemplateParamsArg TemplateParams,
bool IsExplicitInstantiation,
RecordDecl *&AnonRecord);
Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
AccessSpecifier AS,
RecordDecl *Record,
const PrintingPolicy &Policy);
Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
RecordDecl *Record);
/// Common ways to introduce type names without a tag for use in diagnostics.
/// Keep in sync with err_tag_reference_non_tag.
enum NonTagKind {
  NTK_NonStruct,                // Not a struct.
  NTK_NonClass,                 // Not a class.
  NTK_NonUnion,                 // Not a union.
  NTK_NonEnum,                  // Not an enumeration.
  NTK_Typedef,                  // A typedef name.
  NTK_TypeAlias,                // A C++11 alias-declaration.
  NTK_Template,                 // A template name.
  NTK_TypeAliasTemplate,        // An alias template.
  NTK_TemplateTemplateArgument, // A template template argument.
};
/// Given a non-tag type declaration, returns an enum useful for indicating
/// what kind of non-tag type this is.
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK);
bool isAcceptableTagRedeclaration(const TagDecl *Previous,
TagTypeKind NewTag, bool isDefinition,
SourceLocation NewTagLoc,
const IdentifierInfo *Name);
/// How a tag (struct/class/union/enum) name is being used where it appears.
enum TagUseKind {
  TUK_Reference, // Reference to a tag: 'struct foo *X;'
  TUK_Declaration, // Fwd decl of a tag: 'struct foo;'
  TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;'
  TUK_Friend // Friend declaration: 'friend struct foo;'
};
Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc, const ParsedAttributesView &Attr,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl,
bool &IsDependent, SourceLocation ScopedEnumKWLoc,
bool ScopedEnumUsesClassTag, TypeResult UnderlyingType,
bool IsTypeSpecifier, bool IsTemplateParamOrArg,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
unsigned TagSpec, SourceLocation TagLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr,
MultiTemplateParamsArg TempParamLists);
TypeResult ActOnDependentTag(Scope *S,
unsigned TagSpec,
TagUseKind TUK,
const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation TagLoc,
SourceLocation NameLoc);
void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
IdentifierInfo *ClassName,
SmallVectorImpl<Decl *> &Decls);
Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth);
FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS);
MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD,
SourceLocation DeclStart, Declarator &D,
Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS,
const ParsedAttr &MSPropertyAttr);
FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T,
TypeSourceInfo *TInfo,
RecordDecl *Record, SourceLocation Loc,
bool Mutable, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
SourceLocation TSSL,
AccessSpecifier AS, NamedDecl *PrevDecl,
Declarator *D = nullptr);
bool CheckNontrivialField(FieldDecl *FD);
void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM);
/// Whether the "trivial_abi" attribute participates when deciding if a
/// special member function is trivial.
enum TrivialABIHandling {
  /// The triviality of a method unaffected by "trivial_abi".
  TAH_IgnoreTrivialABI,
  /// The triviality of a method affected by "trivial_abi".
  TAH_ConsiderTrivialABI
};
bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
TrivialABIHandling TAH = TAH_IgnoreTrivialABI,
bool Diagnose = false);
CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD);
void ActOnLastBitfield(SourceLocation DeclStart,
SmallVectorImpl<Decl *> &AllIvarDecls);
Decl *ActOnIvar(Scope *S, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
tok::ObjCKeywordKind visibility);
// This is used for both record definitions and ObjC interface declarations.
void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl,
ArrayRef<Decl *> Fields, SourceLocation LBrac,
SourceLocation RBrac, const ParsedAttributesView &AttrList);
/// ActOnTagStartDefinition - Invoked when we have entered the
/// scope of a tag's definition (e.g., for an enumeration, class,
/// struct, or union).
void ActOnTagStartDefinition(Scope *S, Decl *TagDecl);
/// Perform ODR-like check for C/ObjC when merging tag types from modules.
/// Differently from C++, actually parse the body and reject / error out
/// in case of a structural mismatch.
bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev,
SkipBodyInfo &SkipBody);
typedef void *SkippedDefinitionContext;
/// Invoked when we enter a tag definition that we're skipping.
SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD);
Decl *ActOnObjCContainerStartDefinition(Decl *IDecl);
/// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a
/// C++ record definition's base-specifiers clause and are starting its
/// member declarations.
void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl,
SourceLocation FinalLoc,
bool IsFinalSpelledSealed,
SourceLocation LBraceLoc);
/// ActOnTagFinishDefinition - Invoked once we have finished parsing
/// the definition of a tag (enumeration, class, struct, or union).
void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl,
SourceRange BraceRange);
void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context);
void ActOnObjCContainerFinishDefinition();
/// Invoked when we must temporarily exit the objective-c container
/// scope for parsing/looking-up C constructs.
///
/// Must be followed by a call to \see ActOnObjCReenterContainerContext
void ActOnObjCTemporaryExitContainerContext(DeclContext *DC);
void ActOnObjCReenterContainerContext(DeclContext *DC);
/// ActOnTagDefinitionError - Invoked when there was an unrecoverable
/// error parsing the definition of a tag.
void ActOnTagDefinitionError(Scope *S, Decl *TagDecl);
EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum,
EnumConstantDecl *LastEnumConst,
SourceLocation IdLoc,
IdentifierInfo *Id,
Expr *val);
bool CheckEnumUnderlyingType(TypeSourceInfo *TI);
bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
QualType EnumUnderlyingTy, bool IsFixed,
const EnumDecl *Prev);
/// Determine whether the body of an anonymous enumeration should be skipped.
/// \param II The name of the first enumerator.
SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II,
SourceLocation IILoc);
Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant,
SourceLocation IdLoc, IdentifierInfo *Id,
const ParsedAttributesView &Attrs,
SourceLocation EqualLoc, Expr *Val);
void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange,
Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S,
const ParsedAttributesView &Attr);
DeclContext *getContainingDC(DeclContext *DC);
/// Set the current declaration context until it gets popped.
void PushDeclContext(Scope *S, DeclContext *DC);
void PopDeclContext();
/// EnterDeclaratorContext - Used when we must lookup names in the context
/// of a declarator's nested name specifier.
void EnterDeclaratorContext(Scope *S, DeclContext *DC);
void ExitDeclaratorContext(Scope *S);
/// Push the parameters of D, which must be a function, into scope.
void ActOnReenterFunctionContext(Scope* S, Decl* D);
void ActOnExitFunctionContext();
DeclContext *getFunctionLevelDeclContext();
/// getCurFunctionDecl - If inside of a function body, this returns a pointer
/// to the function decl for the function being parsed. If we're currently
/// in a 'block', this returns the containing context.
FunctionDecl *getCurFunctionDecl();
/// getCurMethodDecl - If inside of a method body, this returns a pointer to
/// the method decl for the method being parsed. If we're currently
/// in a 'block', this returns the containing context.
ObjCMethodDecl *getCurMethodDecl();
/// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method
/// or C function we're in, otherwise return null. If we're currently
/// in a 'block', this returns the containing context.
NamedDecl *getCurFunctionOrMethodDecl();
/// Add this decl to the scope shadowed decl chains.
void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true);
/// Make the given externally-produced declaration visible at the
/// top level scope.
///
/// \param D The externally-produced declaration to push.
///
/// \param Name The name of the externally-produced declaration.
void pushExternalDeclIntoScope(NamedDecl *D, DeclarationName Name);
/// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
/// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns
/// true if 'D' belongs to the given declaration context.
///
/// \param AllowInlineNamespace If \c true, allow the declaration to be in the
/// enclosing namespace set of the context, rather than contained
/// directly within it.
bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr,
bool AllowInlineNamespace = false);
/// Finds the scope corresponding to the given decl context, if it
/// happens to be an enclosing scope. Otherwise return NULL.
static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);
/// Subroutines of ActOnDeclarator().
TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
TypeSourceInfo *TInfo);
bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);
/// Describes the kind of merge to perform for availability
/// attributes (including "deprecated", "unavailable", and "availability").
/// How availability attributes should be merged onto a declaration.
enum AvailabilityMergeKind {
  /// Don't merge availability attributes at all.
  AMK_None,
  /// Merge availability attributes for a redeclaration, which requires
  /// an exact match.
  AMK_Redeclaration,
  /// Merge availability attributes for an override, which requires
  /// an exact match or a weakening of constraints.
  AMK_Override,
  /// Merge availability attributes for an implementation of
  /// a protocol requirement.
  AMK_ProtocolImplementation,
};
/// Describes the kind of priority given to an availability attribute.
///
/// The sum of priorities determines the final priority of the attribute.
/// The final priority determines how the attribute will be merged.
/// An attribute with a lower priority will always remove higher priority
/// attributes for the specified platform when it is being applied. An
/// attribute with a higher priority will not be applied if the declaration
/// already has an availability attribute with a lower priority for the
/// specified platform. The final priority values are not expected to match
/// the values in this enumeration, but instead should be treated as a plain
/// integer value. This enumeration just names the priority weights that are
/// used to calculate that final value.
enum AvailabilityPriority : int {
  /// The availability attribute was specified explicitly next to the
  /// declaration.
  AP_Explicit = 0,
  /// The availability attribute was applied using '#pragma clang attribute'.
  AP_PragmaClangAttribute = 1,
  /// The availability attribute for a specific platform was inferred from
  /// an availability attribute for another platform.
  AP_InferredFromOtherPlatform = 2
};
/// Attribute merging methods. Return true if a new attribute was added.
AvailabilityAttr *mergeAvailabilityAttr(
NamedDecl *D, SourceRange Range, IdentifierInfo *Platform, bool Implicit,
VersionTuple Introduced, VersionTuple Deprecated, VersionTuple Obsoleted,
bool IsUnavailable, StringRef Message, bool IsStrict,
StringRef Replacement, AvailabilityMergeKind AMK, int Priority,
unsigned AttrSpellingListIndex);
TypeVisibilityAttr *mergeTypeVisibilityAttr(Decl *D, SourceRange Range,
TypeVisibilityAttr::VisibilityType Vis,
unsigned AttrSpellingListIndex);
VisibilityAttr *mergeVisibilityAttr(Decl *D, SourceRange Range,
VisibilityAttr::VisibilityType Vis,
unsigned AttrSpellingListIndex);
UuidAttr *mergeUuidAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex, StringRef Uuid);
DLLImportAttr *mergeDLLImportAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
DLLExportAttr *mergeDLLExportAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
MSInheritanceAttr *
mergeMSInheritanceAttr(Decl *D, SourceRange Range, bool BestCase,
unsigned AttrSpellingListIndex,
MSInheritanceAttr::Spelling SemanticSpelling);
FormatAttr *mergeFormatAttr(Decl *D, SourceRange Range,
IdentifierInfo *Format, int FormatIdx,
int FirstArg, unsigned AttrSpellingListIndex);
SectionAttr *mergeSectionAttr(Decl *D, SourceRange Range, StringRef Name,
unsigned AttrSpellingListIndex);
CodeSegAttr *mergeCodeSegAttr(Decl *D, SourceRange Range, StringRef Name,
unsigned AttrSpellingListIndex);
AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D, SourceRange Range,
IdentifierInfo *Ident,
unsigned AttrSpellingListIndex);
MinSizeAttr *mergeMinSizeAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
NoSpeculativeLoadHardeningAttr *
mergeNoSpeculativeLoadHardeningAttr(Decl *D,
const NoSpeculativeLoadHardeningAttr &AL);
SpeculativeLoadHardeningAttr *
mergeSpeculativeLoadHardeningAttr(Decl *D,
const SpeculativeLoadHardeningAttr &AL);
OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D,
const InternalLinkageAttr &AL);
CommonAttr *mergeCommonAttr(Decl *D, const ParsedAttr &AL);
CommonAttr *mergeCommonAttr(Decl *D, const CommonAttr &AL);
void mergeDeclAttributes(NamedDecl *New, Decl *Old,
AvailabilityMergeKind AMK = AMK_Redeclaration);
void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New,
LookupResult &OldDecls);
bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S,
bool MergeTypeWithOld);
bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old,
Scope *S, bool MergeTypeWithOld);
void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old);
void MergeVarDecl(VarDecl *New, LookupResult &Previous);
void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld);
void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old);
bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn);
void notePreviousDefinition(const NamedDecl *Old, SourceLocation New);
bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S);
/// AssignmentAction - This is used by all the assignment diagnostic functions
/// to represent what is actually causing the operation: the specific kind of
/// context (assignment, argument passing, return, etc.) that is driving the
/// attempted conversion.
enum AssignmentAction {
  AA_Assigning,
  AA_Passing,
  AA_Returning,
  AA_Converting,
  AA_Initializing,
  AA_Sending,
  AA_Casting,
  /// Passing an argument under a CF-audited bridging context.
  /// NOTE(review): inferred from the name — confirm at the diagnostic sites.
  AA_Passing_CFAudited
};
/// C++ Overloading.
///
/// The possible outcomes of checking whether a new declaration is a valid
/// overload of a set of existing declarations.
enum OverloadKind {
  /// This is a legitimate overload: the existing declarations are
  /// functions or function templates with different signatures.
  Ovl_Overload,
  /// This is not an overload because the signature exactly matches
  /// an existing declaration.
  Ovl_Match,
  /// This is not an overload because the lookup results contain a
  /// non-function.
  Ovl_NonFunction
};
OverloadKind CheckOverload(Scope *S,
FunctionDecl *New,
const LookupResult &OldDecls,
NamedDecl *&OldDecl,
bool IsForUsingDecl);
bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl,
bool ConsiderCudaAttrs = true);
/// Checks availability of the function depending on the current
/// function context. Inside an unavailable function, unavailability is
/// ignored.
///
/// \returns true if \p FD is unavailable and current context is inside
/// an available function, false otherwise.
bool isFunctionConsideredUnavailable(FunctionDecl *FD);
ImplicitConversionSequence
TryImplicitConversion(Expr *From, QualType ToType,
bool SuppressUserConversions,
bool AllowExplicit,
bool InOverloadResolution,
bool CStyle,
bool AllowObjCWritebackConversion);
bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType);
bool IsFloatingPointPromotion(QualType FromType, QualType ToType);
bool IsComplexPromotion(QualType FromType, QualType ToType);
bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCWritebackConversion(QualType FromType, QualType ToType,
QualType &ConvertedType);
bool IsBlockPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType);
bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType,
const FunctionProtoType *NewType,
unsigned *ArgPos = nullptr);
void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
QualType FromType, QualType ToType);
void maybeExtendBlockObject(ExprResult &E);
CastKind PrepareCastToObjCObjectPointer(ExprResult &E);
bool CheckPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath& BasePath,
bool IgnoreBaseAccess,
bool Diagnose = true);
bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType &ConvertedType);
bool CheckMemberPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath &BasePath,
bool IgnoreBaseAccess);
bool IsQualificationConversion(QualType FromType, QualType ToType,
bool CStyle, bool &ObjCLifetimeConversion);
bool IsFunctionConversion(QualType FromType, QualType ToType,
QualType &ResultTy);
bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType);
bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg);
ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
const VarDecl *NRVOCandidate,
QualType ResultType,
Expr *Value,
bool AllowNRVO = true);
bool CanPerformCopyInitialization(const InitializedEntity &Entity,
ExprResult Init);
ExprResult PerformCopyInitialization(const InitializedEntity &Entity,
SourceLocation EqualLoc,
ExprResult Init,
bool TopLevelOfInitList = false,
bool AllowExplicit = false);
ExprResult PerformObjectArgumentInitialization(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
CXXMethodDecl *Method);
/// Check that the lifetime of the initializer (and its subobjects) is
/// sufficient for initializing the entity, and perform lifetime extension
/// (when permitted) if not.
void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init);
ExprResult PerformContextuallyConvertToBool(Expr *From);
ExprResult PerformContextuallyConvertToObjCPointer(Expr *From);
/// Contexts in which a converted constant expression is required.
///
/// Used to select the appropriate diagnostics when checking a converted
/// constant expression.
enum CCEKind {
  CCEK_CaseValue,   ///< Expression in a case label.
  CCEK_Enumerator,  ///< Enumerator value with fixed underlying type.
  CCEK_TemplateArg, ///< Value of a non-type template parameter.
  CCEK_NewExpr,     ///< Constant expression in a noptr-new-declarator.
  CCEK_ConstexprIf  ///< Condition in a constexpr if statement.
};
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
llvm::APSInt &Value, CCEKind CCE);
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
APValue &Value, CCEKind CCE);
/// Abstract base class used to perform a contextual implicit
/// conversion from an expression to any type passing a filter.
class ContextualImplicitConverter {
public:
  // Whether to suppress diagnostics entirely.
  // NOTE(review): semantics inferred from the name; enforced by the code
  // driving the conversion, which is not visible here — confirm there.
  bool Suppress;
  // Whether to suppress only conversion-related diagnostics.
  // NOTE(review): inferred from the name — confirm at use sites.
  bool SuppressConversion;

  ContextualImplicitConverter(bool Suppress = false,
                              bool SuppressConversion = false)
      : Suppress(Suppress), SuppressConversion(SuppressConversion) {}

  /// Determine whether the specified type is a valid destination type
  /// for this conversion.
  virtual bool match(QualType T) = 0;

  /// Emits a diagnostic complaining that the expression does not have
  /// integral or enumeration type.
  virtual SemaDiagnosticBuilder
  diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0;

  /// Emits a diagnostic when the expression has incomplete class type.
  virtual SemaDiagnosticBuilder
  diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0;

  /// Emits a diagnostic when the only matching conversion function
  /// is explicit.
  virtual SemaDiagnosticBuilder diagnoseExplicitConv(
      Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;

  /// Emits a note for the explicit conversion function.
  virtual SemaDiagnosticBuilder
  noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;

  /// Emits a diagnostic when there are multiple possible conversion
  /// functions.
  virtual SemaDiagnosticBuilder
  diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0;

  /// Emits a note for one of the candidate conversions.
  virtual SemaDiagnosticBuilder
  noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;

  /// Emits a diagnostic when we picked a conversion function
  /// (for cases when we are not allowed to pick a conversion function).
  virtual SemaDiagnosticBuilder diagnoseConversion(
      Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;

  virtual ~ContextualImplicitConverter() {}
};
/// A ContextualImplicitConverter that matches integral and (optionally
/// scoped) enumeration destination types, as needed for contexts requiring
/// an integral constant expression.
class ICEConvertDiagnoser : public ContextualImplicitConverter {
  // Whether scoped enumeration types are acceptable destination types.
  bool AllowScopedEnumerations;

public:
  ICEConvertDiagnoser(bool AllowScopedEnumerations,
                      bool Suppress, bool SuppressConversion)
      : ContextualImplicitConverter(Suppress, SuppressConversion),
        AllowScopedEnumerations(AllowScopedEnumerations) {}

  /// Match an integral or (possibly scoped) enumeration type.
  bool match(QualType T) override;

  // Route the generic "no match" diagnostic to the ICE-specific
  // "not an integer" diagnostic.
  SemaDiagnosticBuilder
  diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override {
    return diagnoseNotInt(S, Loc, T);
  }

  /// Emits a diagnostic complaining that the expression does not have
  /// integral or enumeration type.
  virtual SemaDiagnosticBuilder
  diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0;
};
/// Perform a contextual implicit conversion.
ExprResult PerformContextualImplicitConversion(
SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter);
/// The kind of subscripting supported by an Objective-C object,
/// as determined by CheckSubscriptingKind.
enum ObjCSubscriptKind {
  /// Array-style (integer-indexed) subscripting.
  OS_Array,
  /// Dictionary-style (object-keyed) subscripting.
  OS_Dictionary,
  /// The expression cannot be used for subscripting.
  OS_Error
};
ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);
// Note that LK_String is intentionally after the other literals, as
// this is used for diagnostics logic.
/// Classifies an expression as one of the Objective-C literal kinds.
enum ObjCLiteralKind {
  /// An Objective-C array literal.
  LK_Array,
  /// An Objective-C dictionary literal.
  LK_Dictionary,
  /// An Objective-C numeric literal.
  LK_Numeric,
  /// An Objective-C boxed expression.
  LK_Boxed,
  /// A string literal.
  LK_String,
  /// A block expression.
  LK_Block,
  /// Not a recognized literal kind.
  LK_None
};
ObjCLiteralKind CheckLiteralKind(Expr *FromE);
ExprResult PerformObjectMemberConversion(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
NamedDecl *Member);
// Members have to be NamespaceDecl* or TranslationUnitDecl*.
// TODO: make this a typesafe union.
typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet;
typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet;
using ADLCallKind = CallExpr::ADLCallKind;
void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool AllowExplicit = false,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
ConversionSequenceList EarlyConversions = None);
void AddFunctionCandidates(const UnresolvedSetImpl &Functions,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool FirstArgumentIsBase = false);
void AddMethodCandidate(DeclAccessPair FoundDecl,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversion = false);
void AddMethodCandidate(CXXMethodDecl *Method,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
ConversionSequenceList EarlyConversions = None);
void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false);
void AddTemplateOverloadCandidate(
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false,
bool PartialOverloading = false,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL);
bool CheckNonDependentConversions(FunctionTemplateDecl *FunctionTemplate,
ArrayRef<QualType> ParamTypes,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
ConversionSequenceList &Conversions,
bool SuppressUserConversions,
CXXRecordDecl *ActingContext = nullptr,
QualType ObjectType = QualType(),
Expr::Classification
ObjectClassification = {});
void AddConversionCandidate(CXXConversionDecl *Conversion,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
Expr *From, QualType ToType,
OverloadCandidateSet& CandidateSet,
bool AllowObjCConversionOnExplicit,
bool AllowResultConversion = true);
void AddTemplateConversionCandidate(FunctionTemplateDecl *FunctionTemplate,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet,
bool AllowObjCConversionOnExplicit,
bool AllowResultConversion = true);
void AddSurrogateCandidate(CXXConversionDecl *Conversion,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
const FunctionProtoType *Proto,
Expr *Object, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddMemberOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
SourceRange OpRange = SourceRange());
void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool IsAssignmentOperator = false,
unsigned NumContextualBoolArguments = 0);
void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddArgumentDependentLookupCandidates(DeclarationName Name,
SourceLocation Loc,
ArrayRef<Expr *> Args,
TemplateArgumentListInfo *ExplicitTemplateArgs,
OverloadCandidateSet& CandidateSet,
bool PartialOverloading = false);
// Emit as a 'note' the specific overload candidate
void NoteOverloadCandidate(NamedDecl *Found, FunctionDecl *Fn,
QualType DestType = QualType(),
bool TakingAddress = false);
// Emit as a series of 'note's all template and non-templates identified by
// the expression Expr
void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(),
bool TakingAddress = false);
/// Check the enable_if expressions on the given function. Returns the first
/// failing attribute, or NULL if they were all successful.
EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args,
bool MissingImplicitThis = false);
/// Find the failed Boolean condition within a given Boolean
/// constant expression, and describe it with a string.
std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// non-ArgDependent DiagnoseIfAttrs.
///
/// Argument-dependent diagnose_if attributes should be checked each time a
/// function is used as a direct callee of a function call.
///
/// Returns true if any errors were emitted.
bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function,
const Expr *ThisArg,
ArrayRef<const Expr *> Args,
SourceLocation Loc);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// ArgDependent DiagnoseIfAttrs.
///
/// Argument-independent diagnose_if attributes should be checked on every use
/// of a function.
///
/// Returns true if any errors were emitted.
bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND,
SourceLocation Loc);
/// Returns whether the given function's address can be taken or not,
/// optionally emitting a diagnostic if the address can't be taken.
///
/// Returns false if taking the address of the function is illegal.
bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function,
bool Complain = false,
SourceLocation Loc = SourceLocation());
// [PossiblyAFunctionType] --> [Return]
// NonFunctionType --> NonFunctionType
// R (A) --> R(A)
// R (*)(A) --> R (A)
// R (&)(A) --> R (A)
// R (S::*)(A) --> R (A)
QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType);
FunctionDecl *
ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr,
QualType TargetType,
bool Complain,
DeclAccessPair &Found,
bool *pHadMultipleCandidates = nullptr);
FunctionDecl *
resolveAddressOfOnlyViableOverloadCandidate(Expr *E,
DeclAccessPair &FoundResult);
bool resolveAndFixAddressOfOnlyViableOverloadCandidate(
ExprResult &SrcExpr, bool DoFunctionPointerConversion = false);
FunctionDecl *
ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
bool Complain = false,
DeclAccessPair *Found = nullptr);
bool ResolveAndFixSingleFunctionTemplateSpecialization(
ExprResult &SrcExpr,
bool DoFunctionPointerConverion = false,
bool Complain = false,
SourceRange OpRangeForComplaining = SourceRange(),
QualType DestTypeForComplaining = QualType(),
unsigned DiagIDForComplaining = 0);
Expr *FixOverloadedFunctionReference(Expr *E,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
ExprResult FixOverloadedFunctionReference(ExprResult,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool PartialOverloading = false);
// An enum used to represent the different possible results of building a
// range-based for loop.
enum ForRangeStatus {
  /// The begin/end call was built successfully.
  FRS_Success,
  /// No viable begin/end function was found.
  FRS_NoViableFunction,
  /// A diagnostic has already been issued.
  FRS_DiagnosticIssued
};
ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc,
SourceLocation RangeLoc,
const DeclarationNameInfo &NameInfo,
LookupResult &MemberLookup,
OverloadCandidateSet *CandidateSet,
Expr *Range, ExprResult *CallExpr);
ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn,
UnresolvedLookupExpr *ULE,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc,
Expr *ExecConfig,
bool AllowTypoCorrection=true,
bool CalleesAddressIsTaken=false);
bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
MultiExprArg Args, SourceLocation RParenLoc,
OverloadCandidateSet *CandidateSet,
ExprResult *Result);
ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc,
UnaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *input, bool RequiresADL = true);
ExprResult CreateOverloadedBinOp(SourceLocation OpLoc,
BinaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS,
bool RequiresADL = true);
ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
SourceLocation RLoc,
Expr *Base,Expr *Idx);
ExprResult
BuildCallToMemberFunction(Scope *S, Expr *MemExpr,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult
BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
bool *NoArrowOperatorFound = nullptr);
/// CheckCallReturnType - Checks that a call expression's return type is
/// complete. Returns true on failure. The location passed in is the location
/// that best represents the call.
bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc,
CallExpr *CE, FunctionDecl *FD);
/// Helpers for dealing with blocks and functions.
bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
bool CheckParameterNames);
void CheckCXXDefaultArguments(FunctionDecl *FD);
void CheckExtraCXXDefaultArguments(Declarator &D);
Scope *getNonFieldDeclScope(Scope *S);
/// \name Name lookup
///
/// These routines provide name lookup that is used during semantic
/// analysis to resolve the various kinds of names (identifiers,
/// overloaded operator names, constructor names, etc.) into zero or
/// more declarations within a particular scope. The major entry
/// points are LookupName, which performs unqualified name lookup,
/// and LookupQualifiedName, which performs qualified name lookup.
///
/// All name lookup is performed based on some specific criteria,
/// which specify what names will be visible to name lookup and how
/// far name lookup should work. These criteria are important both
/// for capturing language semantics (certain lookups will ignore
/// certain names, for example) and for performance, since name
/// lookup is often a bottleneck in the compilation of C++. Name
/// lookup criteria are specified via the LookupNameKind enumeration.
///
/// The results of name lookup can vary based on the kind of name
/// lookup performed, the current language, and the translation
/// unit. In C, for example, name lookup will either return nothing
/// (no entity found) or a single declaration. In C++, name lookup
/// can additionally refer to a set of overloaded functions or
/// result in an ambiguity. All of the possible results of name
/// lookup are captured by the LookupResult class, which provides
/// the ability to distinguish among them.
//@{
/// Describes the kind of name lookup to perform.
/// Describes the kind of name lookup to perform.
///
/// Each kind selects which identifier namespaces are visible to the lookup
/// and how far up the scope chain the lookup may reach.
enum LookupNameKind {
  /// Ordinary name lookup, which finds ordinary names (functions,
  /// variables, typedefs, etc.) in C and most kinds of names
  /// (functions, variables, members, types, etc.) in C++.
  LookupOrdinaryName = 0,
  /// Tag name lookup, which finds the names of enums, classes,
  /// structs, and unions.
  LookupTagName,
  /// Label name lookup.
  LookupLabel,
  /// Member name lookup, which finds the names of
  /// class/struct/union members.
  LookupMemberName,
  /// Look up of an operator name (e.g., operator+) for use with
  /// operator overloading. This lookup is similar to ordinary name
  /// lookup, but will ignore any declarations that are class members.
  LookupOperatorName,
  /// Look up of a name that precedes the '::' scope resolution
  /// operator in C++. This lookup completely ignores operator, object,
  /// function, and enumerator names (C++ [basic.lookup.qual]p1).
  LookupNestedNameSpecifierName,
  /// Look up a namespace name within a C++ using directive or
  /// namespace alias definition, ignoring non-namespace names (C++
  /// [basic.lookup.udir]p1).
  LookupNamespaceName,
  /// Look up all declarations in a scope with the given name,
  /// including resolved using declarations. This is appropriate
  /// for checking redeclarations for a using declaration.
  LookupUsingDeclName,
  /// Look up an ordinary name that is going to be redeclared as a
  /// name with linkage. This lookup ignores any declarations that
  /// are outside of the current scope unless they have linkage. See
  /// C99 6.2.2p4-5 and C++ [basic.link]p6.
  LookupRedeclarationWithLinkage,
  /// Look up a friend of a local class. This lookup does not look
  /// outside the innermost non-class scope. See C++11 [class.friend]p11.
  LookupLocalFriendName,
  /// Look up the name of an Objective-C protocol.
  LookupObjCProtocolName,
  /// Look up implicit 'self' parameter of an objective-c method.
  LookupObjCImplicitSelfParam,
  /// Look up the name of an OpenMP user-defined reduction operation.
  LookupOMPReductionName,
  /// Look up the name of an OpenMP user-defined mapper.
  LookupOMPMapperName,
  /// Look up any declaration with any name.
  LookupAnyName
};
/// Specifies whether (or how) name lookup is being performed for a
/// redeclaration (vs. a reference).
///
/// See forRedeclarationInCurContext() for how the appropriate kind is
/// chosen based on the current declaration context.
enum RedeclarationKind {
  /// The lookup is a reference to this name that is not for the
  /// purpose of redeclaring the name.
  NotForRedeclaration = 0,
  /// The lookup results will be used for redeclaration of a name,
  /// if an entity by that name already exists and is visible.
  ForVisibleRedeclaration,
  /// The lookup results will be used for redeclaration of a name
  /// with external linkage; non-visible lookup results with external linkage
  /// may also be found.
  ForExternalRedeclaration
};
/// Select the redeclaration-lookup mode appropriate for the current
/// declaration context.
RedeclarationKind forRedeclarationInCurContext() {
  // A declaration with an owning module for linkage can never link against
  // anything that is not visible. We don't need to check linkage here; if
  // the context has internal linkage, redeclaration lookup won't find things
  // from other TUs, and we can't safely compute linkage yet in general.
  auto *ContextDecl = cast<Decl>(CurContext);
  return ContextDecl->getOwningModuleForLinkage(/*IgnoreLinkage*/ true)
             ? ForVisibleRedeclaration
             : ForExternalRedeclaration;
}
/// The possible outcomes of name lookup for a literal operator.
/// The possible outcomes of name lookup for a literal operator.
///
/// Returned by LookupLiteralOperator to tell the caller which form of
/// user-defined-literal call to build.
enum LiteralOperatorLookupResult {
  /// The lookup resulted in an error.
  LOLR_Error,
  /// The lookup found no match but no diagnostic was issued.
  LOLR_ErrorNoDiagnostic,
  /// The lookup found a single 'cooked' literal operator, which
  /// expects a normal literal to be built and passed to it.
  LOLR_Cooked,
  /// The lookup found a single 'raw' literal operator, which expects
  /// a string literal containing the spelling of the literal token.
  LOLR_Raw,
  /// The lookup found an overload set of literal operator templates,
  /// which expect the characters of the spelling of the literal token to be
  /// passed as a non-type template argument pack.
  LOLR_Template,
  /// The lookup found an overload set of literal operator templates,
  /// which expect the character type and characters of the spelling of the
  /// string literal token to be passed as template arguments.
  LOLR_StringTemplate
};
SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D,
CXXSpecialMember SM,
bool ConstArg,
bool VolatileArg,
bool RValueThis,
bool ConstThis,
bool VolatileThis);
typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator;
typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)>
TypoRecoveryCallback;
private:
bool CppLookupName(LookupResult &R, Scope *S);
/// Per-TypoExpr state tracked while a delayed typo correction is pending.
struct TypoExprState {
  // Produces and ranks candidate corrections for the typo.
  std::unique_ptr<TypoCorrectionConsumer> Consumer;
  // Invoked with the chosen TypoCorrection to emit diagnostics
  // (see the TypoDiagnosticGenerator typedef above).
  TypoDiagnosticGenerator DiagHandler;
  // Invoked to rebuild an expression from a chosen correction
  // (see the TypoRecoveryCallback typedef above).
  TypoRecoveryCallback RecoveryHandler;
  // Move-only: Consumer is a unique_ptr, so no copy operations.
  TypoExprState();
  TypoExprState(TypoExprState &&other) noexcept;
  TypoExprState &operator=(TypoExprState &&other) noexcept;
};
/// The set of unhandled TypoExprs and their associated state.
llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos;
/// Creates a new TypoExpr AST node.
TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC);
// The set of known/encountered (unique, canonicalized) NamespaceDecls.
//
// The boolean value will be true to indicate that the namespace was loaded
// from an AST/PCH file, or false otherwise.
llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces;
/// Whether we have already loaded known namespaces from an external
/// source.
bool LoadedExternalKnownNamespaces;
/// Helper for CorrectTypo and CorrectTypoDelayed used to create and
/// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction
/// should be skipped entirely.
std::unique_ptr<TypoCorrectionConsumer>
makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
std::unique_ptr<CorrectionCandidateCallback> CCC,
DeclContext *MemberContext, bool EnteringContext,
const ObjCObjectPointerType *OPT,
bool ErrorRecovery);
public:
const TypoExprState &getTypoExprState(TypoExpr *TE) const;
/// Clears the state of the given TypoExpr.
void clearDelayedTypo(TypoExpr *TE);
/// Look up a name, looking for a single declaration. Return
/// null if the results were absent, ambiguous, or overloaded.
///
/// It is preferable to use the elaborated form and explicitly handle
/// ambiguity and overloaded.
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name,
SourceLocation Loc,
LookupNameKind NameKind,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupName(LookupResult &R, Scope *S,
bool AllowBuiltinCreation = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
bool InUnqualifiedLookup = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
CXXScopeSpec &SS);
bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
bool AllowBuiltinCreation = false,
bool EnteringContext = false);
ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class);
void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
QualType T1, QualType T2,
UnresolvedSetImpl &Functions);
LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc,
SourceLocation GnuLabelLoc = SourceLocation());
DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class);
CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class);
CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);
bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id);
LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R,
ArrayRef<QualType> ArgTys,
bool AllowRaw,
bool AllowTemplate,
bool AllowStringTemplate,
bool DiagnoseMissing);
bool isKnownName(StringRef name);
void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
ArrayRef<Expr *> Args, ADLResult &Functions);
void LookupVisibleDecls(Scope *S, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool LoadExternal = true);
void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool IncludeDependentBases = false,
bool LoadExternal = true);
/// Distinguishes why typo correction is being attempted, which affects
/// which corrections are acceptable.
enum CorrectTypoKind {
  CTK_NonError,     // CorrectTypo used in a non error recovery situation.
  CTK_ErrorRecovery // CorrectTypo used in normal error recovery.
};
TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind,
Scope *S, CXXScopeSpec *SS,
std::unique_ptr<CorrectionCandidateCallback> CCC,
CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr,
bool RecordFailure = true);
TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
std::unique_ptr<CorrectionCandidateCallback> CCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC, CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr);
/// Process any TypoExprs in the given Expr and its children,
/// generating diagnostics as appropriate and returning a new Expr if there
/// were typos that were all successfully corrected and ExprError if one or
/// more typos could not be corrected.
///
/// \param E The Expr to check for TypoExprs.
///
/// \param InitDecl A VarDecl to avoid because the Expr being corrected is its
/// initializer.
///
/// \param Filter A function applied to a newly rebuilt Expr to determine if
/// it is an acceptable/usable result from a single combination of typo
/// corrections. As long as the filter returns ExprError, different
/// combinations of corrections will be tried until all are exhausted.
ExprResult
CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; });
/// Convenience overload: same as the primary Expr* form, but with no
/// initializer declaration to exclude from corrections.
ExprResult
CorrectDelayedTyposInExpr(Expr *E,
                          llvm::function_ref<ExprResult(Expr *)> Filter) {
  return CorrectDelayedTyposInExpr(E, nullptr, Filter);
}
/// ExprResult overload: checks for an invalid result before delegating to
/// the Expr* form.
///
/// \param ER The (possibly invalid) expression to check for TypoExprs.
/// \param InitDecl A VarDecl to avoid because the Expr being corrected is
/// its initializer.
/// \param Filter Applied to each rebuilt Expr to accept or reject a
/// combination of typo corrections.
ExprResult
CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr,
                          llvm::function_ref<ExprResult(Expr *)> Filter =
                              [](Expr *E) -> ExprResult { return E; }) {
  // Forward InitDecl as well: previously it was silently dropped (the call
  // resolved to the two-argument overload), so typo correction could
  // suggest the very variable being initialized as its own initializer.
  return ER.isInvalid()
             ? ER
             : CorrectDelayedTyposInExpr(ER.get(), InitDecl, Filter);
}
/// Convenience overload: ExprResult form with no initializer declaration
/// to exclude from corrections.
ExprResult
CorrectDelayedTyposInExpr(ExprResult ER,
                          llvm::function_ref<ExprResult(Expr *)> Filter) {
  return CorrectDelayedTyposInExpr(ER, nullptr, Filter);
}
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
bool ErrorRecovery = true);
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
const PartialDiagnostic &PrevNote,
bool ErrorRecovery = true);
void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F);
void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc,
ArrayRef<Expr *> Args,
AssociatedNamespaceSet &AssociatedNamespaces,
AssociatedClassSet &AssociatedClasses);
void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S,
bool ConsiderLinkage, bool AllowInlineNamespace);
bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old);
void DiagnoseAmbiguousLookup(LookupResult &Result);
//@}
ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id,
SourceLocation IdLoc,
bool TypoCorrection = false);
NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
Scope *S, bool ForRedeclaration,
SourceLocation Loc);
NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II,
Scope *S);
void AddKnownFunctionAttributes(FunctionDecl *FD);
// More parsing and symbol table subroutines.
void ProcessPragmaWeak(Scope *S, Decl *D);
// Decl attributes - this routine is the top level dispatcher.
void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD);
// Helper for delayed processing of attributes.
void ProcessDeclAttributeDelayed(Decl *D,
const ParsedAttributesView &AttrList);
void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL,
bool IncludeCXX11Attributes = true);
bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
const ParsedAttributesView &AttrList);
void checkUnusedDeclAttributes(Declarator &D);
/// Determine if type T is a valid subject for a nonnull and similar
/// attributes. By default, we look through references (the behavior used by
/// nonnull), but if the second parameter is true, then we treat a reference
/// type as valid.
bool isValidPointerAttrType(QualType T, bool RefOkay = false);
bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value);
bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC,
const FunctionDecl *FD = nullptr);
bool CheckAttrTarget(const ParsedAttr &CurrAttr);
bool CheckAttrNoArgs(const ParsedAttr &CurrAttr);
bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum,
StringRef &Str,
SourceLocation *ArgLocation = nullptr);
bool checkSectionName(SourceLocation LiteralLoc, StringRef Str);
bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str);
bool checkMSInheritanceAttrOnDefinition(
CXXRecordDecl *RD, SourceRange Range, bool BestCase,
MSInheritanceAttr::Spelling SemanticSpelling);
void CheckAlignasUnderalignment(Decl *D);
/// Adjust the calling convention of a method to be the ABI default if it
/// wasn't specified explicitly. This handles method types formed from
/// function type typedefs and typename template arguments.
void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor,
SourceLocation Loc);
// Check if there is an explicit attribute, but only look through parens.
// The intent is to look for an attribute on the current declarator, but not
// one that came from a typedef.
bool hasExplicitCallingConv(QualType &T);
/// Get the outermost AttributedType node that sets a calling convention.
/// Valid types should not have multiple attributes with different CCs.
const AttributedType *getCallingConvAttributedType(QualType T) const;
/// Stmt attributes - this routine is the top level dispatcher.
StmtResult ProcessStmtAttributes(Stmt *Stmt,
const ParsedAttributesView &Attrs,
SourceRange Range);
void WarnConflictingTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
void CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
ObjCMethodDecl *Overridden,
bool IsProtocolMethodDecl);
/// WarnExactTypedMethods - This routine issues a warning if method
/// implementation declaration matches exactly that of its declaration.
void WarnExactTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;
/// CheckImplementationIvars - This routine checks if the instance variables
/// listed in the implelementation match those listed in the interface.
void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
ObjCIvarDecl **Fields, unsigned nIvars,
SourceLocation Loc);
/// ImplMethodsVsClassMethods - This is main routine to warn if any method
/// remains unimplemented in the class or category \@implementation.
void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool IncompleteImpl = false);
/// DiagnoseUnimplementedProperties - This routine warns on those properties
/// which must be implemented by this implementation.
void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl *CDecl,
bool SynthesizeProperties);
/// Diagnose any null-resettable synthesized setters.
void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);
/// DefaultSynthesizeProperties - This routine default synthesizes all
/// properties which must be synthesized in the class's \@implementation.
void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl,
ObjCInterfaceDecl *IDecl,
SourceLocation AtEnd);
void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd);
/// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
/// an ivar synthesized for 'Method' and 'Method' is a property accessor
/// declared in class 'IFace'.
bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
ObjCMethodDecl *Method, ObjCIvarDecl *IV);
/// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if ivar which
/// backs the property is not used in the property's accessor.
void DiagnoseUnusedBackingIvarInAccessor(Scope *S,
const ObjCImplementationDecl *ImplD);
/// GetIvarBackingPropertyAccessor - If method is a property setter/getter and
/// it property has a backing ivar, returns this ivar; otherwise, returns NULL.
/// It also returns ivar's property on success.
ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method,
const ObjCPropertyDecl *&PDecl) const;
/// Called by ActOnProperty to handle \@property declarations in
/// class extensions.
ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
unsigned &Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind);
/// Called by ActOnProperty and HandlePropertyInClassExtension to
/// handle creating the ObjcPropertyDecl for a category or \@interface.
ObjCPropertyDecl *CreatePropertyDecl(Scope *S,
ObjCContainerDecl *CDecl,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
const unsigned Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
/// AtomicPropertySetterGetterRules - This routine enforces the rule (via
/// warning) when atomic property has one but not the other user-declared
/// setter or getter.
void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl,
ObjCInterfaceDecl* IDecl);
void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);
void DiagnoseMissingDesignatedInitOverrides(
const ObjCImplementationDecl *ImplD,
const ObjCInterfaceDecl *IFD);
void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);
/// How strictly two Objective-C method declarations' types are compared
/// (used by MatchTwoMethodDeclarations below, which defaults to MMS_strict).
enum MethodMatchStrategy {
  MMS_loose,
  MMS_strict
};
/// MatchTwoMethodDeclarations - Checks if two methods' type match and returns
/// true, or false, accordingly.
bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
const ObjCMethodDecl *PrevMethod,
MethodMatchStrategy strategy = MMS_strict);
/// MatchAllMethodDeclarations - Check methods declaraed in interface or
/// or protocol against those declared in their implementations.
void MatchAllMethodDeclarations(const SelectorSet &InsMap,
const SelectorSet &ClsMap,
SelectorSet &InsMapSeen,
SelectorSet &ClsMapSeen,
ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool &IncompleteImpl,
bool ImmediateClass,
bool WarnCategoryMethodImpl=false);
/// CheckCategoryVsClassMethodMatches - Checks that methods implemented in
/// category matches with those implemented in its primary class and
/// warns each time an exact match is found.
void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);
/// Add the given method to the list of globally-known methods.
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);
private:
/// AddMethodToGlobalPool - Add an instance or factory method to the global
/// pool. See descriptoin of AddInstanceMethodToGlobalPool.
void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);
/// LookupMethodInGlobalPool - Returns the instance or factory method and
/// optionally warns if there are multiple signatures.
ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass,
bool instance);
public:
/// - Returns instance or factory methods in global method pool for
/// given selector. It checks the desired kind first, if none is found, and
/// parameter checkTheOther is set, it then checks the other kind. If no such
/// method or only one method is found, function returns false; otherwise, it
/// returns true.
bool
CollectMultipleMethodsInGlobalPool(Selector Sel,
SmallVectorImpl<ObjCMethodDecl*>& Methods,
bool InstanceFirst, bool CheckTheOther,
const ObjCObjectType *TypeBound = nullptr);
bool
AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
SourceRange R, bool receiverIdOrClass,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
void
DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods,
Selector Sel, SourceRange R,
bool receiverIdOrClass);
private:
/// - Returns a selector which best matches given argument list or
/// nullptr if none could be found
ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
bool IsInstance,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
/// Note that typo correction failed for \p Typo at \p TypoLoc, and hand
/// back an empty (non-viable) correction.
TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
                                bool RecordFailure = true) {
  if (RecordFailure) {
    // Remember the failing location so the same correction isn't retried.
    TypoCorrectionFailures[Typo].insert(TypoLoc);
  }
  return TypoCorrection();
}
public:
/// AddInstanceMethodToGlobalPool - All instance methods in a translation
/// unit are added to a global pool. This allows us to efficiently associate
/// a selector with a method declaration for purposes of typechecking
/// messages sent to "id" (where the class of the object is unknown).
void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl = false) {
  AddMethodToGlobalPool(Method, impl, /*instance=*/true);
}
/// AddFactoryMethodToGlobalPool - Same as AddInstanceMethodToGlobalPool,
/// but for factory (class) methods.
void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl = false) {
  AddMethodToGlobalPool(Method, impl, /*instance=*/false);
}
/// AddAnyMethodToGlobalPool - Add any method, instance or factory to global
/// pool.
void AddAnyMethodToGlobalPool(Decl *D);
/// LookupInstanceMethodInGlobalPool - Returns the instance method for
/// \p Sel from the global pool, warning if there are multiple signatures.
ObjCMethodDecl *
LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
                                 bool receiverIdOrClass = false) {
  return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                  /*instance=*/true);
}
/// LookupFactoryMethodInGlobalPool - Returns the factory method for
/// \p Sel from the global pool, warning if there are multiple signatures.
ObjCMethodDecl *
LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
                                bool receiverIdOrClass = false) {
  return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                  /*instance=*/false);
}
const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
QualType ObjectType=QualType());
/// LookupImplementedMethodInGlobalPool - Returns the method which has an
/// implementation.
ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);
/// CollectIvarsToConstructOrDestruct - Collect those ivars which require
/// initialization.
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
SmallVectorImpl<ObjCIvarDecl*> &Ivars);
//===--------------------------------------------------------------------===//
// Statement Parsing Callbacks: SemaStmt.cpp.
public:
/// Wrapper handed to statement actions carrying a finished full-expression
/// (possibly null). Only Sema::MakeFullExpr can construct a non-empty one.
class FullExprArg {
public:
  FullExprArg() : E(nullptr) {}
  FullExprArg(Sema &actions) : E(nullptr) {}

  /// Hand the wrapped expression back to the caller.
  ExprResult release() { return E; }

  Expr *get() const { return E; }
  Expr *operator->() { return E; }

private:
  // FIXME: No need to make the entire Sema class a friend when it's just
  // Sema::MakeFullExpr that needs access to the constructor below.
  friend class Sema;

  explicit FullExprArg(Expr *expr) : E(expr) {}

  Expr *E;
};
/// Wrap \p Arg as a FullExprArg, using the expression's own location
/// (or an invalid location when \p Arg is null).
FullExprArg MakeFullExpr(Expr *Arg) {
  SourceLocation Loc = Arg ? Arg->getExprLoc() : SourceLocation();
  return MakeFullExpr(Arg, Loc);
}
/// Finish \p Arg as a non-discarded full-expression at \p CC and wrap the
/// result as a FullExprArg.
FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
  ExprResult Full = ActOnFinishFullExpr(Arg, CC, /*DiscardedValue=*/false);
  return FullExprArg(Full.get());
}
/// Finish \p Arg as a discarded-value full-expression and wrap the result
/// as a FullExprArg.
FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
  SourceLocation Loc = Arg ? Arg->getExprLoc() : SourceLocation();
  return FullExprArg(
      ActOnFinishFullExpr(Arg, Loc, /*DiscardedValue=*/true).get());
}
StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true);
StmtResult ActOnExprStmtError();
StmtResult ActOnNullStmt(SourceLocation SemiLoc,
bool HasLeadingEmptyMacro = false);
void ActOnStartOfCompoundStmt(bool IsStmtExpr);
void ActOnFinishOfCompoundStmt();
StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
ArrayRef<Stmt *> Elts, bool isStmtExpr);
/// RAII object bracketing a compound statement: notifies Sema when the
/// compound scope is entered and again when it is left.
class CompoundScopeRAII {
public:
  CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) {
    S.ActOnStartOfCompoundStmt(IsStmtExpr);
  }

  ~CompoundScopeRAII() { S.ActOnFinishOfCompoundStmt(); }

private:
  Sema &S;
};
/// RAII helper that pops a function scope on exit, unless disabled first.
struct FunctionScopeRAII {
  Sema &S;
  bool Active;

  FunctionScopeRAII(Sema &S) : S(S), Active(true) {}

  ~FunctionScopeRAII() {
    if (Active)
      S.PopFunctionScopeInfo();
  }

  /// Cancel the scope pop in the destructor.
  void disable() { Active = false; }
};
StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
StmtResult ActOnForEachLValueExpr(Expr *E);
ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val);
StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS,
SourceLocation DotDotDotLoc, ExprResult RHS,
SourceLocation ColonLoc);
void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt);
StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
SourceLocation ColonLoc,
Stmt *SubStmt, Scope *CurScope);
StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
SourceLocation ColonLoc, Stmt *SubStmt);
StmtResult ActOnAttributedStmt(SourceLocation AttrLoc,
ArrayRef<const Attr*> Attrs,
Stmt *SubStmt);
class ConditionResult;
StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr,
Stmt *InitStmt,
ConditionResult Cond, Stmt *ThenVal,
SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr,
Stmt *InitStmt,
ConditionResult Cond, Stmt *ThenVal,
SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
Stmt *InitStmt,
ConditionResult Cond);
StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc,
Stmt *Switch, Stmt *Body);
StmtResult ActOnWhileStmt(SourceLocation WhileLoc, ConditionResult Cond,
Stmt *Body);
StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
SourceLocation WhileLoc, SourceLocation CondLParen,
Expr *Cond, SourceLocation CondRParen);
StmtResult ActOnForStmt(SourceLocation ForLoc,
SourceLocation LParenLoc,
Stmt *First,
ConditionResult Second,
FullExprArg Third,
SourceLocation RParenLoc,
Stmt *Body);
ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc,
Expr *collection);
StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc,
Stmt *First, Expr *collection,
SourceLocation RParenLoc);
StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body);
/// The phase in which a C++ for-range statement is being built, controlling
/// how much recovery and typo-correction is attempted.
enum BuildForRangeKind {
  /// Initial building of a for-range statement.
  BFRK_Build,
  /// Instantiation or recovery rebuild of a for-range statement. Don't
  /// attempt any typo-correction.
  BFRK_Rebuild,
  /// Determining whether a for-range statement could be built. Avoid any
  /// unnecessary or irreversible actions.
  BFRK_Check
};
StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
Stmt *LoopVar,
SourceLocation ColonLoc, Expr *Collection,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
SourceLocation ColonLoc,
Stmt *RangeDecl, Stmt *Begin, Stmt *End,
Expr *Cond, Expr *Inc,
Stmt *LoopVarDecl,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body);
StmtResult ActOnGotoStmt(SourceLocation GotoLoc,
SourceLocation LabelLoc,
LabelDecl *TheDecl);
StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc,
SourceLocation StarLoc,
Expr *DestExp);
StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope);
StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope);
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind, unsigned NumParams);
typedef std::pair<StringRef, QualType> CapturedParamNameType;
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind,
ArrayRef<CapturedParamNameType> Params);
StmtResult ActOnCapturedRegionEnd(Stmt *S);
void ActOnCapturedRegionError();
RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD,
SourceLocation Loc,
unsigned NumParams);
/// Bit flags selecting which entities may qualify as copy-elision
/// candidates (see getCopyElisionCandidate / isCopyElisionCandidate below);
/// values are combined with '|'.
enum CopyElisionSemanticsKind {
  CES_Strict = 0,
  CES_AllowParameters = 1,
  CES_AllowDifferentTypes = 2,
  CES_AllowExceptionVariables = 4,
  CES_FormerDefault = (CES_AllowParameters),
  CES_Default = (CES_AllowParameters | CES_AllowDifferentTypes),
  CES_AsIfByStdMove = (CES_AllowParameters | CES_AllowDifferentTypes |
                       CES_AllowExceptionVariables),
};
VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E,
CopyElisionSemanticsKind CESK);
bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD,
CopyElisionSemanticsKind CESK);
StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
Scope *CurScope);
StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
bool IsVolatile, unsigned NumOutputs,
unsigned NumInputs, IdentifierInfo **Names,
MultiExprArg Constraints, MultiExprArg Exprs,
Expr *AsmString, MultiExprArg Clobbers,
SourceLocation RParenLoc);
void FillInlineAsmIdentifierInfo(Expr *Res,
llvm::InlineAsmIdentifierInfo &Info);
ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Id,
bool IsUnevaluatedContext);
bool LookupInlineAsmField(StringRef Base, StringRef Member,
unsigned &Offset, SourceLocation AsmLoc);
ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member,
SourceLocation AsmLoc);
StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc,
ArrayRef<Token> AsmToks,
StringRef AsmString,
unsigned NumOutputs, unsigned NumInputs,
ArrayRef<StringRef> Constraints,
ArrayRef<StringRef> Clobbers,
ArrayRef<Expr*> Exprs,
SourceLocation EndLoc);
LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName,
SourceLocation Location,
bool AlwaysCreate);
VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType,
SourceLocation StartLoc,
SourceLocation IdLoc, IdentifierInfo *Id,
bool Invalid = false);
Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D);
StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen,
Decl *Parm, Stmt *Body);
StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body);
StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try,
MultiStmtArg Catch, Stmt *Finally);
StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw);
StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw,
Scope *CurScope);
ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc,
Expr *operand);
StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc,
Expr *SynchExpr,
Stmt *SynchBody);
StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body);
VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo,
SourceLocation StartLoc,
SourceLocation IdLoc,
IdentifierInfo *Id);
Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D);
StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc,
Decl *ExDecl, Stmt *HandlerBlock);
StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
ArrayRef<Stmt *> Handlers);
StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ?
SourceLocation TryLoc, Stmt *TryBlock,
Stmt *Handler);
StmtResult ActOnSEHExceptBlock(SourceLocation Loc,
Expr *FilterExpr,
Stmt *Block);
void ActOnStartSEHFinallyBlock();
void ActOnAbortSEHFinallyBlock();
StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block);
StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope);
void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock);
bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const;
/// If it's a file scoped decl that must warn if not used, keep track
/// of it.
void MarkUnusedFileScopedDecl(const DeclaratorDecl *D);
/// DiagnoseUnusedExprResult - If the statement passed in is an expression
/// whose result is unused, warn.
void DiagnoseUnusedExprResult(const Stmt *S);
void DiagnoseUnusedNestedTypedefs(const RecordDecl *D);
void DiagnoseUnusedDecl(const NamedDecl *ND);
/// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null
/// statement as a \p Body, and it is located on the same line.
///
/// This helps prevent bugs due to typos, such as:
/// if (condition);
/// do_stuff();
void DiagnoseEmptyStmtBody(SourceLocation StmtLoc,
const Stmt *Body,
unsigned DiagID);
/// Warn if a for/while loop statement \p S, which is followed by
/// \p PossibleBody, has a suspicious null statement as a body.
void DiagnoseEmptyLoopBody(const Stmt *S,
const Stmt *PossibleBody);
/// Warn if a value is moved to itself.
void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
SourceLocation OpLoc);
/// Warn if we're implicitly casting from a _Nullable pointer type to a
/// _Nonnull one.
void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType,
SourceLocation Loc);
/// Warn when implicitly casting 0 to nullptr.
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E);
/// Begin a parsing-declaration region whose delayed diagnostics are
/// collected into \p pool; pass the returned state to PopParsingDeclaration.
ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
  return DelayedDiagnostics.push(pool);
}
void PopParsingDeclaration(ParsingDeclState state, Decl *decl);
typedef ProcessingContextState ParsingClassState;
/// Begin a parsing-class region; pass the returned state to
/// PopParsingClass.
ParsingClassState PushParsingClass() {
  return DelayedDiagnostics.pushUndelayed();
}
/// End the parsing-class region opened by the matching PushParsingClass.
void PopParsingClass(ParsingClassState state) {
  DelayedDiagnostics.popUndelayed(state);
}
void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);
void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass,
bool ObjCPropertyAccess,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReceiver = nullptr);
bool makeUnavailableInSystemHeader(SourceLocation loc,
UnavailableAttr::ImplicitReason reason);
/// Issue any -Wunguarded-availability warnings in \c FD
void DiagnoseUnguardedAvailabilityViolations(Decl *FD);
//===--------------------------------------------------------------------===//
// Expression Parsing Callbacks: SemaExpr.cpp.
bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid);
bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass = nullptr,
bool ObjCPropertyAccess = false,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReciever = nullptr);
void NoteDeletedFunction(FunctionDecl *FD);
void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD);
std::string getDeletedOrUnavailableSuffix(const FunctionDecl *FD);
bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD,
ObjCMethodDecl *Getter,
SourceLocation Loc);
void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
ArrayRef<Expr *> Args);
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl };
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
void PopExpressionEvaluationContext();
void DiscardCleanupsInEvaluationContext();
ExprResult TransformToPotentiallyEvaluated(Expr *E);
ExprResult HandleExprEvaluationContextForTypeof(Expr *E);
ExprResult ActOnConstantExpression(ExprResult Res);
// Functions for marking a declaration referenced. These functions also
// contain the relevant logic for marking if a reference to a function or
// variable is an odr-use (in the C++11 sense). There are separate variants
// for expressions referring to a decl; these exist because odr-use marking
// needs to be delayed for some constant variables when we build one of the
// named expressions.
//
// MightBeOdrUse indicates whether the use could possibly be an odr-use, and
// should usually be true. This only needs to be set to false if the lack of
// odr-use cannot be determined from the current context (for instance,
// because the name denotes a virtual function and was written without an
// explicit nested-name-specifier).
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse);
void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
bool MightBeOdrUse = true);
void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr);
void MarkMemberReferenced(MemberExpr *E);
void UpdateMarkingForLValueToRValue(Expr *E);
void CleanupVarDeclMarking();
/// How a variable capture was requested (see tryCaptureVariable below).
enum TryCaptureKind {
  TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef
};
/// Try to capture the given variable.
///
/// \param Var The variable to capture.
///
/// \param Loc The location at which the capture occurs.
///
/// \param Kind The kind of capture, which may be implicit (for either a
/// block or a lambda), or explicit by-value or by-reference (for a lambda).
///
/// \param EllipsisLoc The location of the ellipsis, if one is provided in
/// an explicit lambda capture.
///
/// \param BuildAndDiagnose Whether we are actually supposed to add the
/// captures or diagnose errors. If false, this routine merely check whether
/// the capture can occur without performing the capture itself or complaining
/// if the variable cannot be captured.
///
/// \param CaptureType Will be set to the type of the field used to capture
/// this variable in the innermost block or lambda. Only valid when the
/// variable can be captured.
///
/// \param DeclRefType Will be set to the type of a reference to the capture
/// from within the current scope. Only valid when the variable can be
/// captured.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// variables that may or may not be used in certain specializations of
/// a nested generic lambda.
///
/// \returns true if an error occurred (i.e., the variable cannot be
/// captured) and false if the capture succeeded.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind,
SourceLocation EllipsisLoc, bool BuildAndDiagnose,
QualType &CaptureType,
QualType &DeclRefType,
const unsigned *const FunctionScopeIndexToStopAt);
/// Try to capture the given variable.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
TryCaptureKind Kind = TryCapture_Implicit,
SourceLocation EllipsisLoc = SourceLocation());
/// Checks if the variable must be captured.
bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc);
/// Given a variable, determine the type that a reference to that
/// variable will have in the given scope.
QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc);
/// Mark all of the declarations referenced within a particular AST node as
/// referenced. Used when template instantiation instantiates a non-dependent
/// type -- entities referenced by the type are now referenced.
void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T);
void MarkDeclarationsReferencedInExpr(Expr *E,
bool SkipLocalVariables = false);
/// Try to recover by turning the given expression into a
/// call. Returns true if recovery was attempted or an error was
/// emitted; this may also leave the ExprResult invalid.
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
bool ForceComplain = false,
bool (*IsPlausibleResult)(QualType) = nullptr);
/// Figure out if an expression could be turned into a call.
bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
UnresolvedSetImpl &NonTemplateOverloads);
/// Conditionally issue a diagnostic based on the current
/// evaluation context.
///
/// \param Statement If Statement is non-null, delay reporting the
/// diagnostic until the function body is parsed, and then do a basic
/// reachability analysis to determine if the statement is reachable.
/// If it is unreachable, the diagnostic will not be emitted.
bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement,
const PartialDiagnostic &PD);
// Primary Expressions.
/// Compute the source range covered by the given expression.
SourceRange getExprRange(Expr *E) const;
ExprResult ActOnIdExpression(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand,
std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr,
bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr);
/// Decompose an UnqualifiedId into a name plus, when present, explicit
/// template arguments (copied into \p Buffer, referenced via \p TemplateArgs).
void DecomposeUnqualifiedId(const UnqualifiedId &Id,
TemplateArgumentListInfo &Buffer,
DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *&TemplateArgs);
/// Diagnose a lookup that found no results; \p CCC, when non-null, drives
/// typo correction (a corrected expression may be returned via \p Out).
bool
DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
std::unique_ptr<CorrectionCandidateCallback> CCC,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr);
ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S,
IdentifierInfo *II,
bool AllowBuiltinCreation=false);
ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
bool isAddressOfOperand,
const TemplateArgumentListInfo *TemplateArgs);
// DeclRefExpr construction: the second overload carries a full
// DeclarationNameInfo and optional explicit template arguments.
ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty,
ExprValueKind VK,
SourceLocation Loc,
const CXXScopeSpec *SS = nullptr);
ExprResult
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
const CXXScopeSpec *SS = nullptr,
NamedDecl *FoundD = nullptr,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
ExprResult
BuildAnonymousStructUnionMemberReference(
const CXXScopeSpec &SS,
SourceLocation nameLoc,
IndirectFieldDecl *indirectField,
DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none),
Expr *baseObjectExpr = nullptr,
SourceLocation opLoc = SourceLocation());
ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S);
ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
bool IsDefiniteInstance,
const Scope *S);
bool UseArgumentDependentLookup(const CXXScopeSpec &SS,
const LookupResult &R,
bool HasTrailingLParen);
ExprResult
BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
bool IsAddressOfOperand, const Scope *S,
TypeSourceInfo **RecoveryTSI = nullptr);
ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS,
LookupResult &R,
bool NeedsADL,
bool AcceptInvalidDecl = false);
ExprResult BuildDeclarationNameExpr(
const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D,
NamedDecl *FoundD = nullptr,
const TemplateArgumentListInfo *TemplateArgs = nullptr,
bool AcceptInvalidDecl = false);
ExprResult BuildLiteralOperatorCall(LookupResult &R,
DeclarationNameInfo &SuffixInfo,
ArrayRef<Expr *> Args,
SourceLocation LitEndLoc,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
ExprResult BuildPredefinedExpr(SourceLocation Loc,
PredefinedExpr::IdentKind IK);
ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind);
ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val);
bool CheckLoopHintExpr(Expr *E, SourceLocation Loc);
ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr);
ExprResult ActOnCharacterConstant(const Token &Tok,
Scope *UDLScope = nullptr);
ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E);
ExprResult ActOnParenListExpr(SourceLocation L,
SourceLocation R,
MultiExprArg Val);
/// ActOnStringLiteral - The specified tokens were lexed as pasted string
/// fragments (e.g. "foo" "bar" L"baz").
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks,
Scope *UDLScope = nullptr);
// _Generic selection: ActOn* takes parsed types, Create* takes resolved
// TypeSourceInfos.
ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<ParsedType> ArgTypes,
ArrayRef<Expr *> ArgExprs);
ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<TypeSourceInfo *> Types,
ArrayRef<Expr *> Exprs);
// Binary/Unary Operators. 'Tok' is the token for the operator.
ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
Expr *InputExpr);
ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opc, Expr *Input);
ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Op, Expr *Input);
bool isQualifiedMemberAccess(Expr *E);
QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc);
// Unary expression-or-type trait expressions (the operand may be either a
// type or an expression).
ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
SourceRange R);
ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind);
ExprResult
ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
bool IsType, void *TyOrEx,
SourceRange ArgRange);
ExprResult CheckPlaceholderExpr(Expr *E);
bool CheckVecStepExpr(Expr *E);
bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind);
bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc,
SourceRange ExprRange,
UnaryExprOrTypeTrait ExprKind);
ExprResult ActOnSizeofParameterPackExpr(Scope *S,
SourceLocation OpLoc,
IdentifierInfo &Name,
SourceLocation NameLoc,
SourceLocation RParenLoc);
ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Kind, Expr *Input);
ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
Expr *LowerBound, SourceLocation ColonLoc,
Expr *Length, SourceLocation RBLoc);
// This struct is for use by ActOnMemberAccess to allow
// BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after
// changing the access operator from a '.' to a '->' (to see if that is the
// change needed to fix an error about an unknown member, e.g. when the class
// defines a custom operator->).
struct ActOnMemberAccessExtraArgs {
Scope *S;
UnqualifiedId &Id;
Decl *ObjCImpDecl;
};
// Member access: the first overload resolves the member name itself; the
// second starts from an already-performed lookup (\p R).
ExprResult BuildMemberReferenceExpr(
Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow,
CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult
BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc,
bool IsArrow, const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
bool SuppressQualifierCheck = false,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow,
SourceLocation OpLoc,
const CXXScopeSpec &SS, FieldDecl *Field,
DeclAccessPair FoundDecl,
const DeclarationNameInfo &MemberNameInfo);
ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow);
bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType,
const CXXScopeSpec &SS,
const LookupResult &R);
ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType,
bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Member,
Decl *ObjCImpDecl);
void ActOnDefaultCtorInitializers(Decl *CDtorDecl);
bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
FunctionDecl *FDecl,
const FunctionProtoType *Proto,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
bool ExecConfig = false);
void CheckStaticArrayArgument(SourceLocation CallLoc,
ParmVarDecl *Param,
const Expr *ArgExpr);
/// ActOnCallExpr - Handle a call to Fn with the specified array of arguments.
/// This provides the location of the left/right parens and a list of comma
/// locations.
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr,
bool IsExecConfig = false);
ExprResult
BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc,
ArrayRef<Expr *> Arg, SourceLocation RParenLoc,
Expr *Config = nullptr, bool IsExecConfig = false,
ADLCallKind UsesADL = ADLCallKind::NotADL);
/// Handle a CUDA kernel execution configuration (LLLLoc/GGGLoc are
/// presumably the '<<<' and '>>>' token locations).
ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
MultiExprArg ExecConfig,
SourceLocation GGGLoc);
ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
Declarator &D, ParsedType &Ty,
SourceLocation RParenLoc, Expr *CastExpr);
ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc,
TypeSourceInfo *Ty,
SourceLocation RParenLoc,
Expr *Op);
CastKind PrepareScalarCast(ExprResult &src, QualType destType);
/// Build an altivec or OpenCL literal.
ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
SourceLocation RParenLoc, Expr *E,
TypeSourceInfo *TInfo);
ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);
ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc,
Expr *InitExpr);
ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
TypeSourceInfo *TInfo,
SourceLocation RParenLoc,
Expr *LiteralExpr);
ExprResult ActOnInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult ActOnDesignatedInitializer(Designation &Desig,
SourceLocation Loc,
bool GNUSyntax,
ExprResult Init);
private:
static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);
public:
ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc,
tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr);
ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr);
ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
Expr *LHSExpr, Expr *RHSExpr);
void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc);
/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
/// in the case of the GNU conditional expr extension.
ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
SourceLocation ColonLoc,
Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr);
/// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
LabelDecl *TheDecl);
void ActOnStartStmtExpr();
ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
SourceLocation RPLoc); // "({..})"
// Handle the final expression in a statement expression.
ExprResult ActOnStmtExprResult(ExprResult E);
void ActOnStmtExprError();
// __builtin_offsetof(type, identifier(.identifier|[expr])*)
struct OffsetOfComponent {
SourceLocation LocStart, LocEnd;
bool isBrackets; // true if [expr], false if .ident
union {
IdentifierInfo *IdentInfo;
Expr *E;
} U;
};
/// __builtin_offsetof(type, a.b[123][456].c)
ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
TypeSourceInfo *TInfo,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
ExprResult ActOnBuiltinOffsetOf(Scope *S,
SourceLocation BuiltinLoc,
SourceLocation TypeLoc,
ParsedType ParsedArgTy,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
// __builtin_choose_expr(constExpr, expr1, expr2)
ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc,
Expr *CondExpr, Expr *LHSExpr,
Expr *RHSExpr, SourceLocation RPLoc);
// __builtin_va_arg(expr, type)
ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty,
SourceLocation RPLoc);
ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E,
TypeSourceInfo *TInfo, SourceLocation RPLoc);
// __null
ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc);
bool CheckCaseExpression(Expr *E);
/// Describes the result of an "if-exists" condition check.
enum IfExistsResult {
/// The symbol exists.
IER_Exists,
/// The symbol does not exist.
IER_DoesNotExist,
/// The name is a dependent name, so the results will differ
/// from one instantiation to the next.
IER_Dependent,
/// An error occurred.
IER_Error
};
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS,
const DeclarationNameInfo &TargetNameInfo);
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc,
bool IsIfExists, CXXScopeSpec &SS,
UnqualifiedId &Name);
StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
NestedNameSpecifierLoc QualifierLoc,
DeclarationNameInfo NameInfo,
Stmt *Nested);
StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
CXXScopeSpec &SS, UnqualifiedId &Name,
Stmt *Nested);
//===------------------------- "Block" Extension ------------------------===//
/// ActOnBlockStart - This callback is invoked when a block literal is
/// started.
void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockArguments - This callback allows processing of block arguments.
/// If there are no arguments, this is still invoked.
void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
Scope *CurScope);
/// ActOnBlockError - If there is an error parsing a block, this callback
/// is invoked to pop the information about the block from the action impl.
void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockStmtExpr - This is called when the body of a block statement
/// literal was successfully completed. ^(int x){...}
ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body,
Scope *CurScope);
//===---------------------------- Clang Extensions ----------------------===//
/// __builtin_convertvector(...)
ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- OpenCL Features -----------------------===//
/// __builtin_astype(...)
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- C++ Features --------------------------===//
// Act on C++ namespaces
Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
SourceLocation NamespaceLoc,
SourceLocation IdentLoc, IdentifierInfo *Ident,
SourceLocation LBrace,
const ParsedAttributesView &AttrList,
UsingDirectiveDecl *&UsingDecl);
void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);
// Accessors for well-known standard-library entities.
NamespaceDecl *getStdNamespace() const;
NamespaceDecl *getOrCreateStdNamespace();
NamespaceDecl *lookupStdExperimentalNamespace();
CXXRecordDecl *getStdBadAlloc() const;
EnumDecl *getStdAlignValT() const;
private:
// A cache representing if we've fully checked the various comparison category
// types stored in ASTContext. The bit-index corresponds to the integer value
// of a ComparisonCategoryType enumerator.
llvm::SmallBitVector FullyCheckedComparisonCategories;
ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl,
CXXScopeSpec &SS,
ParsedType TemplateTypeTy,
IdentifierInfo *MemberOrBase);
public:
/// Lookup the specified comparison category types in the standard
/// library, and check the VarDecls possibly returned by the operator<=>
/// builtins for that type.
///
/// \return The type of the comparison category type corresponding to the
/// specified Kind, or a null type if an error occurs
QualType CheckComparisonCategoryType(ComparisonCategoryType Kind,
SourceLocation Loc);
/// Tests whether Ty is an instance of std::initializer_list and, if
/// it is and Element is not NULL, assigns the element type to Element.
bool isStdInitializerList(QualType Ty, QualType *Element);
/// Looks for the std::initializer_list template and instantiates it
/// with Element, or emits an error if it's not found.
///
/// \returns The instantiated template, or null on error.
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);
/// Determine whether Ctor is an initializer-list constructor, as
/// defined in [dcl.init.list]p2.
bool isInitListConstructor(const FunctionDecl *Ctor);
Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc,
SourceLocation NamespcLoc, CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *NamespcName,
const ParsedAttributesView &AttrList);
void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);
Decl *ActOnNamespaceAliasDef(Scope *CurScope,
SourceLocation NamespaceLoc,
SourceLocation AliasLoc,
IdentifierInfo *Alias,
CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *Ident);
void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target,
const LookupResult &PreviousDecls,
UsingShadowDecl *&PrevShadow);
UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD,
NamedDecl *Target,
UsingShadowDecl *PrevDecl);
bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
bool HasTypenameKeyword,
const CXXScopeSpec &SS,
SourceLocation NameLoc,
const LookupResult &Previous);
bool CheckUsingDeclQualifier(SourceLocation UsingLoc,
bool HasTypename,
const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
SourceLocation NameLoc);
NamedDecl *BuildUsingDeclaration(
Scope *S, AccessSpecifier AS, SourceLocation UsingLoc,
bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS,
DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList, bool IsInstantiation);
NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom,
ArrayRef<NamedDecl *> Expansions);
bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);
/// Given a derived-class using shadow declaration for a constructor and the
/// corresponding base class constructor, find or create the implicit
/// synthesized derived class constructor to use for this initialization.
CXXConstructorDecl *
findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor,
ConstructorUsingShadowDecl *DerivedShadow);
Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS,
SourceLocation UsingLoc,
SourceLocation TypenameLoc, CXXScopeSpec &SS,
UnqualifiedId &Name, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS,
MultiTemplateParamsArg TemplateParams,
SourceLocation UsingLoc, UnqualifiedId &Name,
const ParsedAttributesView &AttrList,
TypeResult Type, Decl *DeclFromDeclSpec);
/// BuildCXXConstructExpr - Creates a complete call to a constructor,
/// including handling of its default argument expressions.
///
/// \param ConstructKind - a CXXConstructExpr::ConstructionKind
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
/// Build a CXXConstructExpr whose constructor has already been resolved if
/// it denotes an inherited constructor.
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
// FIXME: Can we remove this and have the above BuildCXXConstructExpr check if
// the constructor can be elidable?
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs, bool HadMultipleCandidates,
bool IsListInitialization,
bool IsStdInitListInitialization, bool RequiresZeroInit,
unsigned ConstructKind, SourceRange ParenRange);
ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field);
/// Instantiate or parse a C++ default argument expression as necessary.
/// Return true on error.
bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
ParmVarDecl *Param);
/// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
/// the default expr if needed.
ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc,
FunctionDecl *FD,
ParmVarDecl *Param);
/// FinalizeVarWithDestructor - Prepare for calling destructor on the
/// constructed variable.
void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);
/// Helper class that collects exception specifications for
/// implicitly-declared special member functions.
class ImplicitExceptionSpecification {
// Stored as a pointer (rather than a reference) so instances can be copied.
Sema *Self;
// Specifications ordered from most to least restrictive:
//   noexcept (C++11 only), then throw(), then throw(<collected>),
//   and finally no specification, expressed as noexcept(false);
//   throw(...) is used instead if any called function uses it.
ExceptionSpecificationType ComputedEST;
llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
SmallVector<QualType, 4> Exceptions;
// Drop every collected exception type (both the dedup set and the list).
void ClearExceptions() {
Exceptions.clear();
ExceptionsSeen.clear();
}
public:
explicit ImplicitExceptionSpecification(Sema &Self)
: Self(&Self), ComputedEST(EST_BasicNoexcept) {
// Before C++11 the baseline is a dynamic specification, not noexcept.
if (!Self.getLangOpts().CPlusPlus11)
ComputedEST = EST_DynamicNone;
}
/// Get the computed exception specification type.
ExceptionSpecificationType getExceptionSpecType() const {
assert(!isComputedNoexcept(ComputedEST) &&
"noexcept(expr) should not be a possible result");
return ComputedEST;
}
/// The number of exceptions in the exception specification.
unsigned size() const { return Exceptions.size(); }
/// The set of exceptions in the exception specification.
const QualType *data() const { return Exceptions.data(); }
/// Integrate another called method into the collected data.
void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);
/// Integrate an invoked expression into the collected data.
void CalledExpr(Expr *E);
/// Overwrite an EPI's exception specification with this
/// computed exception specification.
FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
FunctionProtoType::ExceptionSpecInfo ESI;
ESI.Type = getExceptionSpecType();
switch (ESI.Type) {
case EST_Dynamic:
ESI.Exceptions = Exceptions;
break;
case EST_None:
// C++11 [except.spec]p14: the exception-specification is
// noexcept(false) if the set of potential exceptions of the
// special member function contains "any".
ESI.Type = EST_NoexceptFalse;
ESI.NoexceptExpr =
Self->ActOnCXXBoolLiteral(SourceLocation(), tok::kw_false).get();
break;
default:
break;
}
return ESI;
}
};
/// Determine what sort of exception specification a defaulted
/// default constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc,
CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// copy constructor of a class will have, and whether the parameter
/// will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// copy assignment operator of a class will have, and whether the
/// parameter will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted move
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted move
/// assignment operator of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// destructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification an inheriting
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeInheritingCtorExceptionSpec(SourceLocation Loc,
CXXConstructorDecl *CD);
/// Evaluate the implicit exception specification for a defaulted
/// special member function.
void EvaluateImplicitExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD);
/// Check the given noexcept-specifier, convert its expression, and compute
/// the appropriate ExceptionSpecificationType.
ExprResult ActOnNoexceptSpec(SourceLocation NoexceptLoc, Expr *NoexceptExpr,
ExceptionSpecificationType &EST);
/// Check the given exception-specification and update the
/// exception specification information with the results.
void checkExceptionSpecification(bool IsTopLevel,
ExceptionSpecificationType EST,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr,
SmallVectorImpl<QualType> &Exceptions,
FunctionProtoType::ExceptionSpecInfo &ESI);
/// Determine if we're in a case where we need to (incorrectly) eagerly
/// parse an exception specification to work around a libstdc++ bug.
bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D);
/// Add an exception-specification to the given member function
/// (or member function template). The exception-specification was parsed
/// after the method itself was declared.
void actOnDelayedExceptionSpecification(Decl *Method,
ExceptionSpecificationType EST,
SourceRange SpecificationRange,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr);
class InheritedConstructorInfo; // forward declaration
/// Determine if a special member function should have a deleted
/// definition when it is defaulted.
bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
InheritedConstructorInfo *ICI = nullptr,
bool Diagnose = false);
/// Declare the implicit default constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// default constructor will be added.
///
/// \returns The implicitly-declared default constructor.
CXXConstructorDecl *DeclareImplicitDefaultConstructor(
CXXRecordDecl *ClassDecl);
/// DefineImplicitDefaultConstructor - Checks for feasibility of
/// defining this constructor as the default constructor.
void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit destructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// destructor will be added.
///
/// \returns The implicitly-declared destructor.
CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitDestructor - Checks for feasibility of
/// defining this destructor as the default destructor.
void DefineImplicitDestructor(SourceLocation CurrentLocation,
CXXDestructorDecl *Destructor);
/// Build an exception spec for destructors that don't have one.
///
/// C++11 says that user-defined destructors with no exception spec get one
/// that looks as if the destructor was implicitly declared.
void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor);
/// Define the specified inheriting constructor.
void DefineInheritingConstructor(SourceLocation UseLoc,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy constructor will be added.
///
/// \returns The implicitly-declared copy constructor.
CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitCopyConstructor - Checks for feasibility of
/// defining this constructor as the copy constructor.
void DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit move constructor for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move constructor will be added.
///
/// \returns The implicitly-declared move constructor, or NULL if it wasn't
/// declared.
CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitMoveConstructor - Checks for feasibility of
/// defining this constructor as the move constructor.
void DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy assignment operator will be added.
///
/// \returns The implicitly-declared copy assignment operator.
CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared copy assignment operator.
void DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Declare the implicit move assignment operator for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move assignment operator will be added.
///
/// \returns The implicitly-declared move assignment operator, or NULL if it
/// wasn't declared.
CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared move assignment operator.
void DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Force the declaration of any implicitly-declared members of this
/// class.
void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);
/// Check a completed declaration of an implicit special member.
void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD);
/// Determine whether the given function is an implicitly-deleted
/// special member function.
bool isImplicitlyDeleted(FunctionDecl *FD);
/// Check whether 'this' shows up in the type of a static member
/// function after the (naturally empty) cv-qualifier-seq would be.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);
/// Whether 'this' shows up in the exception specification of a static
/// member function.
bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);
/// Check whether 'this' shows up in the attributes of the given
/// static member function.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);
/// MaybeBindToTemporary - If the passed in expression has a record type with
/// a non-trivial destructor, this will return CXXBindTemporaryExpr. Otherwise
/// it simply returns the passed in expression.
ExprResult MaybeBindToTemporary(Expr *E);
bool CompleteConstructorCall(CXXConstructorDecl *Constructor,
MultiExprArg ArgsPtr,
SourceLocation Loc,
SmallVectorImpl<Expr*> &ConvertedArgs,
bool AllowExplicit = false,
bool IsListInitialization = false);
// Name handling for constructors and destructors.
ParsedType getInheritingConstructorName(CXXScopeSpec &SS,
SourceLocation NameLoc,
IdentifierInfo &Name);
ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
bool EnteringContext);
ParsedType getDestructorName(SourceLocation TildeLoc,
IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
ParsedType ObjectType,
bool EnteringContext);
ParsedType getDestructorTypeForDecltype(const DeclSpec &DS,
ParsedType ObjectType);
// Checks that reinterpret casts don't have undefined behavior.
void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
bool IsDereference, SourceRange Range);
/// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's.
ExprResult ActOnCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
SourceLocation LAngleBracketLoc,
Declarator &D,
SourceLocation RAngleBracketLoc,
SourceLocation LParenLoc,
Expr *E,
SourceLocation RParenLoc);
ExprResult BuildCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
TypeSourceInfo *Ty,
Expr *E,
SourceRange AngleBrackets,
SourceRange Parens);
// typeid(): type and expression operand forms.
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXTypeid - Parse typeid( something ).
ExprResult ActOnCXXTypeid(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
// __uuidof(): type and expression operand forms.
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXUuidof - Parse __uuidof( something ).
ExprResult ActOnCXXUuidof(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
/// Handle a C++1z fold-expression: ( expr op ... op expr ).
ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
tok::TokenKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc);
ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
BinaryOperatorKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc);
ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
BinaryOperatorKind Operator);
/// ActOnCXXThis - Parse 'this' pointer.
ExprResult ActOnCXXThis(SourceLocation loc);
/// Try to retrieve the type of the 'this' pointer.
///
/// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
QualType getCurrentThisType();
/// When non-NULL, the C++ 'this' expression is allowed despite the
/// current context not being a non-static member function. In such cases,
/// this provides the type used for 'this'.
QualType CXXThisTypeOverride;
/// RAII object used to temporarily allow the C++ 'this' expression
/// to be used, with the given qualifiers on the current class type.
class CXXThisScopeRAII {
Sema &S;
QualType OldCXXThisTypeOverride;
bool Enabled;
public:
/// Introduce a new scope where 'this' may be allowed (when enabled),
/// using the given declaration (which is either a class template or a
/// class) along with the given qualifiers.
/// along with the qualifiers placed on '*this'.
CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals,
bool Enabled = true);
~CXXThisScopeRAII();
};
/// Make sure the value of 'this' is actually available in the current
/// context, if it is a potentially evaluated context.
///
/// \param Loc The location at which the capture of 'this' occurs.
///
/// \param Explicit Whether 'this' is explicitly captured in a lambda
/// capture list.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// 'this' that may or may not be used in certain specializations of
/// a nested generic lambda (depending on whether the name resolves to
/// a non-static member function or a static function).
/// \return returns 'true' if failed, 'false' if success.
bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false,
bool BuildAndDiagnose = true,
const unsigned *const FunctionScopeIndexToStopAt = nullptr,
bool ByCopy = false);
/// Determine whether the given type is the type of *this that is used
/// outside of the body of a member function for a type that is currently
/// being defined.
bool isThisOutsideMemberFunctionBody(QualType BaseType);
/// ActOnCXXBoolLiteral - Parse {true,false} literals.
ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
/// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
/// Build an Objective-C availability-check expression from the parsed
/// availability specs.
ExprResult
ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs,
SourceLocation AtLoc, SourceLocation RParen);
/// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc);
//// ActOnCXXThrow - Parse throw expressions.
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr);
/// Build a throw expression once the thrown operand has been analyzed.
ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
bool IsThrownVarInScope);
/// Check that an expression is a valid operand for 'throw'.
bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E);
/// ActOnCXXTypeConstructExpr - Parse construction of a specified type.
/// Can be interpreted either as function-style casting ("int(x)")
/// or class type construction ("ClassType(x,y,z)")
/// or creation of a value-initialized type ("int()").
ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep,
SourceLocation LParenOrBraceLoc,
MultiExprArg Exprs,
SourceLocation RParenOrBraceLoc,
bool ListInitialization);
/// Semantic counterpart of ActOnCXXTypeConstructExpr, taking a resolved type.
ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type,
SourceLocation LParenLoc,
MultiExprArg Exprs,
SourceLocation RParenLoc,
bool ListInitialization);
/// ActOnCXXNew - Parsed a C++ 'new' expression.
ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens, Declarator &D,
Expr *Initializer);
/// Build a 'new' expression from its already-analyzed pieces (allocated
/// type, placement args, optional array size and initializer).
ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens,
QualType AllocType,
TypeSourceInfo *AllocTypeInfo,
Expr *ArraySize,
SourceRange DirectInitRange,
Expr *Initializer);
/// Determine whether \p FD is an aligned allocation or deallocation
/// function that is unavailable.
bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const;
/// Produce diagnostics if \p FD is an aligned allocation or deallocation
/// function that is unavailable.
void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD,
SourceLocation Loc);
/// Check whether AllocType is a type that may be allocated by a
/// new-expression at Loc (diagnosing over range R).
bool CheckAllocatedType(QualType AllocType, SourceLocation Loc,
SourceRange R);
/// The scope in which to find allocation functions.
enum AllocationFunctionScope {
/// Only look for allocation functions in the global scope.
AFS_Global,
/// Only look for allocation functions in the scope of the
/// allocated class.
AFS_Class,
/// Look for allocation functions in both the global scope
/// and in the scope of the allocated class.
AFS_Both
};
/// Finds the overloads of operator new and delete that are appropriate
/// for the allocation.
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
AllocationFunctionScope NewScope,
AllocationFunctionScope DeleteScope,
QualType AllocType, bool IsArray,
bool &PassAlignment, MultiExprArg PlaceArgs,
FunctionDecl *&OperatorNew,
FunctionDecl *&OperatorDelete,
bool Diagnose = true);
/// Declare the implicit global allocation/deallocation functions.
void DeclareGlobalNewDelete();
/// Declare a single global allocation function with the given name,
/// return type, and parameter types.
void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return,
ArrayRef<QualType> Params);
/// Find a deallocation function named Name in class RD, reporting it via
/// the Operator out-parameter.
bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
DeclarationName Name, FunctionDecl* &Operator,
bool Diagnose = true);
/// Find the usual (non-placement) deallocation function for the given
/// name, taking size/alignment availability into account.
FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc,
bool CanProvideSize,
bool Overaligned,
DeclarationName Name);
/// Find the deallocation function a destructor of RD would use.
FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc,
CXXRecordDecl *RD);
/// ActOnCXXDelete - Parsed a C++ 'delete' expression
ExprResult ActOnCXXDelete(SourceLocation StartLoc,
bool UseGlobal, bool ArrayForm,
Expr *Operand);
/// Check (and possibly diagnose) a call to a virtual destructor.
void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc,
bool IsDelete, bool CallCanBeVirtual,
bool WarnOnNonAbstractTypes,
SourceLocation DtorLoc);
/// Parsed a noexcept( expression ) operator.
ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen,
Expr *Operand, SourceLocation RParen);
/// Build a noexcept-expression from an analyzed operand.
ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand,
SourceLocation RParen);
/// Parsed one of the type trait support pseudo-functions.
ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<ParsedType> Args,
SourceLocation RParenLoc);
/// Build a type-trait expression from resolved type arguments.
ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<TypeSourceInfo *> Args,
SourceLocation RParenLoc);
/// ActOnArrayTypeTrait - Parsed one of the binary type trait support
/// pseudo-functions.
ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
ParsedType LhsTy,
Expr *DimExpr,
SourceLocation RParen);
/// Build an array-type-trait expression from a resolved type.
ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
TypeSourceInfo *TSInfo,
Expr *DimExpr,
SourceLocation RParen);
/// ActOnExpressionTrait - Parsed one of the unary type trait support
/// pseudo-functions.
ExprResult ActOnExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
/// Build an expression-trait expression from an analyzed operand.
ExprResult BuildExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
/// Begin processing a C++ member access ('.' or '->'); reports the object
/// type and whether a pseudo-destructor may follow via the out-parameters.
ExprResult ActOnStartCXXMemberReference(Scope *S,
Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
ParsedType &ObjectType,
bool &MayBePseudoDestructor);
/// Build a pseudo-destructor expression (e.g. "p->~int()") from its
/// resolved components.
ExprResult BuildPseudoDestructorExpr(Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
const CXXScopeSpec &SS,
TypeSourceInfo *ScopeType,
SourceLocation CCLoc,
SourceLocation TildeLoc,
PseudoDestructorTypeStorage DestroyedType);
/// Parsed a pseudo-destructor expression spelled with unqualified-ids
/// (scope type name and destroyed type name).
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
UnqualifiedId &FirstTypeName,
SourceLocation CCLoc,
SourceLocation TildeLoc,
UnqualifiedId &SecondTypeName);
/// Parsed a pseudo-destructor expression whose destroyed type is given
/// by a decl-specifier sequence.
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
SourceLocation TildeLoc,
const DeclSpec& DS);
/// MaybeCreateExprWithCleanups - If the current full-expression
/// requires any cleanups, surround it with a ExprWithCleanups node.
/// Otherwise, just returns the passed-in expression.
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr);
/// Statement form of MaybeCreateExprWithCleanups.
Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt);
/// Convenience overload of MaybeCreateExprWithCleanups for an ExprResult.
ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr);
/// Create a MaterializeTemporaryExpr binding the temporary, optionally
/// as bound to an lvalue reference.
MaterializeTemporaryExpr *
CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary,
bool BoundToLvalueReference);
/// Finish a full-expression, deriving the position from the expression
/// itself (or using an invalid location when the expression is null).
/// Forwards to the overload taking an explicit SourceLocation.
ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) {
  SourceLocation ExprLoc = Expr ? Expr->getExprLoc() : SourceLocation();
  return ActOnFinishFullExpr(Expr, ExprLoc, DiscardedValue);
}
/// Finish a full-expression at the given source location; IsConstexpr
/// marks expressions appearing in a constexpr context.
ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC,
bool DiscardedValue, bool IsConstexpr = false);
/// Finish a full statement (statement-level analogue of
/// ActOnFinishFullExpr).
StmtResult ActOnFinishFullStmt(Stmt *Stmt);
// Marks SS invalid if it represents an incomplete type.
bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC);
/// Compute the declaration context designated by the given type.
DeclContext *computeDeclContext(QualType T);
/// Compute the declaration context designated by a scope specifier.
DeclContext *computeDeclContext(const CXXScopeSpec &SS,
bool EnteringContext = false);
/// Determine whether the given scope specifier is dependent.
bool isDependentScopeSpecifier(const CXXScopeSpec &SS);
/// If NNS names the current instantiation, return its record declaration.
CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS);
/// The parser has parsed a global nested-name-specifier '::'.
///
/// \param CCLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS);
/// The parser has parsed a '__super' nested-name-specifier.
///
/// \param SuperLoc The location of the '__super' keyword.
///
/// \param ColonColonLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc,
SourceLocation ColonColonLoc, CXXScopeSpec &SS);
/// Determine whether SD may appear in a nested-name-specifier.
bool isAcceptableNestedNameSpecifier(const NamedDecl *SD,
bool *CanCorrect = nullptr);
/// Find the declaration of the first qualifier of NNS in scope S.
NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS);
/// Keeps information about an identifier in a nested-name-spec.
///
struct NestedNameSpecInfo {
/// The type of the object, if we're parsing nested-name-specifier in
/// a member access expression.
ParsedType ObjectType;
/// The identifier preceding the '::'.
IdentifierInfo *Identifier;
/// The location of the identifier.
SourceLocation IdentifierLoc;
/// The location of the '::'.
SourceLocation CCLoc;
/// Creates info object for the most typical case.
NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType())
: ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc),
CCLoc(ColonColonLoc) {
}
/// Convenience constructor taking the object type as a QualType.
NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
SourceLocation ColonColonLoc, QualType ObjectType)
: ObjectType(ParsedType::make(ObjectType)), Identifier(II),
IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) {
}
};
/// Determine whether the identifier in IdInfo names a non-type entity
/// when used in a nested-name-specifier.
bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo);
/// Build a nested-name-specifier from the identifier information in
/// IdInfo; ScopeLookupResult and ErrorRecoveryLookup support the parser's
/// error recovery.
bool BuildCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
NamedDecl *ScopeLookupResult,
bool ErrorRecoveryLookup,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
/// The parser has parsed a nested-name-specifier 'identifier::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param IdInfo Parser information about an identifier in the
/// nested-name-spec.
///
/// \param EnteringContext Whether we're entering the context nominated by
/// this nested-name-specifier.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param ErrorRecoveryLookup If true, then this method is called to improve
/// error recovery. In this case do not emit error message.
///
/// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':'
/// are allowed. The bool value pointed by this parameter is set to 'true'
/// if the identifier is treated as if it was followed by ':', not '::'.
///
/// \param OnlyNamespace If true, only considers namespaces in lookup.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
bool ErrorRecoveryLookup = false,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
/// Process the operand of a decltype expression.
ExprResult ActOnDecltypeExpression(Expr *E);
/// The parser has parsed a 'decltype(...)::' nested-name-specifier.
bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS,
const DeclSpec &DS,
SourceLocation ColonColonLoc);
/// Determine whether the identifier in IdInfo is valid only if it forms
/// part of a nested-name-specifier.
bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo,
bool EnteringContext);
/// The parser has parsed a nested-name-specifier
/// 'template[opt] template-name < template-args >::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param TemplateKWLoc the location of the 'template' keyword, if any.
/// \param TemplateName the template name.
/// \param TemplateNameLoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
/// \param CCLoc The location of the '::'.
///
/// \param EnteringContext Whether we're entering the context of the
/// nested-name-specifier.
///
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateName,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
SourceLocation CCLoc,
bool EnteringContext);
/// Given a C++ nested-name-specifier, produce an annotation value
/// that the parser can use later to reconstruct the given
/// nested-name-specifier.
///
/// \param SS A nested-name-specifier.
///
/// \returns A pointer containing all of the information in the
/// nested-name-specifier \p SS.
void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS);
/// Given an annotation pointer for a nested-name-specifier, restore
/// the nested-name-specifier structure.
///
/// \param Annotation The annotation pointer, produced by
/// \c SaveNestedNameSpecifierAnnotation().
///
/// \param AnnotationRange The source range corresponding to the annotation.
///
/// \param SS The nested-name-specifier that will be updated with the contents
/// of the annotation pointer.
void RestoreNestedNameSpecifierAnnotation(void *Annotation,
SourceRange AnnotationRange,
CXXScopeSpec &SS);
/// Determine whether parsing the declarator with scope specifier SS
/// requires entering the declarator's scope.
bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global
/// scope or nested-name-specifier) is parsed, part of a declarator-id.
/// After this method is called, according to [C++ 3.4.3p3], names should be
/// looked up in the declarator-id's scope, until the declarator is parsed and
/// ActOnCXXExitDeclaratorScope is called.
/// The 'SS' should be a non-empty valid CXXScopeSpec.
bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS);
/// ActOnCXXExitDeclaratorScope - Called when a declarator that previously
/// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same
/// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well.
/// Used to indicate that names should revert to being looked up in the
/// defining scope.
void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an
/// initializer for the declaration 'Dcl'.
/// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a
/// static data member of class X, names should be looked up in the scope of
/// class X.
void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl);
/// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an
/// initializer for the declaration 'Dcl'.
void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl);
/// Create a new lambda closure type.
CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange,
TypeSourceInfo *Info,
bool KnownDependent,
LambdaCaptureDefault CaptureDefault);
/// Start the definition of a lambda expression.
CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class,
SourceRange IntroducerRange,
TypeSourceInfo *MethodType,
SourceLocation EndLoc,
ArrayRef<ParmVarDecl *> Params,
bool IsConstexprSpecified);
/// Endow the lambda scope info with the relevant properties.
void buildLambdaScope(sema::LambdaScopeInfo *LSI,
CXXMethodDecl *CallOperator,
SourceRange IntroducerRange,
LambdaCaptureDefault CaptureDefault,
SourceLocation CaptureDefaultLoc,
bool ExplicitParams,
bool ExplicitResultType,
bool Mutable);
/// Perform initialization analysis of the init-capture and perform
/// any implicit conversions such as an lvalue-to-rvalue conversion if
/// not being used to initialize a reference. Thin wrapper over
/// buildLambdaInitCaptureInitialization that packages the result as a
/// ParsedType for the parser.
ParsedType actOnLambdaInitCaptureInitialization(
    SourceLocation Loc, bool ByRef, IdentifierInfo *Id,
    LambdaCaptureInitKind InitKind, Expr *&Init) {
  // Every init kind other than copy-init counts as direct-initialization.
  const bool IsDirectInit = InitKind != LambdaCaptureInitKind::CopyInit;
  QualType CaptureType =
      buildLambdaInitCaptureInitialization(Loc, ByRef, Id, IsDirectInit, Init);
  return ParsedType::make(CaptureType);
}
/// Analyze the initializer of a lambda init-capture, computing the
/// capture's type; may rewrite Init with the converted initializer.
QualType buildLambdaInitCaptureInitialization(SourceLocation Loc, bool ByRef,
IdentifierInfo *Id,
bool DirectInit, Expr *&Init);
/// Create a dummy variable within the declcontext of the lambda's
/// call operator, for name lookup purposes for a lambda init capture.
///
/// CodeGen handles emission of lambda captures, ignoring these dummy
/// variables appropriately.
VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc,
QualType InitCaptureType,
IdentifierInfo *Id,
unsigned InitStyle, Expr *Init);
/// Build the implicit field for an init-capture.
FieldDecl *buildInitCaptureField(sema::LambdaScopeInfo *LSI, VarDecl *Var);
/// Note that we have finished the explicit captures for the
/// given lambda.
void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI);
/// Introduce the lambda parameters into scope.
void addLambdaParameters(
ArrayRef<LambdaIntroducer::LambdaCapture> Captures,
CXXMethodDecl *CallOperator, Scope *CurScope);
/// Deduce a block or lambda's return type based on the return
/// statements present in the body.
void deduceClosureReturnType(sema::CapturingScopeInfo &CSI);
/// ActOnStartOfLambdaDefinition - This is called just before we start
/// parsing the body of a lambda; it analyzes the explicit captures and
/// arguments, and sets up various data-structures for the body of the
/// lambda.
void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
Declarator &ParamInfo, Scope *CurScope);
/// ActOnLambdaError - If there is an error parsing a lambda, this callback
/// is invoked to pop the information about the lambda.
void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope,
bool IsInstantiation = false);
/// ActOnLambdaExpr - This is called when the body of a lambda expression
/// was successfully completed.
ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body,
Scope *CurScope);
/// Does copying/destroying the captured variable have side effects?
bool CaptureHasSideEffects(const sema::Capture &From);
/// Diagnose if an explicit lambda capture is unused. Returns true if a
/// diagnostic is emitted.
bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange,
const sema::Capture &From);
/// Complete a lambda-expression having processed and attached the
/// lambda body.
ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
sema::LambdaScopeInfo *LSI);
/// Get the return type to use for a lambda's conversion function(s) to
/// function pointer type, given the type of the call operator.
QualType
getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType);
/// Define the "body" of the conversion from a lambda object to a
/// function pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToFunctionPointerConversion(
SourceLocation CurrentLoc, CXXConversionDecl *Conv);
/// Define the "body" of the conversion from a lambda object to a
/// block pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc,
CXXConversionDecl *Conv);
/// Build the block produced when a lambda is converted to an
/// Objective-C block pointer, copying from Src.
ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation,
SourceLocation ConvLocation,
CXXConversionDecl *Conv,
Expr *Src);
// ParseObjCStringLiteral - Parse Objective-C string literals.
ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs,
ArrayRef<Expr *> Strings);
/// Build an Objective-C string literal from an '@'-prefixed string.
ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S);
/// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the
/// numeric literal expression. Type of the expression will be "NSNumber *"
/// or "id" if NSNumber is unavailable.
ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number);
/// Build an Objective-C boolean literal with explicit '@' and value
/// locations.
ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc,
bool Value);
/// Build an Objective-C array literal from its element expressions.
ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements);
/// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the
/// '@' prefixed parenthesized expression. The type of the expression will
/// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type
/// of ValueType, which is allowed to be a built-in numeric type, "char *",
/// "const char *" or C structure with attribute 'objc_boxable'.
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr);
/// Build an Objective-C subscript expression, given the getter and/or
/// setter methods to use.
ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr,
Expr *IndexExpr,
ObjCMethodDecl *getterMethod,
ObjCMethodDecl *setterMethod);
/// Build an Objective-C dictionary literal from its key/value elements.
ExprResult BuildObjCDictionaryLiteral(SourceRange SR,
MutableArrayRef<ObjCDictionaryElement> Elements);
/// Build an \@encode expression from a resolved type.
ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc,
TypeSourceInfo *EncodedTypeInfo,
SourceLocation RParenLoc);
/// Build a call to the given C++ conversion member function on Exp.
ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl,
CXXConversionDecl *Method,
bool HadMultipleCandidates);
/// ParseObjCEncodeExpression - Build expression for \@encode( type ).
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc,
SourceLocation EncodeLoc,
SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc);
/// ParseObjCSelectorExpression - Build selector expression for \@selector
ExprResult ParseObjCSelectorExpression(Selector Sel,
SourceLocation AtLoc,
SourceLocation SelLoc,
SourceLocation LParenLoc,
SourceLocation RParenLoc,
bool WarnMultipleSelectors);
/// ParseObjCProtocolExpression - Build protocol expression for \@protocol
ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName,
SourceLocation AtLoc,
SourceLocation ProtoLoc,
SourceLocation LParenLoc,
SourceLocation ProtoIdLoc,
SourceLocation RParenLoc);
//===--------------------------------------------------------------------===//
// C++ Declarations
//
/// Begin an extern "..." linkage specification (optionally braced).
Decl *ActOnStartLinkageSpecification(Scope *S,
SourceLocation ExternLoc,
Expr *LangStr,
SourceLocation LBraceLoc);
/// Finish a linkage specification started with
/// ActOnStartLinkageSpecification.
Decl *ActOnFinishLinkageSpecification(Scope *S,
Decl *LinkageSpec,
SourceLocation RBraceLoc);
//===--------------------------------------------------------------------===//
// C++ Classes
//
/// Retrieve the class currently being defined in the given scope.
CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS);
/// Determine whether II names the class currently being defined.
bool isCurrentClassName(const IdentifierInfo &II, Scope *S,
const CXXScopeSpec *SS = nullptr);
/// Determine whether II is a plausible typo for the current class name
/// (may correct II in place).
bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS);
/// Process an access specifier ('public:', 'private:', 'protected:')
/// within a class body.
bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc,
SourceLocation ColonLoc,
const ParsedAttributesView &Attrs);
/// Process a class member declarator, including bit-fields, virt
/// specifiers, and in-class initializer style.
NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS,
Declarator &D,
MultiTemplateParamsArg TemplateParameterLists,
Expr *BitfieldWidth, const VirtSpecifiers &VS,
InClassInitStyle InitStyle);
/// Called before parsing an in-class member initializer.
void ActOnStartCXXInClassMemberInitializer();
/// Called after parsing an in-class member initializer; attaches Init
/// to VarDecl.
void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl,
SourceLocation EqualLoc,
Expr *Init);
/// Process a parenthesized mem-initializer in a constructor's
/// ctor-initializer list.
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
SourceLocation LParenLoc,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
SourceLocation EllipsisLoc);
/// Process a braced-init-list mem-initializer in a constructor's
/// ctor-initializer list.
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *InitList,
SourceLocation EllipsisLoc);
/// Resolve a mem-initializer to a member, base, or delegating
/// initializer and build it.
MemInitResult BuildMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *Init,
SourceLocation EllipsisLoc);
/// Build a mem-initializer for a non-static data member.
MemInitResult BuildMemberInitializer(ValueDecl *Member,
Expr *Init,
SourceLocation IdLoc);
/// Build a mem-initializer for a base class of ClassDecl.
MemInitResult BuildBaseInitializer(QualType BaseType,
TypeSourceInfo *BaseTInfo,
Expr *Init,
CXXRecordDecl *ClassDecl,
SourceLocation EllipsisLoc);
/// Build a delegating constructor initializer for ClassDecl.
MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo,
Expr *Init,
CXXRecordDecl *ClassDecl);
/// Attach a delegating initializer to Constructor.
bool SetDelegatingInitializer(CXXConstructorDecl *Constructor,
CXXCtorInitializer *Initializer);
/// Attach the (possibly implicit) initializers to Constructor.
bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors,
ArrayRef<CXXCtorInitializer *> Initializers = None);
/// Attach ivar initializers to an Objective-C implementation.
void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation);
/// MarkBaseAndMemberDestructorsReferenced - Given a record decl,
/// mark all the non-trivial destructors of its members and bases as
/// referenced.
void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc,
CXXRecordDecl *Record);
/// The list of classes whose vtables have been used within
/// this translation unit, and the source locations at which the
/// first use occurred.
typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse;
/// The list of vtables that are required but have not yet been
/// materialized.
SmallVector<VTableUse, 16> VTableUses;
/// The set of classes whose vtables have been used within
/// this translation unit, and a bit that will be true if the vtable is
/// required to be emitted (otherwise, it should be emitted only if needed
/// by code generation).
llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed;
/// Load any externally-stored vtable uses.
void LoadExternalVTableUses();
/// Note that the vtable for the given class was used at the
/// given location.
void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class,
bool DefinitionRequired = false);
/// Mark the exception specifications of all virtual member functions
/// in the given class as needed.
void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc,
const CXXRecordDecl *RD);
/// MarkVirtualMembersReferenced - Will mark all members of the given
/// CXXRecordDecl referenced.
void MarkVirtualMembersReferenced(SourceLocation Loc,
const CXXRecordDecl *RD);
/// Define all of the vtables that have been used in this
/// translation unit and reference any virtual members used by those
/// vtables.
///
/// \returns true if any work was done, false otherwise.
bool DefineUsedVTables();
/// Add the implicitly-declared special member functions to ClassDecl.
void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl);
/// Process a constructor's complete ctor-initializer list.
void ActOnMemInitializers(Decl *ConstructorDecl,
SourceLocation ColonLoc,
ArrayRef<CXXCtorInitializer*> MemInits,
bool AnyErrors);
/// Check class-level dllimport/dllexport attribute. The caller must
/// ensure that referenceDLLExportedClassMethods is called some point later
/// when all outer classes of Class are complete.
void checkClassLevelDLLAttribute(CXXRecordDecl *Class);
/// Check a class-level code_seg attribute.
void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class);
/// Reference the methods of classes pending dllexport processing
/// (see checkClassLevelDLLAttribute).
void referenceDLLExportedClassMethods();
/// Propagate a class's DLL attribute to a base class template
/// specialization.
void propagateDLLAttrToBaseClassTemplate(
CXXRecordDecl *Class, Attr *ClassAttr,
ClassTemplateSpecializationDecl *BaseTemplateSpec,
SourceLocation BaseLoc);
/// Perform the checks that apply once a class definition is complete.
void CheckCompletedCXXClass(CXXRecordDecl *Record);
/// Check that the C++ class annotated with "trivial_abi" satisfies all the
/// conditions that are needed for the attribute to have an effect.
void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD);
/// Finish processing a class member-specification (the braced body of a
/// class definition).
void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc,
Decl *TagDecl, SourceLocation LBrac,
SourceLocation RBrac,
const ParsedAttributesView &AttrList);
// Callbacks used while (re)entering scopes to process member declarations
// and method bodies whose parsing was delayed.
void ActOnFinishCXXMemberDecls();
void ActOnFinishCXXNonNestedClass(Decl *D);
void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param);
unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template);
void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param);
void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnFinishDelayedMemberInitializers(Decl *Record);
/// Record FD as a late-parsed template with its cached tokens.
void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD,
CachedTokens &Toks);
/// Remove FD from the set of late-parsed templates.
void UnmarkAsLateParsedTemplate(FunctionDecl *FD);
bool IsInsideALocalClassWithinATemplateFunction();
/// Process a static_assert declaration with an unparsed message.
Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
Expr *AssertMessageExpr,
SourceLocation RParenLoc);
/// Build a static_assert declaration; Failed records whether the
/// condition was already determined to be false.
Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
StringLiteral *AssertMessageExpr,
SourceLocation RParenLoc,
bool Failed);
/// Check a friend type declaration and build the FriendDecl.
FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart,
SourceLocation FriendLoc,
TypeSourceInfo *TSInfo);
/// Process a friend type declaration (e.g. 'friend class X;').
Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
MultiTemplateParamsArg TemplateParams);
/// Process a friend function declaration.
NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParams);
/// Check a constructor declarator; may adjust the type R and storage
/// class SC.
QualType CheckConstructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
/// Perform semantic checks on a constructor declaration.
void CheckConstructor(CXXConstructorDecl *Constructor);
/// Check a destructor declarator; may adjust the type R and storage
/// class SC.
QualType CheckDestructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
/// Perform semantic checks on a destructor declaration.
bool CheckDestructor(CXXDestructorDecl *Destructor);
/// Check a conversion-function declarator; may adjust R and SC.
void CheckConversionDeclarator(Declarator &D, QualType &R,
StorageClass& SC);
/// Process a conversion function declaration.
Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion);
/// Check a deduction-guide declarator; may adjust R and SC.
void CheckDeductionGuideDeclarator(Declarator &D, QualType &R,
StorageClass &SC);
void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD);
/// Check an explicitly-defaulted special member function.
void CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD);
/// Check the exception specification of an explicitly-defaulted member.
void CheckExplicitlyDefaultedMemberExceptionSpec(CXXMethodDecl *MD,
const FunctionProtoType *T);
void CheckDelayedMemberExceptionSpecs();
//===--------------------------------------------------------------------===//
// C++ Derived Classes
//
/// Check a single base specifier for a class, returning the semantic
/// representation (or null on error).
CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class,
SourceRange SpecifierRange,
bool Virtual, AccessSpecifier Access,
TypeSourceInfo *TInfo,
SourceLocation EllipsisLoc);
/// ActOnBaseSpecifier - Parsed a base specifier.
BaseResult ActOnBaseSpecifier(Decl *classdecl,
SourceRange SpecifierRange,
ParsedAttributes &Attrs,
bool Virtual, AccessSpecifier Access,
ParsedType basetype,
SourceLocation BaseLoc,
SourceLocation EllipsisLoc);
/// Attach the given base-class specifiers to \p Class; returns true on
/// error.
bool AttachBaseSpecifiers(CXXRecordDecl *Class,
MutableArrayRef<CXXBaseSpecifier *> Bases);
/// Called when all base specifiers of a class have been parsed.
void ActOnBaseSpecifiers(Decl *ClassDecl,
MutableArrayRef<CXXBaseSpecifier *> Bases);
/// Determine whether \p Derived is derived from \p Base.
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base);
/// As above, additionally recording the inheritance paths in \p Paths.
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base,
CXXBasePaths &Paths);
// FIXME: I don't like this name.
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath);
/// Check that a derived-to-base conversion is well-formed and accessible;
/// optionally fills in \p BasePath. Returns true on error.
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
SourceLocation Loc, SourceRange Range,
CXXCastPath *BasePath = nullptr,
bool IgnoreAccess = false);
/// As above, with caller-supplied diagnostic IDs for the
/// inaccessible-base and ambiguous-base error cases.
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
unsigned InaccessibleBaseID,
unsigned AmbigiousBaseConvID,
SourceLocation Loc, SourceRange Range,
DeclarationName Name,
CXXCastPath *BasePath,
bool IgnoreAccess = false);
/// Build a textual description of the ambiguous inheritance paths in
/// \p Paths, for use in diagnostics.
std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths);
/// Check attribute compatibility between an overriding method and the
/// method it overrides.
bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionReturnType - Checks whether the return types are
/// covariant, according to C++ [class.virtual]p5.
bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionExceptionSpec - Checks whether the exception
/// spec is a subset of base spec.
bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// Check a pure-specifier ('= 0') on a method.
bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange);
/// CheckOverrideControl - Check C++11 override control semantics.
void CheckOverrideControl(NamedDecl *D);
/// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was
/// not used in the declaration of an overriding method.
void DiagnoseAbsenceOfOverrideControl(NamedDecl *D);
/// CheckForFunctionMarkedFinal - Checks whether a virtual member function
/// overrides a virtual member function marked 'final', according to
/// C++11 [class.virtual]p4.
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
//===--------------------------------------------------------------------===//
// C++ Access Control
//
/// The result of a C++ access-control check.
enum AccessResult {
/// The entity is accessible in this context.
AR_accessible,
/// The entity is not accessible in this context.
AR_inaccessible,
/// Accessibility depends on template arguments not yet known.
AR_dependent,
/// The access check has been delayed for later processing.
AR_delayed
};
/// Set the access specifier of a member, checking it against any
/// previous declaration of the same member.
bool SetMemberAccessSpecifier(NamedDecl *MemberDecl,
NamedDecl *PrevMemberDecl,
AccessSpecifier LexicalAS);
/// Check access to the member named by an unresolved member expression.
AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E,
DeclAccessPair FoundDecl);
/// Check access to the declaration named by an unresolved lookup
/// expression.
AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E,
DeclAccessPair FoundDecl);
/// Check access to an allocation/deallocation function selected for a
/// new or delete expression.
AccessResult CheckAllocationAccess(SourceLocation OperatorLoc,
SourceRange PlacementRange,
CXXRecordDecl *NamingClass,
DeclAccessPair FoundDecl,
bool Diagnose = true);
/// Check access to a constructor chosen to initialize \p Entity.
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
bool IsCopyBindingRefToTemp = false);
/// As above, with a caller-supplied diagnostic for the failure case.
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
const PartialDiagnostic &PDiag);
/// Check access to a destructor invocation.
AccessResult CheckDestructorAccess(SourceLocation Loc,
CXXDestructorDecl *Dtor,
const PartialDiagnostic &PDiag,
QualType objectType = QualType());
/// Check whether \p D may be named in a friend declaration here.
AccessResult CheckFriendAccess(NamedDecl *D);
/// Check access to a class member found by name lookup.
AccessResult CheckMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *NamingClass,
DeclAccessPair Found);
/// Check access to a field bound by a structured binding declaration.
AccessResult
CheckStructuredBindingMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *DecomposedClass,
DeclAccessPair Field);
/// Check access to a member operator invoked on \p ObjectExpr.
AccessResult CheckMemberOperatorAccess(SourceLocation Loc,
Expr *ObjectExpr,
Expr *ArgExpr,
DeclAccessPair FoundDecl);
/// Check access when taking the address of an overloaded member.
AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr,
DeclAccessPair FoundDecl);
/// Check access to a base class along the given inheritance path;
/// \p DiagID selects the diagnostic emitted on failure.
AccessResult CheckBaseClassAccess(SourceLocation AccessLoc,
QualType Base, QualType Derived,
const CXXBasePath &Path,
unsigned DiagID,
bool ForceCheck = false,
bool ForceUnprivileged = false);
/// Check access for each declaration in a lookup result.
void CheckLookupAccess(const LookupResult &R);
/// Determine whether \p Decl is accessible from \p NamingClass without
/// emitting diagnostics.
bool IsSimplyAccessible(NamedDecl *Decl, CXXRecordDecl *NamingClass,
QualType BaseType);
/// Determine whether a special member is accessible, for use when
/// deciding whether a defaulted special member would be deleted.
bool isSpecialMemberAccessibleForDeletion(CXXMethodDecl *decl,
AccessSpecifier access,
QualType objectType);
/// Re-run an access check that was dependent when first encountered.
void HandleDependentAccessCheck(const DependentDiagnostic &DD,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Process diagnostics that were dependent on the given pattern context.
void PerformDependentDiagnostics(const DeclContext *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Process a delayed access-control diagnostic.
void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
/// When true, access checking violations are treated as SFINAE
/// failures rather than hard errors.
bool AccessCheckingSFINAE;
/// Identifies where an abstract class type was (mis)used, for use in
/// abstract-type diagnostics.
/// NOTE(review): values appear to correspond to a diagnostic %select
/// index — confirm against the diagnostic definitions.
enum AbstractDiagSelID {
AbstractNone = -1,
AbstractReturnType,
AbstractParamType,
AbstractVariableType,
AbstractFieldType,
AbstractIvarType,
AbstractSynthesizedIvarType,
AbstractArrayType
};
/// Determine whether \p T is an abstract class type.
bool isAbstractType(SourceLocation Loc, QualType T);
/// Require that \p T not be an abstract class type, using \p Diagnoser
/// to emit the diagnostic when it is.
bool RequireNonAbstractType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
/// Require that \p T not be an abstract class type, diagnosing with the
/// given diagnostic ID and arguments. Convenience wrapper: it bundles
/// \p DiagID and \p Args into a BoundTypeDiagnoser and forwards to the
/// TypeDiagnoser-based overload.
template <typename... Ts>
bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diag(DiagID, Args...);
return RequireNonAbstractType(Loc, T, Diag);
}
/// Emit diagnostics explaining why \p RD is an abstract class.
void DiagnoseAbstractType(const CXXRecordDecl *RD);
//===--------------------------------------------------------------------===//
// C++ Overloaded Operators [C++ 13.5]
//
/// Check the declaration of an overloaded operator; returns true on
/// error.
bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl);
/// Check the declaration of a literal operator ('operator""'); returns
/// true on error.
bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl);
//===--------------------------------------------------------------------===//
// C++ Templates [C++ 14]
//
/// Remove from \p R any lookup results that are not acceptable as
/// template names, subject to the given filters.
void FilterAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
/// Determine whether \p R contains any declarations acceptable as
/// template names.
bool hasAnyAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
/// Try to interpret the lookup result D as a template-name.
///
/// \param D A declaration found by name lookup.
/// \param AllowFunctionTemplates Whether function templates should be
/// considered valid results.
/// \param AllowDependent Whether unresolved using declarations (that might
/// name templates) should be considered valid results.
NamedDecl *getAsTemplateNameDecl(NamedDecl *D,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
/// Perform name lookup for a template-name; returns true on error.
bool LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS,
QualType ObjectType, bool EnteringContext,
bool &MemberOfUnknownSpecialization,
SourceLocation TemplateKWLoc = SourceLocation());
/// Determine whether \p Name refers to a template, and if so which kind;
/// on success \p Template receives the template name.
TemplateNameKind isTemplateName(Scope *S,
CXXScopeSpec &SS,
bool hasTemplateKeyword,
const UnqualifiedId &Name,
ParsedType ObjectType,
bool EnteringContext,
TemplateTy &Template,
bool &MemberOfUnknownSpecialization);
/// Determine whether a particular identifier might be the name in a C++1z
/// deduction-guide declaration.
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name,
SourceLocation NameLoc,
ParsedTemplateTy *Template = nullptr);
/// Diagnose a name that was expected to be a template but is not known
/// to be one, possibly computing a suggested correction.
bool DiagnoseUnknownTemplateName(const IdentifierInfo &II,
SourceLocation IILoc,
Scope *S,
const CXXScopeSpec *SS,
TemplateTy &SuggestedTemplate,
TemplateNameKind &SuggestedKind);
/// Diagnose an attempt to instantiate a template with no definition
/// available.
bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation,
NamedDecl *Instantiation,
bool InstantiatedFromMember,
const NamedDecl *Pattern,
const NamedDecl *PatternDef,
TemplateSpecializationKind TSK,
bool Complain = true);
/// Diagnose a declaration that shadows a template parameter.
void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl);
/// Adjust \p Decl if it is a template declaration; returns the
/// corresponding TemplateDecl if so (presumably null otherwise — confirm
/// in the implementation).
TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl);
/// Called when a template type parameter (e.g. 'typename T = int') has
/// been parsed.
NamedDecl *ActOnTypeParameter(Scope *S, bool Typename,
SourceLocation EllipsisLoc,
SourceLocation KeyLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth, unsigned Position,
SourceLocation EqualLoc,
ParsedType DefaultArg);
/// Check that \p TSI names a valid type for a non-type template
/// parameter, returning the (possibly adjusted) parameter type.
QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI,
SourceLocation Loc);
/// As above, operating on a bare QualType.
QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc);
/// Called when a non-type template parameter has been parsed.
NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
Expr *DefaultArg);
/// Called when a template template parameter has been parsed.
NamedDecl *ActOnTemplateTemplateParameter(Scope *S,
SourceLocation TmpLoc,
TemplateParameterList *Params,
SourceLocation EllipsisLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
ParsedTemplateArgument DefaultArg);
/// Build a TemplateParameterList from the parsed parameters and the
/// optional requires-clause.
TemplateParameterList *
ActOnTemplateParameterList(unsigned Depth,
SourceLocation ExportLoc,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> Params,
SourceLocation RAngleLoc,
Expr *RequiresClause);
/// The context in which we are checking a template parameter list.
enum TemplateParamListContext {
TPC_ClassTemplate,
TPC_VarTemplate,
TPC_FunctionTemplate,
TPC_ClassTemplateMember,
TPC_FriendClassTemplate,
TPC_FriendFunctionTemplate,
TPC_FriendFunctionTemplateDefinition,
TPC_TypeAliasTemplate
};
/// Check a template parameter list, possibly against the parameter list
/// of a previous declaration; returns true on error.
bool CheckTemplateParameterList(TemplateParameterList *NewParams,
TemplateParameterList *OldParams,
TemplateParamListContext TPC,
SkipBodyInfo *SkipBody = nullptr);
/// Match the template parameter lists written on a declaration against
/// its scope specifier, selecting the list that parameterizes the
/// declaration itself.
TemplateParameterList *MatchTemplateParametersToScopeSpecifier(
SourceLocation DeclStartLoc, SourceLocation DeclLoc,
const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId,
ArrayRef<TemplateParameterList *> ParamLists,
bool IsFriend, bool &IsMemberSpecialization, bool &Invalid);
/// Check and build a class template declaration.
DeclResult CheckClassTemplate(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc,
const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists,
TemplateParameterList **OuterTemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
/// Build a trivial TemplateArgumentLoc (one with no as-written source
/// form) for \p Arg.
TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg,
QualType NTTPType,
SourceLocation Loc);
/// Translate parsed template arguments into semantic template-argument
/// location information.
void translateTemplateArguments(const ASTTemplateArgsPtr &In,
TemplateArgumentListInfo &Out);
/// Convert a parsed type into a parsed template type argument.
ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType);
/// Emit notes for every template found for \p Name.
void NoteAllFoundTemplates(TemplateName Name);
/// Check a template-id that names a type, forming the specialization
/// type.
QualType CheckTemplateIdType(TemplateName Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs);
/// Called when a template-id naming a type has been parsed.
TypeResult
ActOnTemplateIdType(CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
TemplateTy Template, IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
bool IsCtorOrDtorName = false,
bool IsClassName = false);
/// Parsed an elaborated-type-specifier that refers to a template-id,
/// such as \c class T::template apply<U>.
TypeResult ActOnTagTemplateIdType(TagUseKind TUK,
TypeSpecifierType TagSpec,
SourceLocation TagLoc,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateD,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgsIn,
SourceLocation RAngleLoc);
/// Handle a variable template (partial) specialization declarator.
DeclResult ActOnVarTemplateSpecialization(
Scope *S, Declarator &D, TypeSourceInfo *DI,
SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams,
StorageClass SC, bool IsPartialSpecialization);
/// Form the declaration of a variable template specialization.
DeclResult CheckVarTemplateId(VarTemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation TemplateNameLoc,
const TemplateArgumentListInfo &TemplateArgs);
/// Form a reference expression to a variable template specialization.
ExprResult CheckVarTemplateId(const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
VarTemplateDecl *Template,
SourceLocation TemplateLoc,
const TemplateArgumentListInfo *TemplateArgs);
/// Diagnose use of a template name that requires template arguments
/// when none were provided.
void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc);
/// Build a (possibly overloaded) template-id expression from a lookup
/// result.
ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
bool RequiresADL,
const TemplateArgumentListInfo *TemplateArgs);
/// Build a qualified template-id expression (e.g. 'N::f<int>').
ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
/// Form a dependent template name (e.g. 'T::template apply').
TemplateNameKind ActOnDependentTemplateName(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext,
TemplateTy &Template, bool AllowInjectedClassName = false);
/// Process a class template specialization declaration.
DeclResult ActOnClassTemplateSpecialization(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
SourceLocation ModulePrivateLoc, TemplateIdAnnotation &TemplateId,
const ParsedAttributesView &Attr,
MultiTemplateParamsArg TemplateParameterLists,
SkipBodyInfo *SkipBody = nullptr);
/// Check the template arguments of a partial specialization; returns
/// true on error.
bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc,
TemplateDecl *PrimaryTemplate,
unsigned NumExplicitArgs,
ArrayRef<TemplateArgument> Args);
/// Perform semantic checks on a class template partial specialization.
void CheckTemplatePartialSpecialization(
ClassTemplatePartialSpecializationDecl *Partial);
/// Perform semantic checks on a variable template partial specialization.
void CheckTemplatePartialSpecialization(
VarTemplatePartialSpecializationDecl *Partial);
/// Process a declarator preceded by template parameter lists.
Decl *ActOnTemplateDeclarator(Scope *S,
MultiTemplateParamsArg TemplateParameterLists,
Declarator &D);
/// Check whether a new explicit specialization/instantiation is
/// compatible with a previous one; \p SuppressNew is set when the new
/// declaration should be suppressed.
bool
CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
TemplateSpecializationKind NewTSK,
NamedDecl *PrevDecl,
TemplateSpecializationKind PrevTSK,
SourceLocation PrevPtOfInstantiation,
bool &SuppressNew);
/// Check a dependent function template specialization declaration.
bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD,
const TemplateArgumentListInfo &ExplicitTemplateArgs,
LookupResult &Previous);
/// Check an explicit function template specialization; returns true on
/// error.
bool CheckFunctionTemplateSpecialization(
FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs,
LookupResult &Previous, bool QualifiedFriend = false);
/// Check a member specialization declaration; returns true on error.
bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
/// Complete the processing of a member specialization.
void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
/// Process an explicit instantiation of a class template specialization
/// named via a template-id.
DeclResult ActOnExplicitInstantiation(
Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS,
TemplateTy Template, SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc, const ParsedAttributesView &Attr);
/// Process an explicit instantiation naming a class without a
/// template-id.
DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc,
SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr);
/// Process an explicit instantiation declared via a declarator
/// (function or variable).
DeclResult ActOnExplicitInstantiation(Scope *S,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
Declarator &D);
/// Substitute into the default argument of \p Param, if it has one;
/// \p HasDefaultArg reports whether a default argument existed.
TemplateArgumentLoc
SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
Decl *Param,
SmallVectorImpl<TemplateArgument>
&Converted,
bool &HasDefaultArg);
/// Specifies the context in which a particular template
/// argument is being checked.
enum CheckTemplateArgumentKind {
/// The template argument was specified in the code or was
/// instantiated with some deduced template arguments.
CTAK_Specified,
/// The template argument was deduced via template argument
/// deduction.
CTAK_Deduced,
/// The template argument was deduced from an array bound
/// via template argument deduction.
CTAK_DeducedFromArrayBound
};
/// Check a single template argument against the parameter \p Param,
/// appending the converted argument to \p Converted; returns true on
/// error.
bool CheckTemplateArgument(NamedDecl *Param,
TemplateArgumentLoc &Arg,
NamedDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
unsigned ArgumentPackIndex,
SmallVectorImpl<TemplateArgument> &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
/// Check that the given template arguments can be provided to
/// the given template, converting the arguments along the way.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateLoc The location of the template name in the source.
///
/// \param TemplateArgs The list of template arguments. If the template is
/// a template template parameter, this function may extend the set of
/// template arguments to also include substituted, defaulted template
/// arguments.
///
/// \param PartialTemplateArgs True if the list of template arguments is
/// intentionally partial, e.g., because we're checking just the initial
/// set of template arguments.
///
/// \param Converted Will receive the converted, canonicalized template
/// arguments.
///
/// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to
/// contain the converted forms of the template arguments as written.
/// Otherwise, \p TemplateArgs will not be modified.
///
/// \returns true if an error occurred, false otherwise.
bool CheckTemplateArgumentList(TemplateDecl *Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs,
bool PartialTemplateArgs,
SmallVectorImpl<TemplateArgument> &Converted,
bool UpdateArgsWithConversions = true);
/// Check a type template argument against a type template parameter,
/// appending the converted argument to \p Converted.
bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
TemplateArgumentLoc &Arg,
SmallVectorImpl<TemplateArgument> &Converted);
/// Check whether the type \p Arg is a valid argument for the template
/// type parameter \p Param.
bool CheckTemplateArgument(TemplateTypeParmDecl *Param,
TypeSourceInfo *Arg);
/// Check a non-type template argument, producing the converted argument
/// in \p Converted on success.
ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
QualType InstantiatedParamType, Expr *Arg,
TemplateArgument &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
/// Check a template template argument against the parameter list of a
/// template template parameter.
bool CheckTemplateTemplateArgument(TemplateParameterList *Params,
TemplateArgumentLoc &Arg);
/// Build an expression referring to the declaration held by a template
/// argument.
ExprResult
BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
QualType ParamType,
SourceLocation Loc);
/// Build an expression from an integral template argument.
ExprResult
BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
SourceLocation Loc);
/// Enumeration describing how template parameter lists are compared
/// for equality.
enum TemplateParameterListEqualKind {
/// We are matching the template parameter lists of two templates
/// that might be redeclarations.
///
/// \code
/// template<typename T> struct X;
/// template<typename T> struct X;
/// \endcode
TPL_TemplateMatch,
/// We are matching the template parameter lists of two template
/// template parameters as part of matching the template parameter lists
/// of two templates that might be redeclarations.
///
/// \code
/// template<template<int I> class TT> struct X;
/// template<template<int Value> class Other> struct X;
/// \endcode
TPL_TemplateTemplateParmMatch,
/// We are matching the template parameter lists of a template
/// template argument against the template parameter lists of a template
/// template parameter.
///
/// \code
/// template<template<int Value> class Metafun> struct X;
/// template<int Value> struct integer_c;
/// X<integer_c> xic;
/// \endcode
TPL_TemplateTemplateArgumentMatch
};
/// Determine whether two template parameter lists are equivalent for the
/// purpose described by \p Kind; emits diagnostics when \p Complain is
/// true.
bool TemplateParameterListsAreEqual(TemplateParameterList *New,
TemplateParameterList *Old,
bool Complain,
TemplateParameterListEqualKind Kind,
SourceLocation TemplateArgLoc
= SourceLocation());
/// Check that a template declaration appears in an acceptable scope;
/// returns true on error.
bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams);
/// Called when the parser has parsed a C++ typename
/// specifier, e.g., "typename T::type".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param II the identifier we're retrieving (e.g., 'type' in the example).
/// \param IdLoc the location of the identifier.
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS, const IdentifierInfo &II,
SourceLocation IdLoc);
/// Called when the parser has parsed a C++ typename
/// specifier that ends in a template-id, e.g.,
/// "typename MetaFun::template apply<T1, T2>".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param TemplateLoc the location of the 'template' keyword, if any.
/// \param TemplateName The template name.
/// \param TemplateII The identifier used to name the template.
/// \param TemplateIILoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateLoc,
TemplateTy TemplateName,
IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc);
/// Build the type denoted by a dependent typename specifier.
QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
SourceLocation KeywordLoc,
NestedNameSpecifierLoc QualifierLoc,
const IdentifierInfo &II,
SourceLocation IILoc);
/// Rebuild a type within the context of the current instantiation.
TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T,
SourceLocation Loc,
DeclarationName Name);
/// Rebuild a nested-name-specifier in the current instantiation; returns
/// true on error.
bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS);
/// Rebuild an expression in the context of the current instantiation.
ExprResult RebuildExprInCurrentInstantiation(Expr *E);
/// Rebuild a template parameter list in the current instantiation;
/// returns true on error.
bool RebuildTemplateParamsInCurrentInstantiation(
TemplateParameterList *Params);
/// Produce a textual description of the given template argument
/// bindings, for use in diagnostics.
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgumentList &Args);
/// As above, for an explicit array of template arguments.
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgument *Args,
unsigned NumArgs);
//===--------------------------------------------------------------------===//
// C++ Variadic Templates (C++0x [temp.variadic])
//===--------------------------------------------------------------------===//
/// Determine whether an unexpanded parameter pack might be permitted in this
/// location. Useful for error recovery.
bool isUnexpandedParameterPackPermitted();
/// The context in which an unexpanded parameter pack is
/// being diagnosed.
///
/// Note that the values of this enumeration line up with the first
/// argument to the \c err_unexpanded_parameter_pack diagnostic.
enum UnexpandedParameterPackContext {
/// An arbitrary expression.
UPPC_Expression = 0,
/// The base type of a class type.
UPPC_BaseType,
/// The type of an arbitrary declaration.
UPPC_DeclarationType,
/// The type of a data member.
UPPC_DataMemberType,
/// The size of a bit-field.
UPPC_BitFieldWidth,
/// The expression in a static assertion.
UPPC_StaticAssertExpression,
/// The fixed underlying type of an enumeration.
UPPC_FixedUnderlyingType,
/// The enumerator value.
UPPC_EnumeratorValue,
/// A using declaration.
UPPC_UsingDeclaration,
/// A friend declaration.
UPPC_FriendDeclaration,
/// A declaration qualifier.
UPPC_DeclarationQualifier,
/// An initializer.
UPPC_Initializer,
/// A default argument.
UPPC_DefaultArgument,
/// The type of a non-type template parameter.
UPPC_NonTypeTemplateParameterType,
/// The type of an exception.
UPPC_ExceptionType,
/// Partial specialization.
UPPC_PartialSpecialization,
/// Microsoft __if_exists.
UPPC_IfExists,
/// Microsoft __if_not_exists.
UPPC_IfNotExists,
/// Lambda expression.
UPPC_Lambda,
/// Block expression.
UPPC_Block
};
/// Diagnose unexpanded parameter packs.
///
/// \param Loc The location at which we should emit the diagnostic.
///
/// \param UPPC The context in which we are diagnosing unexpanded
/// parameter packs.
///
/// \param Unexpanded the set of unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
UnexpandedParameterPackContext UPPC,
ArrayRef<UnexpandedParameterPack> Unexpanded);
/// If the given type contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The source location where a diagnostic should be emitted.
///
/// \param T The type that is being checked for unexpanded parameter
/// packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T,
UnexpandedParameterPackContext UPPC);
/// If the given expression contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param E The expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(Expr *E,
UnexpandedParameterPackContext UPPC = UPPC_Expression);
/// If the given nested-name-specifier contains an unexpanded
/// parameter pack, diagnose the error.
///
/// \param SS The nested-name-specifier that is being checked for
/// unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS,
UnexpandedParameterPackContext UPPC);
/// If the given name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param NameInfo The name (with source location information) that
/// is being checked for unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo,
UnexpandedParameterPackContext UPPC);
/// If the given template name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The location of the template name.
///
/// \param Template The template name that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc,
TemplateName Template,
UnexpandedParameterPackContext UPPC);
/// If the given template argument contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param Arg The template argument that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg,
UnexpandedParameterPackContext UPPC);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgument Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param T The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(QualType T,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param TL The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TypeLoc TL,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// nested-name-specifier.
///
/// \param NNS The nested-name-specifier that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// name.
///
/// \param NameInfo The name that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Invoked when parsing a template argument followed by an
/// ellipsis, which creates a pack expansion.
///
/// \param Arg The template argument preceding the ellipsis, which
/// may already be invalid.
///
/// \param EllipsisLoc The location of the ellipsis.
ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg,
SourceLocation EllipsisLoc);
/// Invoked when parsing a type followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Type The type preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
///
/// \param NumExpansions The number of expansions, if already known.
TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
///
/// \param NumExpansions The number of expansions, if already known.
QualType CheckPackExpansion(QualType Pattern,
SourceRange PatternRange,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Determine whether we could expand a pack expansion with the
/// given set of parameter packs into separate arguments by repeatedly
/// transforming the pattern.
///
/// \param EllipsisLoc The location of the ellipsis that identifies the
/// pack expansion.
///
/// \param PatternRange The source range that covers the entire pattern of
/// the pack expansion.
///
/// \param Unexpanded The set of unexpanded parameter packs within the
/// pattern.
///
/// \param ShouldExpand Will be set to \c true if the transformer should
/// expand the corresponding pack expansions into separate arguments. When
/// set, \c NumExpansions must also be set.
///
/// \param RetainExpansion Whether the caller should add an unexpanded
/// pack expansion after all of the expanded arguments. This is used
/// when extending explicitly-specified template argument packs per
/// C++0x [temp.arg.explicit]p9.
///
/// \param NumExpansions The number of separate arguments that will be in
/// the expanded form of the corresponding pack expansion. This is both an
/// input and an output parameter, which can be set by the caller if the
/// number of expansions is known a priori (e.g., due to a prior substitution)
/// and will be set by the callee when the number of expansions is known.
/// The callee must set this value when \c ShouldExpand is \c true; it may
/// set this value in other cases.
///
/// \returns true if an error occurred (e.g., because the parameter packs
/// are to be instantiated with arguments of different lengths), false
/// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions)
/// must be set.
bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc,
SourceRange PatternRange,
ArrayRef<UnexpandedParameterPack> Unexpanded,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool &ShouldExpand,
bool &RetainExpansion,
Optional<unsigned> &NumExpansions);
/// Determine the number of arguments in the given pack expansion
/// type.
///
/// This routine assumes that the number of arguments in the expansion is
/// consistent across all of the unexpanded parameter packs in its pattern.
///
/// Returns an empty Optional if the type can't be expanded.
Optional<unsigned> getNumArgumentsInExpansion(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Determine whether the given declarator contains any unexpanded
/// parameter packs.
///
/// This routine is used by the parser to disambiguate function declarators
/// with an ellipsis prior to the ')', e.g.,
///
/// \code
/// void f(T...);
/// \endcode
///
/// To determine whether we have an (unnamed) function parameter pack or
/// a variadic function.
///
/// \returns true if the declarator contains any unexpanded parameter packs,
/// false otherwise.
bool containsUnexpandedParameterPacks(Declarator &D);
/// Returns the pattern of the pack expansion for a template argument.
///
/// \param OrigLoc The template argument to expand.
///
/// \param Ellipsis Will be set to the location of the ellipsis.
///
/// \param NumExpansions Will be set to the number of expansions that will
/// be generated from this pack expansion, if known a priori.
TemplateArgumentLoc getTemplateArgumentPackExpansionPattern(
TemplateArgumentLoc OrigLoc,
SourceLocation &Ellipsis,
Optional<unsigned> &NumExpansions) const;
/// Given a template argument that contains an unexpanded parameter pack, but
/// which has already been substituted, attempt to determine the number of
/// elements that will be produced once this argument is fully-expanded.
///
/// This is intended for use when transforming 'sizeof...(Arg)' in order to
/// avoid actually expanding the pack where possible.
Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg);
//===--------------------------------------------------------------------===//
// C++ Template Argument Deduction (C++ [temp.deduct])
//===--------------------------------------------------------------------===//
/// Adjust the type \p ArgFunctionType to match the calling convention,
/// noreturn, and optionally the exception specification of \p FunctionType.
/// Deduction often wants to ignore these properties when matching function
/// types.
QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType,
bool AdjustExceptionSpec = false);
/// Describes the result of template argument deduction.
///
/// The TemplateDeductionResult enumeration describes the result of
/// template argument deduction, as returned from
/// DeduceTemplateArguments(). The separate TemplateDeductionInfo
/// structure provides additional information about the results of
/// template argument deduction, e.g., the deduced template argument
/// list (if successful) or the specific template parameters or
/// deduced arguments that were involved in the failure.
enum TemplateDeductionResult {
/// Template argument deduction was successful.
TDK_Success = 0,
/// The declaration was invalid; do nothing.
TDK_Invalid,
/// Template argument deduction exceeded the maximum template
/// instantiation depth (which has already been diagnosed).
TDK_InstantiationDepth,
/// Template argument deduction did not deduce a value
/// for every template parameter.
TDK_Incomplete,
/// Template argument deduction did not deduce a value for every
/// expansion of an expanded template parameter pack.
TDK_IncompletePack,
/// Template argument deduction produced inconsistent
/// deduced values for the given template parameter.
TDK_Inconsistent,
/// Template argument deduction failed due to inconsistent
/// cv-qualifiers on a template parameter type that would
/// otherwise be deduced, e.g., we tried to deduce T in "const T"
/// but were given a non-const "X".
TDK_Underqualified,
/// Substitution of the deduced template argument values
/// resulted in an error.
TDK_SubstitutionFailure,
/// After substituting deduced template arguments, a dependent
/// parameter type did not match the corresponding argument.
TDK_DeducedMismatch,
/// After substituting deduced template arguments, an element of
/// a dependent parameter type did not match the corresponding element
/// of the corresponding argument (when deducing from an initializer list).
TDK_DeducedMismatchNested,
/// A non-dependent component of the parameter did not match the
/// corresponding component of the argument.
TDK_NonDeducedMismatch,
/// When performing template argument deduction for a function
/// template, there were too many call arguments.
TDK_TooManyArguments,
/// When performing template argument deduction for a function
/// template, there were too few call arguments.
TDK_TooFewArguments,
/// The explicitly-specified template arguments were not valid
/// template arguments for the given template.
TDK_InvalidExplicitArguments,
/// Checking non-dependent argument conversions failed.
TDK_NonDependentConversionFailure,
/// Deduction failed; that's all we know.
TDK_MiscellaneousDeductionFailure,
/// CUDA Target attributes do not match.
TDK_CUDATargetMismatch
};
/// Perform template argument deduction for the given class template
/// partial specialization against the given template argument list.
TemplateDeductionResult
DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
/// Perform template argument deduction for the given variable template
/// partial specialization against the given template argument list.
TemplateDeductionResult
DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
/// Substitute the explicitly-specified template arguments into the
/// function template, filling in the deduced-argument and parameter-type
/// vectors used by the remainder of function template argument deduction.
TemplateDeductionResult SubstituteExplicitTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo &ExplicitTemplateArgs,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
sema::TemplateDeductionInfo &Info);
/// \brief A function argument from which we performed template argument
/// deduction for a call.
struct OriginalCallArg {
OriginalCallArg(QualType OriginalParamType, bool DecomposedParam,
unsigned ArgIdx, QualType OriginalArgType)
: OriginalParamType(OriginalParamType),
DecomposedParam(DecomposedParam), ArgIdx(ArgIdx),
OriginalArgType(OriginalArgType) {}
/// The parameter type from which deduction was performed.
QualType OriginalParamType;
/// Whether the parameter was decomposed for deduction.
/// NOTE(review): presumably initializer-list decomposition -- confirm.
bool DecomposedParam;
/// Index of the corresponding argument within the call argument list.
unsigned ArgIdx;
/// The original type of the call argument.
QualType OriginalArgType;
};
/// Finish function template argument deduction once all deducible
/// template arguments are known, producing the resulting specialization.
TemplateDeductionResult FinishTemplateArgumentDeduction(
FunctionTemplateDecl *FunctionTemplate,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
unsigned NumExplicitlySpecified, FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr,
bool PartialOverloading = false,
llvm::function_ref<bool()> CheckNonDependent = []{ return false; });
/// Perform template argument deduction for a function template from a
/// set of call arguments.
TemplateDeductionResult DeduceTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info,
bool PartialOverloading,
llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent);
/// Perform template argument deduction for a function template from the
/// type of the function being targeted (e.g., when taking its address).
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ArgFunctionType,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
/// Perform template argument deduction for a conversion function
/// template from the type being converted to.
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
QualType ToType,
CXXConversionDecl *&Specialization,
sema::TemplateDeductionInfo &Info);
/// Perform template argument deduction for a function template given
/// only explicitly-specified template arguments (no call arguments or
/// target type to deduce against).
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
/// Substitute Replacement for \p auto in \p TypeWithAuto
QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement);
/// Substitute Replacement for auto in TypeWithAuto
TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
QualType Replacement);
/// Completely replace the \c auto in \p TypeWithAuto by
/// \p Replacement. This does not retain any \c auto type sugar.
QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement);
/// Result type of DeduceAutoType.
enum DeduceAutoResult {
/// Deduction succeeded.
DAR_Succeeded,
/// Deduction failed.
DAR_Failed,
/// Deduction failed, and a diagnostic has already been emitted.
DAR_FailedAlreadyDiagnosed
};
DeduceAutoResult
DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None);
DeduceAutoResult
DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None);
void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init);
bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc,
bool Diagnose = true);
/// Declare implicit deduction guides for a class template if we've
/// not already done so.
void DeclareImplicitDeductionGuides(TemplateDecl *Template,
SourceLocation Loc);
QualType DeduceTemplateSpecializationFromInitializer(
TypeSourceInfo *TInfo, const InitializedEntity &Entity,
const InitializationKind &Kind, MultiExprArg Init);
QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name,
QualType Type, TypeSourceInfo *TSI,
SourceRange Range, bool DirectInit,
Expr *&Init);
TypeLoc getReturnTypeLoc(FunctionDecl *FD) const;
bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD,
SourceLocation ReturnLoc,
Expr *&RetExpr, AutoType *AT);
/// Find the most specialized function template in the range
/// [SBegin, SEnd), emitting the given diagnostics on failure or
/// ambiguity.
UnresolvedSetIterator
getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd,
TemplateSpecCandidateSet &FailedCandidates,
SourceLocation Loc,
const PartialDiagnostic &NoneDiag,
const PartialDiagnostic &AmbigDiag,
const PartialDiagnostic &CandidateDiag,
bool Complain = true, QualType TargetType = QualType());
ClassTemplatePartialSpecializationDecl *
getMoreSpecializedPartialSpecialization(
ClassTemplatePartialSpecializationDecl *PS1,
ClassTemplatePartialSpecializationDecl *PS2,
SourceLocation Loc);
bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization(
VarTemplatePartialSpecializationDecl *PS1,
VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc);
bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
bool isTemplateTemplateParameterAtLeastAsSpecializedAs(
TemplateParameterList *P, TemplateDecl *AArg, SourceLocation Loc);
/// Mark the template parameters that are used by the given template
/// arguments.
void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs,
bool OnlyDeduced,
unsigned Depth,
llvm::SmallBitVector &Used);
/// Mark which template parameters are deducible from the function
/// template's parameter types; delegates to the static overload using
/// this Sema's ASTContext.
void MarkDeducedTemplateParameters(
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced) {
return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced);
}
static void MarkDeducedTemplateParameters(ASTContext &Ctx,
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced);
/// A context in which code is being synthesized (where a source location
/// alone is not sufficient to identify the context). This covers template
/// instantiation and various forms of implicitly-generated functions.
struct CodeSynthesisContext {
/// The kind of template instantiation we are performing
enum SynthesisKind {
/// We are instantiating a template declaration. The entity is
/// the declaration we're instantiating (e.g., a CXXRecordDecl).
TemplateInstantiation,
/// We are instantiating a default argument for a template
/// parameter. The Entity is the template parameter whose argument is
/// being instantiated, the Template is the template, and the
/// TemplateArgs/NumTemplateArguments provide the template arguments as
/// specified.
DefaultTemplateArgumentInstantiation,
/// We are instantiating a default argument for a function.
/// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs
/// provides the template arguments as specified.
DefaultFunctionArgumentInstantiation,
/// We are substituting explicit template arguments provided for
/// a function template. The entity is a FunctionTemplateDecl.
ExplicitTemplateArgumentSubstitution,
/// We are substituting template argument determined as part of
/// template argument deduction for either a class template
/// partial specialization or a function template. The
/// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or
/// a TemplateDecl.
DeducedTemplateArgumentSubstitution,
/// We are substituting prior template arguments into a new
/// template parameter. The template parameter itself is either a
/// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl.
PriorTemplateArgumentSubstitution,
/// We are checking the validity of a default template argument that
/// has been used when naming a template-id.
DefaultTemplateArgumentChecking,
/// We are computing the exception specification for a defaulted special
/// member function.
ExceptionSpecEvaluation,
/// We are instantiating the exception specification for a function
/// template which was deferred until it was needed.
ExceptionSpecInstantiation,
/// We are declaring an implicit special member function.
DeclaringSpecialMember,
/// We are defining a synthesized function (such as a defaulted special
/// member).
DefiningSynthesizedFunction,
/// Added for Template instantiation observation.
/// Memoization means we are _not_ instantiating a template because
/// it is already instantiated (but we entered a context where we
/// would have had to if it was not already instantiated).
Memoization
} Kind;
/// Was the enclosing context a non-instantiation SFINAE context?
bool SavedInNonInstantiationSFINAEContext;
/// The point of instantiation or synthesis within the source code.
SourceLocation PointOfInstantiation;
/// The entity that is being synthesized.
Decl *Entity;
/// The template (or partial specialization) in which we are
/// performing the instantiation, for substitutions of prior template
/// arguments.
NamedDecl *Template;
/// The list of template arguments we are substituting, if they
/// are not part of the entity.
const TemplateArgument *TemplateArgs;
// FIXME: Wrap this union around more members, or perhaps store the
// kind-specific members in the RAII object owning the context.
union {
/// The number of template arguments in TemplateArgs.
unsigned NumTemplateArgs;
/// The special member being declared or defined.
CXXSpecialMember SpecialMember;
};
ArrayRef<TemplateArgument> template_arguments() const {
assert(Kind != DeclaringSpecialMember);
return {TemplateArgs, NumTemplateArgs};
}
/// The template deduction info object associated with the
/// substitution or checking of explicit or deduced template arguments.
sema::TemplateDeductionInfo *DeductionInfo;
/// The source range that covers the construct that cause
/// the instantiation, e.g., the template-id that causes a class
/// template instantiation.
SourceRange InstantiationRange;
CodeSynthesisContext()
: Kind(TemplateInstantiation), Entity(nullptr), Template(nullptr),
TemplateArgs(nullptr), NumTemplateArgs(0), DeductionInfo(nullptr) {}
/// Determines whether this template is an actual instantiation
/// that should be counted toward the maximum instantiation depth.
bool isInstantiationRecord() const;
};
/// List of active code synthesis contexts.
///
/// This vector is treated as a stack. As synthesis of one entity requires
/// synthesis of another, additional contexts are pushed onto the stack.
SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts;
/// Specializations whose definitions are currently being instantiated.
llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations;
/// Non-dependent types used in templates that have already been instantiated
/// by some template instantiation.
llvm::DenseSet<QualType> InstantiatedNonDependentTypes;
/// Extra modules inspected when performing a lookup during a template
/// instantiation. Computed lazily.
SmallVector<Module*, 16> CodeSynthesisContextLookupModules;
/// Cache of additional modules that should be used for name lookup
/// within the current template instantiation. Computed lazily; use
/// getLookupModules() to get a complete set.
llvm::DenseSet<Module*> LookupModulesCache;
/// Get the set of additional modules that should be checked during
/// name lookup. A module and its imports become visible when instanting a
/// template defined within it.
llvm::DenseSet<Module*> &getLookupModules();
/// Map from the most recent declaration of a namespace to the most
/// recent visible declaration of that namespace.
llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache;
/// Whether we are in a SFINAE context that is not associated with
/// template instantiation.
///
/// This is used when setting up a SFINAE trap (\c see SFINAETrap) outside
/// of a template instantiation or template argument deduction.
bool InNonInstantiationSFINAEContext;
/// The number of \p CodeSynthesisContexts that are not template
/// instantiations and, therefore, should not be counted as part of the
/// instantiation depth.
///
/// When the instantiation depth reaches the user-configurable limit
/// \p LangOptions::InstantiationDepth we will abort instantiation.
// FIXME: Should we have a similar limit for other forms of synthesis?
unsigned NonInstantiationEntries;
/// The depth of the context stack at the point when the most recent
/// error or warning was produced.
///
/// This value is used to suppress printing of redundant context stacks
/// when there are multiple errors or warnings in the same instantiation.
// FIXME: Does this belong in Sema? It's tough to implement it anywhere else.
unsigned LastEmittedCodeSynthesisContextDepth = 0;
/// The template instantiation callbacks to trace or track
/// instantiations (objects can be chained).
///
/// This callbacks is used to print, trace or track template
/// instantiations as they are being constructed.
std::vector<std::unique_ptr<TemplateInstantiationCallback>>
TemplateInstCallbacks;
/// The current index into pack expansion arguments that will be
/// used for substitution of parameter packs.
///
/// The pack expansion index will be -1 to indicate that parameter packs
/// should be instantiated as themselves. Otherwise, the index specifies
/// which argument within the parameter pack will be used for substitution.
int ArgumentPackSubstitutionIndex;
/// RAII object used to change the argument pack substitution index
/// within a \c Sema object.
///
/// See \c ArgumentPackSubstitutionIndex for more information.
class ArgumentPackSubstitutionIndexRAII {
Sema &Self;
int OldSubstitutionIndex;
public:
ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
: Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) {
Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
}
~ArgumentPackSubstitutionIndexRAII() {
Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex;
}
};
// NOTE(review): this friend names 'ArgumentPackSubstitutionRAII', not the
// 'ArgumentPackSubstitutionIndexRAII' class defined above -- confirm the
// intended target.
friend class ArgumentPackSubstitutionRAII;
/// For each declaration that involved template argument deduction, the
/// set of diagnostics that were suppressed during that template argument
/// deduction.
///
/// FIXME: Serialize this structure to the AST file.
typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
SuppressedDiagnosticsMap;
SuppressedDiagnosticsMap SuppressedDiagnostics;
/// A stack object to be created when performing template
/// instantiation.
///
/// Construction of an object of type \c InstantiatingTemplate
/// pushes the current instantiation onto the stack of active
/// instantiations. If the size of this stack exceeds the maximum
/// number of recursive template instantiations, construction
/// produces an error and evaluates true.
///
/// Destruction of this object will pop the named instantiation off
/// the stack.
struct InstantiatingTemplate {
/// Note that we are instantiating a class template,
/// function template, variable template, alias template,
/// or a member thereof.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
Decl *Entity,
SourceRange InstantiationRange = SourceRange());
/// Tag type used to select the exception-specification constructor below.
struct ExceptionSpecification {};
/// Note that we are instantiating an exception specification
/// of a function template.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionDecl *Entity, ExceptionSpecification,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating a default argument in a
/// template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateParameter Param, TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// Note that we are substituting either explicitly-specified or
/// deduced template arguments during function template argument deduction.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionTemplateDecl *FunctionTemplate,
ArrayRef<TemplateArgument> TemplateArgs,
CodeSynthesisContext::SynthesisKind Kind,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a class template declaration.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a class template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ClassTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a variable template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
VarTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating a default argument for a function
/// parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ParmVarDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// Note that we are substituting prior template arguments into a
/// non-type parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
NonTypeTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we are substituting prior template arguments into a
/// template template parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
TemplateTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we are checking the default template argument
/// against the template parameter for a given template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
NamedDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we have finished instantiating this template.
void Clear();
~InstantiatingTemplate() { Clear(); }
/// Determines whether we have exceeded the maximum
/// recursive template instantiations.
bool isInvalid() const { return Invalid; }
/// Determine whether we are already instantiating this
/// specialization in some surrounding active instantiation.
bool isAlreadyInstantiating() const { return AlreadyInstantiating; }
private:
Sema &SemaRef;
/// Whether construction of this object failed (see isInvalid()).
bool Invalid;
/// Whether this specialization was already being instantiated when this
/// object was constructed (see isAlreadyInstantiating()).
bool AlreadyInstantiating;
bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
SourceRange InstantiationRange);
/// Common implementation used by the public constructors above.
InstantiatingTemplate(
Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind,
SourceLocation PointOfInstantiation, SourceRange InstantiationRange,
Decl *Entity, NamedDecl *Template = nullptr,
ArrayRef<TemplateArgument> TemplateArgs = None,
sema::TemplateDeductionInfo *DeductionInfo = nullptr);
InstantiatingTemplate(const InstantiatingTemplate&) = delete;
InstantiatingTemplate&
operator=(const InstantiatingTemplate&) = delete;
};
void pushCodeSynthesisContext(CodeSynthesisContext Ctx);
void popCodeSynthesisContext();
/// Determine whether we are currently performing template instantiation.
bool inTemplateInstantiation() const {
return CodeSynthesisContexts.size() > NonInstantiationEntries;
}
/// Print the current synthesis context stack if it has not already been
/// printed at this depth, then the pragma-attribute instantiation point
/// if one is active.
void PrintContextStack() {
if (!CodeSynthesisContexts.empty() &&
CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) {
PrintInstantiationStack();
LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size();
}
if (PragmaAttributeCurrentTargetDecl)
PrintPragmaAttributeInstantiationPoint();
}
void PrintInstantiationStack();
void PrintPragmaAttributeInstantiationPoint();
/// Determines whether we are currently in a context where
/// template argument substitution failures are not considered
/// errors.
///
/// \returns An empty \c Optional if we're not in a SFINAE context.
/// Otherwise, contains a pointer that, if non-NULL, contains the nearest
/// template-deduction context object, which can be used to capture
/// diagnostics that will be suppressed.
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;
/// Determines whether we are currently in a context that
/// is not evaluated as per C++ [expr] p5.
bool isUnevaluatedContext() const {
assert(!ExprEvalContexts.empty() &&
"Must be in an expression evaluation context");
return ExprEvalContexts.back().isUnevaluated();
}
/// RAII class used to determine whether SFINAE has
/// trapped any errors that occur during template argument
/// deduction.
class SFINAETrap {
Sema &SemaRef;
// Saved state restored by the destructor.
unsigned PrevSFINAEErrors;
bool PrevInNonInstantiationSFINAEContext;
bool PrevAccessCheckingSFINAE;
bool PrevLastDiagnosticIgnored;
public:
explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false)
: SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors),
PrevInNonInstantiationSFINAEContext(
SemaRef.InNonInstantiationSFINAEContext),
PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE),
PrevLastDiagnosticIgnored(
SemaRef.getDiagnostics().isLastDiagnosticIgnored())
{
if (!SemaRef.isSFINAEContext())
SemaRef.InNonInstantiationSFINAEContext = true;
SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE;
}
~SFINAETrap() {
SemaRef.NumSFINAEErrors = PrevSFINAEErrors;
SemaRef.InNonInstantiationSFINAEContext
= PrevInNonInstantiationSFINAEContext;
SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE;
SemaRef.getDiagnostics().setLastDiagnosticIgnored(
PrevLastDiagnosticIgnored);
}
/// Determine whether any SFINAE errors have been trapped.
bool hasErrorOccurred() const {
return SemaRef.NumSFINAEErrors > PrevSFINAEErrors;
}
};
/// RAII class used to indicate that we are performing provisional
/// semantic analysis to determine the validity of a construct, so
/// typo-correction and diagnostics in the immediate context (not within
/// implicitly-instantiated templates) should be suppressed.
class TentativeAnalysisScope {
Sema &SemaRef;
// FIXME: Using a SFINAETrap for this is a hack.
SFINAETrap Trap;
bool PrevDisableTypoCorrection;
public:
explicit TentativeAnalysisScope(Sema &SemaRef)
: SemaRef(SemaRef), Trap(SemaRef, true),
PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) {
SemaRef.DisableTypoCorrection = true;
}
~TentativeAnalysisScope() {
SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection;
}
};
/// The current instantiation scope used to store local
/// variables.
LocalInstantiationScope *CurrentInstantiationScope;
/// Tracks whether we are in a context where typo correction is
/// disabled.
bool DisableTypoCorrection;
/// The number of typos corrected by CorrectTypo.
unsigned TyposCorrected;
typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet;
typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations;
/// A cache containing identifiers for which typo correction failed and
/// their locations, so that repeated attempts to correct an identifier in a
/// given location are ignored if typo correction already failed for it.
IdentifierSourceLocations TypoCorrectionFailures;
/// Worker object for performing CFG-based warnings.
sema::AnalysisBasedWarnings AnalysisWarnings;
threadSafety::BeforeSet *ThreadSafetyDeclCache;
/// An entity for which implicit template instantiation is required.
///
/// The source location associated with the declaration is the first place in
/// the source code where the declaration was "used". It is not necessarily
/// the point of instantiation (which will be either before or after the
/// namespace-scope declaration that triggered this implicit instantiation).
/// However, it is the location that diagnostics should generally refer to,
/// because users will need to know what code triggered the instantiation.
typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;
/// The queue of implicit template instantiations that are required
/// but have not yet been performed.
std::deque<PendingImplicitInstantiation> PendingInstantiations;
/// Queue of implicit template instantiations that cannot be performed
/// eagerly.
SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations;
/// RAII-style scope that, when enabled, stashes the global queues of
/// pending instantiations and vtable uses on entry and restores them on
/// exit; perform() flushes whatever was queued while the scope was active.
class GlobalEagerInstantiationScope {
public:
GlobalEagerInstantiationScope(Sema &S, bool Enabled)
: S(S), Enabled(Enabled) {
if (!Enabled) return;
SavedPendingInstantiations.swap(S.PendingInstantiations);
SavedVTableUses.swap(S.VTableUses);
}
/// Define used vtables and perform the instantiations queued while this
/// scope was active (no-op when the scope is disabled).
void perform() {
if (Enabled) {
S.DefineUsedVTables();
S.PerformPendingInstantiations();
}
}
~GlobalEagerInstantiationScope() {
if (!Enabled) return;
// Restore the set of pending vtables.
assert(S.VTableUses.empty() &&
"VTableUses should be empty before it is discarded.");
S.VTableUses.swap(SavedVTableUses);
// Restore the set of pending implicit instantiations.
assert(S.PendingInstantiations.empty() &&
"PendingInstantiations should be empty before it is discarded.");
S.PendingInstantiations.swap(SavedPendingInstantiations);
}
private:
Sema &S;
SmallVector<VTableUse, 16> SavedVTableUses;
std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
bool Enabled;
};
/// The queue of implicit template instantiations that are required
/// and must be performed within the current local scope.
///
/// This queue is only used for member functions of local classes in
/// templates, which must be instantiated in the same scope as their
/// enclosing function, so that they can reference function-local
/// types, static variables, enumerators, etc.
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;
class LocalEagerInstantiationScope {
public:
LocalEagerInstantiationScope(Sema &S) : S(S) {
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); }
~LocalEagerInstantiationScope() {
assert(S.PendingLocalImplicitInstantiations.empty() &&
"there shouldn't be any pending local implicit instantiations");
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
private:
Sema &S;
std::deque<PendingImplicitInstantiation>
SavedPendingLocalImplicitInstantiations;
};
/// A helper class that incrementally builds an array of
/// FunctionProtoType::ExtParameterInfo values, remembering whether any
/// entry differs from the default-constructed value.
class ExtParameterInfoBuilder {
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos;
  bool HasInteresting = false;

public:
  /// Set the ExtParameterInfo for the parameter at the given index.
  /// Indices must be supplied in strictly increasing order.
  void set(unsigned index, FunctionProtoType::ExtParameterInfo info) {
    assert(Infos.size() <= index);
    Infos.resize(index + 1);
    Infos[index] = info;
    if (info != FunctionProtoType::ExtParameterInfo())
      HasInteresting = true;
  }

  /// Return a pointer (suitable for setting in an ExtProtoInfo) to the
  /// ExtParameterInfo array we've built up, or null if every entry set
  /// so far was the default value.
  const FunctionProtoType::ExtParameterInfo *
  getPointerOrNull(unsigned numParams) {
    if (!HasInteresting)
      return nullptr;
    Infos.resize(numParams);
    return Infos.data();
  }
};
void PerformPendingInstantiations(bool LocalOnly = false);
// Substitution of template arguments into types, declarations, statements,
// and expressions.
TypeSourceInfo *SubstType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity,
bool AllowDeducedTST = false);
QualType SubstType(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstType(TypeLoc TL,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
/// Substitute template arguments into a function declaration's type,
/// with the given 'this' context and qualifiers.
TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc,
DeclarationName Entity,
CXXRecordDecl *ThisContext,
Qualifiers ThisTypeQuals);
void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto,
const MultiLevelTemplateArgumentList &Args);
bool SubstExceptionSpec(SourceLocation Loc,
FunctionProtoType::ExceptionSpecInfo &ESI,
SmallVectorImpl<QualType> &ExceptionStorage,
const MultiLevelTemplateArgumentList &Args);
ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
int indexAdjustment,
Optional<unsigned> NumExpansions,
bool ExpectParameterPack);
bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params,
const FunctionProtoType::ExtParameterInfo *ExtParamInfos,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<QualType> &ParamTypes,
SmallVectorImpl<ParmVarDecl *> *OutParams,
ExtParameterInfoBuilder &ParamInfos);
ExprResult SubstExpr(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute the given template arguments into a list of
/// expressions, expanding pack expansions if required.
///
/// \param Exprs The list of expressions to substitute into.
///
/// \param IsCall Whether this is some form of call, in which case
/// default arguments will be dropped.
///
/// \param TemplateArgs The set of template arguments to substitute.
///
/// \param Outputs Will receive all of the substituted arguments.
///
/// \returns true if an error occurred, false otherwise.
bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<Expr *> &Outputs);
StmtResult SubstStmt(Stmt *S,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateParameterList *
SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
Decl *SubstDecl(Decl *D, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
ExprResult SubstInitializer(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool CXXDirectInit);
bool
SubstBaseSpecifiers(CXXRecordDecl *Instantiation,
CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Instantiate the definition of a class from its pattern, with the
/// given template arguments and specialization kind.
bool
InstantiateClass(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK,
bool Complain = true);
bool InstantiateEnum(SourceLocation PointOfInstantiation,
EnumDecl *Instantiation, EnumDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
bool InstantiateInClassInitializer(
SourceLocation PointOfInstantiation, FieldDecl *Instantiation,
FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs);
/// An attribute from a template pattern whose instantiation has been
/// deferred, together with the state needed to instantiate it later.
struct LateInstantiatedAttribute {
  // Attribute on the template pattern.
  const Attr *TmplAttr;
  // Local instantiation scope captured for the deferred instantiation.
  LocalInstantiationScope *Scope;
  // Declaration the instantiated attribute will be attached to.
  Decl *NewDecl;

  LateInstantiatedAttribute(const Attr *attr, LocalInstantiationScope *scope,
                            Decl *decl)
      : TmplAttr(attr), Scope(scope), NewDecl(decl) {}
};
typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec;
void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
void
InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
bool usesPartialOrExplicitSpecialization(
SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec);
bool
InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK,
bool Complain = true);
void InstantiateClassMembers(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
void InstantiateClassTemplateSpecializationMembers(
SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK);
NestedNameSpecifierLoc
SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS,
const MultiLevelTemplateArgumentList &TemplateArgs);
DeclarationNameInfo
SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateName
SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name,
SourceLocation Loc,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs,
TemplateArgumentListInfo &Result,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
FunctionDecl *Function);
FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD,
const TemplateArgumentList *Args,
SourceLocation Loc);
void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
FunctionDecl *Function,
bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
VarTemplateSpecializationDecl *BuildVarTemplateInstantiation(
VarTemplateDecl *VarTemplate, VarDecl *FromVar,
const TemplateArgumentList &TemplateArgList,
const TemplateArgumentListInfo &TemplateArgsInfo,
SmallVectorImpl<TemplateArgument> &Converted,
SourceLocation PointOfInstantiation, void *InsertPos,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *StartingScope = nullptr);
VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl(
VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl,
const MultiLevelTemplateArgumentList &TemplateArgs);
void
BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs,
LateInstantiatedAttrVec *LateAttrs,
DeclContext *Owner,
LocalInstantiationScope *StartingScope,
bool InstantiatingVarTemplate = false);
void InstantiateVariableInitializer(
VarDecl *Var, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
VarDecl *Var, bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
void InstantiateMemInitializers(CXXConstructorDecl *New,
const CXXConstructorDecl *Tmpl,
const MultiLevelTemplateArgumentList &TemplateArgs);
NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool FindingInstantiatedContext = false);
DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC,
const MultiLevelTemplateArgumentList &TemplateArgs);
// Objective-C declarations.

/// The kind of Objective-C container context currently being processed,
/// or OCK_None when not inside an Objective-C container.
enum ObjCContainerKind {
OCK_None = -1,
OCK_Interface = 0,
OCK_Protocol,
OCK_Category,
OCK_ClassExtension,
OCK_Implementation,
OCK_CategoryImplementation
};
ObjCContainerKind getObjCContainerKind() const;
DeclResult actOnObjCTypeParam(Scope *S,
ObjCTypeParamVariance variance,
SourceLocation varianceLoc,
unsigned index,
IdentifierInfo *paramName,
SourceLocation paramLoc,
SourceLocation colonLoc,
ParsedType typeBound);
ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc,
ArrayRef<Decl *> typeParams,
SourceLocation rAngleLoc);
void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList);
Decl *ActOnStartClassInterface(
Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *SuperName, SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
void ActOnSuperClassOfClassInterface(Scope *S,
SourceLocation AtInterfaceLoc,
ObjCInterfaceDecl *IDecl,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperName,
SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs,
SourceRange SuperTypeArgsRange);
void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs,
SmallVectorImpl<SourceLocation> &ProtocolLocs,
IdentifierInfo *SuperName,
SourceLocation SuperLoc);
Decl *ActOnCompatibilityAlias(
SourceLocation AtCompatibilityAliasLoc,
IdentifierInfo *AliasName, SourceLocation AliasLocation,
IdentifierInfo *ClassName, SourceLocation ClassLocation);
bool CheckForwardProtocolDeclarationForCircularDependency(
IdentifierInfo *PName,
SourceLocation &PLoc, SourceLocation PrevLoc,
const ObjCList<ObjCProtocolDecl> &PList);
Decl *ActOnStartProtocolInterface(
SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName,
SourceLocation ProtocolLoc, Decl *const *ProtoRefNames,
unsigned NumProtoRefs, const SourceLocation *ProtoLocs,
SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList);
Decl *ActOnStartCategoryInterface(
SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *CategoryName, SourceLocation CategoryLoc,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnStartClassImplementation(
SourceLocation AtClassImplLoc,
IdentifierInfo *ClassName, SourceLocation ClassLoc,
IdentifierInfo *SuperClassname,
SourceLocation SuperClassLoc);
Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *CatName,
SourceLocation CatLoc);
DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
ArrayRef<Decl *> Decls);
DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc,
IdentifierInfo **IdentList,
SourceLocation *IdentLocs,
ArrayRef<ObjCTypeParamList *> TypeParamLists,
unsigned NumElts);
DeclGroupPtrTy
ActOnForwardProtocolDeclaration(SourceLocation AtProtoclLoc,
ArrayRef<IdentifierLocPair> IdentList,
const ParsedAttributesView &attrList);
void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer,
ArrayRef<IdentifierLocPair> ProtocolId,
SmallVectorImpl<Decl *> &Protocols);
void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId,
SourceLocation ProtocolLoc,
IdentifierInfo *TypeArgId,
SourceLocation TypeArgLoc,
bool SelectProtocolFirst = false);
/// Given a list of identifiers (and their locations), resolve the
/// names to either Objective-C protocol qualifiers or type
/// arguments, as appropriate.
void actOnObjCTypeArgsOrProtocolQualifiers(
Scope *S,
ParsedType baseType,
SourceLocation lAngleLoc,
ArrayRef<IdentifierInfo *> identifiers,
ArrayRef<SourceLocation> identifierLocs,
SourceLocation rAngleLoc,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SourceLocation &protocolRAngleLoc,
bool warnOnIncompleteProtocols);
/// Build a an Objective-C protocol-qualified 'id' type where no
/// base type was specified.
TypeResult actOnObjCProtocolQualifierType(
SourceLocation lAngleLoc,
ArrayRef<Decl *> protocols,
ArrayRef<SourceLocation> protocolLocs,
SourceLocation rAngleLoc);
/// Build a specialized and/or protocol-qualified Objective-C type.
TypeResult actOnObjCTypeArgsAndProtocolQualifiers(
Scope *S,
SourceLocation Loc,
ParsedType BaseType,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<ParsedType> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<Decl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc);
/// Build an Objective-C type parameter type.
QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Build an Objective-C object pointer type.
QualType BuildObjCObjectType(QualType BaseType,
SourceLocation Loc,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<TypeSourceInfo *> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Ensure attributes are consistent with type.
/// \param [in, out] Attributes The attributes to check; they will
/// be modified to be consistent with \p PropertyTy.
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy,
SourceLocation Loc,
unsigned &Attributes,
bool propertyInPrimaryClass);
/// Process the specified property declaration and create decls for the
/// setters and getters as needed.
/// \param property The property declaration being processed
void ProcessPropertyDecl(ObjCPropertyDecl *property);
void DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
ObjCPropertyDecl *SuperProperty,
const IdentifierInfo *Name,
bool OverridingProtocolProperty);
void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
ObjCInterfaceDecl *ID);
Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd,
ArrayRef<Decl *> allMethods = None,
ArrayRef<DeclGroupPtrTy> allTUVars = None);
Decl *ActOnProperty(Scope *S, SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD, ObjCDeclSpec &ODS,
Selector GetterSel, Selector SetterSel,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
Decl *ActOnPropertyImplDecl(Scope *S,
SourceLocation AtLoc,
SourceLocation PropertyLoc,
bool ImplKind,
IdentifierInfo *PropertyId,
IdentifierInfo *PropertyIvar,
SourceLocation PropertyIvarLoc,
ObjCPropertyQueryKind QueryKind);
/// Classifies an Objective-C method as one of the special kinds
/// (alloc, new, copy, or an init that does / does not retain), or
/// OSMK_None for an ordinary method.
enum ObjCSpecialMethodKind {
OSMK_None,
OSMK_Alloc,
OSMK_New,
OSMK_Copy,
OSMK_RetainingInit,
OSMK_NonRetainingInit
};
/// Parsed information about a single argument of an Objective-C method
/// declaration, before the corresponding declaration is built.
struct ObjCArgInfo {
/// The argument's name.
IdentifierInfo *Name;
/// Location of the argument's name.
SourceLocation NameLoc;
// The Type is null if no type was specified, and the DeclSpec is invalid
// in this case.
ParsedType Type;
/// The declaration specifiers written for this argument.
ObjCDeclSpec DeclSpec;
/// ArgAttrs - Attribute list for this argument.
ParsedAttributesView ArgAttrs;
};
Decl *ActOnMethodDeclaration(
Scope *S,
SourceLocation BeginLoc, // location of the + or -.
SourceLocation EndLoc, // location of the ; or {.
tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType,
ArrayRef<SourceLocation> SelectorLocs, Selector Sel,
// optional arguments. The number of types/arguments is obtained
// from the Sel.getNumArgs().
ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo,
unsigned CNumArgs, // c-style args
const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind,
bool isVariadic, bool MethodDefinition);
ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel,
const ObjCObjectPointerType *OPT,
bool IsInstance);
ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty,
bool IsInstance);
bool CheckARCMethodDecl(ObjCMethodDecl *method);
bool inferObjCARCLifetime(ValueDecl *decl);
ExprResult
HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
Expr *BaseExpr,
SourceLocation OpLoc,
DeclarationName MemberName,
SourceLocation MemberLoc,
SourceLocation SuperLoc, QualType SuperType,
bool Super);
ExprResult
ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
IdentifierInfo &propertyName,
SourceLocation receiverNameLoc,
SourceLocation propertyNameLoc);
ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc);
/// Describes the kind of message expression indicated by a message
/// send that starts with an identifier.
enum ObjCMessageKind {
/// The message is sent to 'super'.
ObjCSuperMessage,
/// The message is an instance message.
ObjCInstanceMessage,
/// The message is a class message, and the identifier is a type
/// name.
ObjCClassMessage
};
ObjCMessageKind getObjCMessageKind(Scope *S,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsSuper,
bool HasTrailingDot,
ParsedType &ReceiverType);
ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildClassMessageImplicit(QualType ReceiverType,
bool isSuperReceiver,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnClassMessage(Scope *S,
ParsedType Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildInstanceMessage(Expr *Receiver,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildInstanceMessageImplicit(Expr *Receiver,
QualType ReceiverType,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnInstanceMessage(Scope *S,
Expr *Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
TypeSourceInfo *TSInfo,
Expr *SubExpr);
ExprResult ActOnObjCBridgedCast(Scope *S,
SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
ParsedType Type,
SourceLocation RParenLoc,
Expr *SubExpr);
void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr);
void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr);
bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr,
CastKind &Kind);
bool checkObjCBridgeRelatedComponents(SourceLocation Loc,
QualType DestType, QualType SrcType,
ObjCInterfaceDecl *&RelatedClass,
ObjCMethodDecl *&ClassMethod,
ObjCMethodDecl *&InstanceMethod,
TypedefNameDecl *&TDNDecl,
bool CfToNs, bool Diagnose = true);
bool CheckObjCBridgeRelatedConversions(SourceLocation Loc,
QualType DestType, QualType SrcType,
Expr *&SrcExpr, bool Diagnose = true);
bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr,
bool Diagnose = true);
bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall);
/// Check whether the given new method is a valid override of the
/// given overridden method, and set any properties that should be inherited.
void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
const ObjCMethodDecl *Overridden);
/// Describes the compatibility of a result type with its method.
enum ResultTypeCompatibilityKind {
/// The result type is compatible.
RTC_Compatible,
/// The result type is incompatible.
RTC_Incompatible,
/// Compatibility could not be determined.
RTC_Unknown
};
void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
ObjCInterfaceDecl *CurrentClass,
ResultTypeCompatibilityKind RTC);
enum PragmaOptionsAlignKind {
POAK_Native, // #pragma options align=native
POAK_Natural, // #pragma options align=natural
POAK_Packed, // #pragma options align=packed
POAK_Power, // #pragma options align=power
POAK_Mac68k, // #pragma options align=mac68k
POAK_Reset // #pragma options align=reset
};
/// ActOnPragmaClangSection - Called on well formed \#pragma clang section
void ActOnPragmaClangSection(SourceLocation PragmaLoc,
PragmaClangSectionAction Action,
PragmaClangSectionKind SecKind, StringRef SecName);
/// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align.
void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
SourceLocation PragmaLoc);
/// ActOnPragmaPack - Called on well formed \#pragma pack(...).
void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action,
StringRef SlotLabel, Expr *Alignment);
/// The reason a non-default '#pragma pack' state is being diagnosed.
enum class PragmaPackDiagnoseKind {
/// The pack state was not the default when an include was entered.
NonDefaultStateAtInclude,
/// The pack state was changed and not restored by the point of exit.
ChangedStateAtExit
};
void DiagnoseNonDefaultPragmaPack(PragmaPackDiagnoseKind Kind,
SourceLocation IncludeLoc);
void DiagnoseUnterminatedPragmaPack();
/// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off].
void ActOnPragmaMSStruct(PragmaMSStructKind Kind);
/// ActOnPragmaMSComment - Called on well formed
/// \#pragma comment(kind, "arg").
void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind,
StringRef Arg);
/// ActOnPragmaMSPointersToMembers - called on well formed \#pragma
/// pointers_to_members(representation method[, general purpose
/// representation]).
void ActOnPragmaMSPointersToMembers(
LangOptions::PragmaMSPointersToMembersKind Kind,
SourceLocation PragmaLoc);
/// Called on well formed \#pragma vtordisp().
void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action,
SourceLocation PragmaLoc,
MSVtorDispAttr::Mode Value);
/// Which MS section pragma a section/segment name belongs to
/// (data_seg, bss_seg, const_seg, or code_seg).
enum PragmaSectionKind {
PSK_DataSeg,
PSK_BSSSeg,
PSK_ConstSeg,
PSK_CodeSeg,
};
bool UnifySection(StringRef SectionName,
int SectionFlags,
DeclaratorDecl *TheDecl);
bool UnifySection(StringRef SectionName,
int SectionFlags,
SourceLocation PragmaSectionLocation);
/// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg.
void ActOnPragmaMSSeg(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
StringLiteral *SegmentName,
llvm::StringRef PragmaName);
/// Called on well formed \#pragma section().
void ActOnPragmaMSSection(SourceLocation PragmaLocation,
int SectionFlags, StringLiteral *SegmentName);
/// Called on well-formed \#pragma init_seg().
void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation,
StringLiteral *SegmentName);
/// Called on #pragma clang __debug dump II
void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II);
/// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch
void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name,
StringRef Value);
/// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
void ActOnPragmaUnused(const Token &Identifier,
Scope *curScope,
SourceLocation PragmaLoc);
/// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... .
void ActOnPragmaVisibility(const IdentifierInfo* VisType,
SourceLocation PragmaLoc);
NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
SourceLocation Loc);
void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);
/// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
void ActOnPragmaWeakID(IdentifierInfo* WeakName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc);
/// ActOnPragmaRedefineExtname - Called on well formed
/// \#pragma redefine_extname oldname newname.
void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
void ActOnPragmaWeakAlias(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaFPContract - Called on well formed
/// \#pragma {STDC,OPENCL} FP_CONTRACT and
/// \#pragma clang fp contract
void ActOnPragmaFPContract(LangOptions::FPContractModeKind FPC);
/// ActOnPragmaFenvAccess - Called on well formed
/// \#pragma STDC FENV_ACCESS
void ActOnPragmaFEnvAccess(LangOptions::FEnvAccessModeKind FPC);
/// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
/// a the record decl, to handle '\#pragma pack' and '\#pragma options align'.
void AddAlignmentAttributesForRecord(RecordDecl *RD);
/// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
void AddMsStructLayoutForRecord(RecordDecl *RD);
/// FreePackedContext - Deallocate and null out PackContext.
void FreePackedContext();
/// PushNamespaceVisibilityAttr - Note that we've entered a
/// namespace with a visibility attribute.
void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
SourceLocation Loc);
/// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
/// add an appropriate visibility attribute.
void AddPushedVisibilityAttribute(Decl *RD);
/// PopPragmaVisibility - Pop the top element of the visibility stack; used
/// for '\#pragma GCC visibility' and visibility attributes on namespaces.
void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);
/// FreeVisContext - Deallocate and null out VisContext.
void FreeVisContext();
/// AddCFAuditedAttribute - Check whether we're currently within
/// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
/// the appropriate attribute.
void AddCFAuditedAttribute(Decl *D);
void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute,
SourceLocation PragmaLoc,
attr::ParsedSubjectMatchRuleSet Rules);
void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Called on well-formed '\#pragma clang attribute pop'.
void ActOnPragmaAttributePop(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Adds the attributes that have been specified using the
/// '\#pragma clang attribute push' directives to the given declaration.
void AddPragmaAttributes(Scope *S, Decl *D);
void DiagnoseUnterminatedPragmaAttribute();
/// Called on well formed \#pragma clang optimize.
void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);
/// Get the location for the currently active "\#pragma clang optimize
/// off". If this location is invalid, then the state of the pragma is "on".
SourceLocation getOptimizeOffPragmaLocation() const {
return OptimizeOffPragmaLocation;
}
/// Only called on function definitions; if there is a pragma in scope
/// with the effect of a range-based optnone, consider marking the function
/// with attribute optnone.
void AddRangeBasedOptnone(FunctionDecl *FD);
/// Adds the 'optnone' attribute to the function declaration if there
/// are no conflicts; Loc represents the location causing the 'optnone'
/// attribute to be added (usually because of a pragma).
void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);
/// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
void AddAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E,
unsigned SpellingListIndex, bool IsPackExpansion);
void AddAlignedAttr(SourceRange AttrRange, Decl *D, TypeSourceInfo *T,
unsigned SpellingListIndex, bool IsPackExpansion);
/// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular
/// declaration.
void AddAssumeAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, Expr *OE,
unsigned SpellingListIndex);
/// AddAllocAlignAttr - Adds an alloc_align attribute to a particular
/// declaration.
void AddAllocAlignAttr(SourceRange AttrRange, Decl *D, Expr *ParamExpr,
unsigned SpellingListIndex);
/// AddAlignValueAttr - Adds an align_value attribute to a particular
/// declaration.
void AddAlignValueAttr(SourceRange AttrRange, Decl *D, Expr *E,
unsigned SpellingListIndex);
/// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
/// declaration.
void AddLaunchBoundsAttr(SourceRange AttrRange, Decl *D, Expr *MaxThreads,
Expr *MinBlocks, unsigned SpellingListIndex);
/// AddModeAttr - Adds a mode attribute to a particular declaration.
void AddModeAttr(SourceRange AttrRange, Decl *D, IdentifierInfo *Name,
unsigned SpellingListIndex, bool InInstantiation = false);
void AddParameterABIAttr(SourceRange AttrRange, Decl *D,
ParameterABI ABI, unsigned SpellingListIndex);
/// Which retain/release family (Foundation NS, CoreFoundation CF, or OS)
/// an ownership-consumed attribute refers to.
enum class RetainOwnershipKind {NS, CF, OS};
void AddXConsumedAttr(Decl *D, SourceRange SR, unsigned SpellingIndex,
RetainOwnershipKind K, bool IsTemplateInstantiation);
/// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size
/// attribute to a particular declaration.
void addAMDGPUFlatWorkGroupSizeAttr(SourceRange AttrRange, Decl *D, Expr *Min,
Expr *Max, unsigned SpellingListIndex);
/// addAMDGPUWavePersEUAttr - Adds an amdgpu_waves_per_eu attribute to a
/// particular declaration.
void addAMDGPUWavesPerEUAttr(SourceRange AttrRange, Decl *D, Expr *Min,
Expr *Max, unsigned SpellingListIndex);
bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type);
//===--------------------------------------------------------------------===//
// C++ Coroutines TS
//
bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc,
StringRef Keyword);
ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E);
StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
UnresolvedLookupExpr* Lookup);
ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E);
StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs);
bool buildCoroutineParameterMoves(SourceLocation Loc);
VarDecl *buildCoroutinePromise(SourceLocation Loc);
void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body);
ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc,
SourceLocation FuncLoc);
//===--------------------------------------------------------------------===//
// OpenCL extensions.
//
private:
std::string CurrOpenCLExtension;
/// Extensions required by an OpenCL type.
llvm::DenseMap<const Type*, std::set<std::string>> OpenCLTypeExtMap;
/// Extensions required by an OpenCL declaration.
llvm::DenseMap<const Decl*, std::set<std::string>> OpenCLDeclExtMap;
public:
/// Return the name of the OpenCL extension currently in effect
/// (empty when none has been set via setCurrentOpenCLExtension).
llvm::StringRef getCurrentOpenCLExtension() const {
return CurrOpenCLExtension;
}
/// Check if a function declaration \p FD associates with any
/// extensions present in OpenCLDeclExtMap and if so return the
/// extension(s) name(s).
std::string getOpenCLExtensionsFromDeclExtMap(FunctionDecl *FD);
/// Check if a function type \p FT associates with any
/// extensions present in OpenCLTypeExtMap and if so return the
/// extension(s) name(s).
std::string getOpenCLExtensionsFromTypeExtMap(FunctionType *FT);
/// Find an extension in an appropriate extension map and return its name
template<typename T, typename MapT>
std::string getOpenCLExtensionsFromExtMap(T* FT, MapT &Map);
/// Record \p Ext as the OpenCL extension currently in effect; an empty
/// string clears it.
void setCurrentOpenCLExtension(llvm::StringRef Ext) {
CurrOpenCLExtension = Ext;
}
/// Set OpenCL extensions for a type which can only be used when these
/// OpenCL extensions are enabled. If \p Exts is empty, do nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForType(QualType T, llvm::StringRef Exts);
/// Set OpenCL extensions for a declaration which can only be
/// used when these OpenCL extensions are enabled. If \p Exts is empty, do
/// nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForDecl(Decl *FD, llvm::StringRef Exts);
/// Set current OpenCL extensions for a type which can only be used
/// when these OpenCL extensions are enabled. If current OpenCL extension is
/// empty, do nothing.
void setCurrentOpenCLExtensionForType(QualType T);
/// Set current OpenCL extensions for a declaration which
/// can only be used when these OpenCL extensions are enabled. If current
/// OpenCL extension is empty, do nothing.
void setCurrentOpenCLExtensionForDecl(Decl *FD);
bool isOpenCLDisabledDecl(Decl *FD);
/// Check if type \p T corresponding to declaration specifier \p DS
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType T);
/// Check if declaration \p D used by expression \p E
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E);
//===--------------------------------------------------------------------===//
// OpenMP directives and clauses.
//
private:
void *VarDataSharingAttributesStack;
/// Number of nested '#pragma omp declare target' directives.
unsigned DeclareTargetNestingLevel = 0;
/// Initialization of data-sharing attributes stack.
void InitDataSharingAttributesStack();
void DestroyDataSharingAttributesStack();
ExprResult
VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind,
bool StrictlyPositive = true);
/// Returns OpenMP nesting level for current directive.
unsigned getOpenMPNestingLevel() const;
/// Adjusts the function scopes index for the target-based regions.
void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex,
unsigned Level) const;
/// Push new OpenMP function region for non-capturing function.
void pushOpenMPFunctionRegion();
/// Pop OpenMP function region for non-capturing function.
void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI);
/// Check whether we're allowed to call Callee from the current function.
void checkOpenMPDeviceFunction(SourceLocation Loc, FunctionDecl *Callee);
/// Check if the expression is allowed to be used in expressions for the
/// OpenMP devices.
void checkOpenMPDeviceExpr(const Expr *E);
/// Checks if a type or a declaration is disabled due to the owning extension
/// being disabled, and emits diagnostic messages if it is disabled.
/// \param D type or declaration to be checked.
/// \param DiagLoc source location for the diagnostic message.
/// \param DiagInfo information to be emitted for the diagnostic message.
/// \param SrcRange source range of the declaration.
/// \param Map maps type or declaration to the extensions.
/// \param Selector selects diagnostic message: 0 for type and 1 for
/// declaration.
/// \return true if the type or declaration is disabled.
template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT>
bool checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, DiagInfoT DiagInfo,
MapT &Map, unsigned Selector = 0,
SourceRange SrcRange = SourceRange());
public:
/// Return true if the provided declaration \a D should be captured by
/// reference.
/// \param Level Relative level of nested OpenMP construct for that the check
/// is performed.
bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level) const;
/// Check if the specified variable is used in one of the private
/// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP
/// constructs.
VarDecl *isOpenMPCapturedDecl(ValueDecl *D);
/// Build an expression referring to the captured copy \p Capture with the
/// given value kind \p VK, object kind \p OK, and source location \p Loc.
ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK,
ExprObjectKind OK, SourceLocation Loc);
/// If the current region is a loop-based region, mark the start of the loop
/// construct.
void startOpenMPLoop();
/// Check if the specified variable is used in 'private' clause.
/// \param Level Relative level of nested OpenMP construct for that the check
/// is performed.
bool isOpenMPPrivateDecl(const ValueDecl *D, unsigned Level) const;
/// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.)
/// for \p FD based on DSA for the provided corresponding captured declaration
/// \p D.
void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level);
/// Check if the specified variable is captured by 'target' directive.
/// \param Level Relative level of nested OpenMP construct for that the check
/// is performed.
bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level) const;
/// Perform the implicit integer conversion OpenMP requires on expression
/// \p Op (e.g. for clause arguments that must have integral type),
/// diagnosing at \p OpLoc on failure.
ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc,
Expr *Op);
/// Called on start of new data sharing attribute block.
void StartOpenMPDSABlock(OpenMPDirectiveKind K,
const DeclarationNameInfo &DirName, Scope *CurScope,
SourceLocation Loc);
/// Start analysis of clauses.
void StartOpenMPClause(OpenMPClauseKind K);
/// End analysis of clauses.
void EndOpenMPClause();
/// Called on end of data sharing attribute block.
void EndOpenMPDSABlock(Stmt *CurDirective);
/// Check if the current region is an OpenMP loop region and if it is,
/// mark loop control variable, used in \p Init for loop initialization, as
/// private by default.
/// \param Init First part of the for loop.
void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init);
/// Called on well-formed id-expression from the '#pragma omp
/// threadprivate'.
ExprResult ActOnOpenMPIdExpression(Scope *CurScope,
CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id);
/// Called on well-formed '#pragma omp threadprivate'.
DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(
SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Builds a new OpenMPThreadPrivateDecl and checks its correctness.
OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Called on well-formed '#pragma omp requires'.
DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc,
ArrayRef<OMPClause *> ClauseList);
/// Check restrictions on Requires directive
OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc,
ArrayRef<OMPClause *> Clauses);
/// Check if the specified type is allowed to be used in 'omp declare
/// reduction' construct.
QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart(
Scope *S, DeclContext *DC, DeclarationName Name,
ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes,
AccessSpecifier AS, Decl *PrevDeclInScope = nullptr);
/// Initialize declare reduction construct initializer.
void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner);
/// Initialize declare reduction construct initializer.
/// \return omp_priv variable.
VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer,
VarDecl *OmpPrivParm);
/// Called at the end of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd(
Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid);
/// Check variable declaration in 'omp declare mapper' construct.
TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D);
/// Check if the specified type is allowed to be used in 'omp declare
/// mapper' construct.
QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare mapper'.
OMPDeclareMapperDecl *ActOnOpenMPDeclareMapperDirectiveStart(
Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType,
SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS,
Decl *PrevDeclInScope = nullptr);
/// Build the mapper variable of '#pragma omp declare mapper'.
void ActOnOpenMPDeclareMapperDirectiveVarDecl(OMPDeclareMapperDecl *DMD,
Scope *S, QualType MapperType,
SourceLocation StartLoc,
DeclarationName VN);
/// Called at the end of '#pragma omp declare mapper'.
DeclGroupPtrTy
ActOnOpenMPDeclareMapperDirectiveEnd(OMPDeclareMapperDecl *D, Scope *S,
ArrayRef<OMPClause *> ClauseList);
/// Called on the start of target region i.e. '#pragma omp declare target'.
bool ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc);
/// Called at the end of target region i.e. '#pragma omp end declare target'.
void ActOnFinishOpenMPDeclareTargetDirective();
/// Called on correct id-expression from the '#pragma omp declare target'.
void ActOnOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id,
OMPDeclareTargetDeclAttr::MapTypeTy MT,
NamedDeclSetType &SameDirectiveDecls);
/// Check declaration inside target region.
void
checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
SourceLocation IdLoc = SourceLocation());
/// Return true when lexically inside an OpenMP 'declare target' region,
/// i.e. at least one '#pragma omp declare target' is currently open.
bool isInOpenMPDeclareTargetContext() const {
  return DeclareTargetNestingLevel != 0;
}
/// Return true inside OpenMP target region.
bool isInOpenMPTargetExecutionDirective() const;
/// Return true if (un)supported features for the current target should be
/// diagnosed if OpenMP (offloading) is enabled.
bool shouldDiagnoseTargetSupportFromOpenMP() const {
  // Host compilation always diagnoses; device compilation only diagnoses
  // code that can actually run on the device.
  if (!getLangOpts().OpenMPIsDevice)
    return true;
  return isInOpenMPDeclareTargetContext() ||
         isInOpenMPTargetExecutionDirective();
}
/// Return the number of captured regions created for an OpenMP directive.
static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind);
/// Initialization of captured region for OpenMP region.
void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope);
/// End of OpenMP region.
///
/// \param S Statement associated with the current OpenMP region.
/// \param Clauses List of clauses for the current OpenMP region.
///
/// \returns Statement for finished OpenMP region.
StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses);
/// Entry point for building an OpenMP executable directive of kind \p Kind
/// from its parsed \p Clauses and associated statement \p AStmt.
/// \param DirName Directive name (used by named directives such as
/// 'critical').
/// \param CancelRegion Region kind a cancellation-related directive refers
/// to, when applicable.
StmtResult ActOnOpenMPExecutableDirective(
OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
using VarsWithInheritedDSAType =
llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>;
/// Called on well-formed '\#pragma omp simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp sections' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp section' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp single' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp master' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp critical' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName,
ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel for' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel sections' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp task' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskyield'.
StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp barrier'.
StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskwait'.
StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskgroup'.
StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp flush'.
StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp ordered' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp atomic' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target data' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target enter data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target exit data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target parallel' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp cancellation point'.
StmtResult
ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp cancel'.
StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp taskloop' after parsing of the
/// associated statement.
StmtResult
ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target update'.
StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp distribute parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target simd' after parsing of
/// the associated statement.
StmtResult
ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target teams distribute' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for
/// simd' after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Checks correctness of linear modifiers.
bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind,
SourceLocation LinLoc);
/// Checks that the specified declaration matches requirements for the linear
/// decls.
bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc,
OpenMPLinearClauseKind LinKind, QualType Type);
/// Called on well-formed '\#pragma omp declare simd' after parsing of
/// the associated method/function.
DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective(
DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS,
Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds,
ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears,
ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR);
/// Called on well-formed clauses of kind \p Kind that take a single
/// expression argument \p Expr (e.g. 'if', 'num_threads' — see the
/// per-clause ActOn* overloads below).
OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind,
Expr *Expr,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'if' clause.
OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier,
Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation NameModifierLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'final' clause.
OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_threads' clause.
OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'safelen' clause.
OMPClause *ActOnOpenMPSafelenClause(Expr *Length,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simdlen' clause.
OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'collapse' clause.
OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'ordered' clause.
OMPClause *
ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc,
SourceLocation LParenLoc = SourceLocation(),
Expr *NumForLoops = nullptr);
/// Called on well-formed 'grainsize' clause.
OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_tasks' clause.
OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'hint' clause.
OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed clauses of kind \p Kind that take a single
/// simple (enumeration-valued) argument \p Argument (e.g. 'default',
/// 'proc_bind' — see the per-clause ActOn* overloads below).
OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind,
unsigned Argument,
SourceLocation ArgumentLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'default' clause.
OMPClause *ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'proc_bind' clause.
OMPClause *ActOnOpenMPProcBindClause(OpenMPProcBindClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed clauses of kind \p Kind that take both a list of
/// simple arguments \p Arguments and a single expression \p Expr
/// (e.g. 'schedule' — see ActOnOpenMPScheduleClause below).
OMPClause *ActOnOpenMPSingleExprWithArgClause(
OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr,
SourceLocation StartLoc, SourceLocation LParenLoc,
ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc,
SourceLocation EndLoc);
/// Called on well-formed 'schedule' clause.
OMPClause *ActOnOpenMPScheduleClause(
OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2,
OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc,
SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc);
/// Called on well-formed argument-less clauses of kind \p Kind
/// (e.g. 'nowait', 'untied' — see the per-clause ActOn* overloads below).
OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nowait' clause.
OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'untied' clause.
OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'mergeable' clause.
OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'read' clause.
OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'write' clause.
OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'capture' clause.
OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'seq_cst' clause.
OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'threads' clause.
OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simd' clause.
OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nogroup' clause.
OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'unified_address' clause.
OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'unified_shared_memory' clause.
OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reverse_offload' clause.
OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dynamic_allocators' clause.
OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'atomic_default_mem_order' clause.
OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause(
OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed clauses of kind \p Kind that carry a variable
/// list \p Vars; the remaining parameters hold the clause-specific data
/// (reduction/mapper identifier, dependency kind, linear kind, map-type
/// modifiers) consumed by the individual clause handlers below.
OMPClause *ActOnOpenMPVarListClause(
OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *TailExpr,
const OMPVarListLocTy &Locs, SourceLocation ColonLoc,
CXXScopeSpec &ReductionOrMapperIdScopeSpec,
DeclarationNameInfo &ReductionOrMapperId, OpenMPDependClauseKind DepKind,
OpenMPLinearClauseKind LinKind,
ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc, OpenMPMapClauseKind MapType,
bool IsMapTypeImplicit, SourceLocation DepLinMapLoc);
/// Called on well-formed 'private' clause.
OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'firstprivate' clause.
OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'lastprivate' clause.
OMPClause *ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'shared' clause.
OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reduction' clause.
OMPClause *ActOnOpenMPReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'task_reduction' clause.
OMPClause *ActOnOpenMPTaskReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'in_reduction' clause.
OMPClause *ActOnOpenMPInReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'linear' clause.
OMPClause *
ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step,
SourceLocation StartLoc, SourceLocation LParenLoc,
OpenMPLinearClauseKind LinKind, SourceLocation LinLoc,
SourceLocation ColonLoc, SourceLocation EndLoc);
/// Called on well-formed 'aligned' clause.
OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList,
Expr *Alignment,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyin' clause.
OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyprivate' clause.
OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'flush' pseudo clause.
OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'depend' clause.
OMPClause *
ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind, SourceLocation DepLoc,
SourceLocation ColonLoc, ArrayRef<Expr *> VarList,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'device' clause.
OMPClause *ActOnOpenMPDeviceClause(Expr *Device, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'map' clause.
/// \p UnresolvedMappers defaults to empty when the clause names no
/// user-defined mapper.
OMPClause *
ActOnOpenMPMapClause(ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc,
CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId,
OpenMPMapClauseKind MapType, bool IsMapTypeImplicit,
SourceLocation MapLoc, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'num_teams' clause.
OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'thread_limit' clause.
OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'priority' clause.
OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dist_schedule' clause.
OMPClause *ActOnOpenMPDistScheduleClause(
OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc,
SourceLocation CommaLoc, SourceLocation EndLoc);
/// Called on well-formed 'defaultmap' clause.
OMPClause *ActOnOpenMPDefaultmapClause(
OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc,
SourceLocation KindLoc, SourceLocation EndLoc);
/// Called on well-formed 'to' clause.
/// \p UnresolvedMappers defaults to empty when no mapper is named.
OMPClause *
ActOnOpenMPToClause(ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId,
const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'from' clause.
OMPClause *ActOnOpenMPFromClause(
ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'use_device_ptr' clause.
OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// Called on well-formed 'is_device_ptr' clause.
OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// The kind of conversion being performed.
/// Distinguishes implicit conversions from the various explicit cast
/// notations (see isCast()).
enum CheckedConversionKind {
/// An implicit conversion.
CCK_ImplicitConversion,
/// A C-style cast.
CCK_CStyleCast,
/// A functional-style cast.
CCK_FunctionalCast,
/// A cast other than a C-style cast.
CCK_OtherCast,
/// A conversion for an operand of a builtin overloaded operator.
CCK_ForBuiltinOverloadedOp
};
/// Returns true if \p CCK is one of the explicitly-written cast notations
/// (C-style, functional, or other), as opposed to an implicit conversion or
/// a builtin-overloaded-operator operand conversion.
static bool isCast(CheckedConversionKind CCK) {
  switch (CCK) {
  case CCK_CStyleCast:
  case CCK_FunctionalCast:
  case CCK_OtherCast:
    return true;
  default:
    return false;
  }
}
/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
/// cast. If there is already an implicit cast, merge into the existing one.
/// If isLvalue, the result of the cast is an lvalue.
ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
ExprValueKind VK = VK_RValue,
const CXXCastPath *BasePath = nullptr,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
/// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
/// to the conversion from scalar type ScalarTy to the Boolean type.
static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy);
/// IgnoredValueConversions - Given that an expression's result is
/// syntactically ignored, perform any conversions that are
/// required.
ExprResult IgnoredValueConversions(Expr *E);
// UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
// functions and arrays to their respective pointers (C99 6.3.2.1).
ExprResult UsualUnaryConversions(Expr *E);
/// CallExprUnaryConversions - a special case of an unary conversion
/// performed on a function designator of a call expression.
ExprResult CallExprUnaryConversions(Expr *E);
// DefaultFunctionArrayConversion - converts functions and arrays
// to their respective pointers (C99 6.3.2.1).
ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true);
// DefaultFunctionArrayLvalueConversion - converts functions and
// arrays to their respective pointers and performs the
// lvalue-to-rvalue conversion.
ExprResult DefaultFunctionArrayLvalueConversion(Expr *E,
bool Diagnose = true);
// DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
// the operand. This is DefaultFunctionArrayLvalueConversion,
// except that it assumes the operand isn't of function or array
// type.
ExprResult DefaultLvalueConversion(Expr *E);
// DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
// do not have a prototype. Integer promotions are performed on each
// argument, and arguments that have type float are promoted to double.
ExprResult DefaultArgumentPromotion(Expr *E);
/// If \p E is a prvalue denoting an unmaterialized temporary, materialize
/// it as an xvalue. In C++98, the result will still be a prvalue, because
/// we don't have xvalues there.
ExprResult TemporaryMaterializationConversion(Expr *E);
// Used for emitting the right warning by DefaultVariadicArgumentPromotion
enum VariadicCallType {
VariadicFunction, ///< Call through a variadic function.
VariadicBlock, ///< Call through a variadic block.
VariadicMethod, ///< Call to a variadic Objective-C method.
VariadicConstructor, ///< Call to a variadic constructor.
VariadicDoesNotApply ///< The callee is not variadic.
};
VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
const FunctionProtoType *Proto,
Expr *Fn);
// Used for determining in which context a type is allowed to be passed to a
// vararg function.
enum VarArgKind {
VAK_Valid, ///< Always a valid vararg argument.
VAK_ValidInCXX11, ///< Valid only under C++11 rules.
VAK_Undefined, ///< Passing it has undefined behavior.
VAK_MSVCUndefined, ///< Undefined, but accepted in MSVC compatibility mode.
VAK_Invalid ///< Never a valid vararg argument.
};
// Determines which VarArgKind fits an expression.
VarArgKind isValidVarArgType(const QualType &Ty);
/// Check to see if the given expression is a valid argument to a variadic
/// function, issuing a diagnostic if not.
void checkVariadicArgument(const Expr *E, VariadicCallType CT);
/// Check to see if a given expression could have '.c_str()' called on it.
bool hasCStrMethod(const Expr *E);
/// GatherArgumentsForCall - Collect argument expressions for various
/// forms of call prototypes.
bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
const FunctionProtoType *Proto,
unsigned FirstParam, ArrayRef<Expr *> Args,
SmallVectorImpl<Expr *> &AllArgs,
VariadicCallType CallType = VariadicDoesNotApply,
bool AllowExplicit = false,
bool IsListInitialization = false);
// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
// will create a runtime trap if the resulting type is not a POD type.
ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
FunctionDecl *FDecl);
// UsualArithmeticConversions - performs the UsualUnaryConversions on its
// operands and then handles various conversions that are common to binary
// operators (C99 6.3.1.8). If both operands aren't arithmetic, this
// routine returns the first non-arithmetic type found. The client is
// responsible for emitting appropriate error diagnostics.
QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
bool IsCompAssign = false);
/// AssignConvertType - All of the 'assignment' semantic checks return this
/// enum to indicate whether the assignment was allowed. These checks are
/// done for simple assignments, as well as initialization, return from
/// function, argument passing, etc. The query is phrased in terms of a
/// source and destination type. DiagnoseAssignmentResult maps these values
/// to the corresponding diagnostics.
enum AssignConvertType {
/// Compatible - the types are compatible according to the standard.
Compatible,
/// PointerToInt - The assignment converts a pointer to an int, which we
/// accept as an extension.
PointerToInt,
/// IntToPointer - The assignment converts an int to a pointer, which we
/// accept as an extension.
IntToPointer,
/// FunctionVoidPointer - The assignment is between a function pointer and
/// void*, which the standard doesn't allow, but we accept as an extension.
FunctionVoidPointer,
/// IncompatiblePointer - The assignment is between two pointer types that
/// are not compatible, but we accept them as an extension.
IncompatiblePointer,
/// IncompatiblePointerSign - The assignment is between two pointer types
/// which point to integers which have a different sign, but are otherwise
/// identical. This is a subset of the above, but broken out because it's by
/// far the most common case of incompatible pointers.
IncompatiblePointerSign,
/// CompatiblePointerDiscardsQualifiers - The assignment discards
/// c/v/r qualifiers, which we accept as an extension.
CompatiblePointerDiscardsQualifiers,
/// IncompatiblePointerDiscardsQualifiers - The assignment
/// discards qualifiers that we don't permit to be discarded,
/// like address spaces.
IncompatiblePointerDiscardsQualifiers,
/// IncompatibleNestedPointerQualifiers - The assignment is between two
/// nested pointer types, and the qualifiers other than the first two
/// levels differ e.g. char ** -> const char **, but we accept them as an
/// extension.
IncompatibleNestedPointerQualifiers,
/// IncompatibleVectors - The assignment is between two vector types that
/// have the same size, which we accept as an extension.
IncompatibleVectors,
/// IntToBlockPointer - The assignment converts an int to a block
/// pointer. We disallow this.
IntToBlockPointer,
/// IncompatibleBlockPointer - The assignment is between two block
/// pointer types that are not compatible.
IncompatibleBlockPointer,
/// IncompatibleObjCQualifiedId - The assignment is between a qualified
/// id type and something else (that is incompatible with it). For example,
/// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol.
IncompatibleObjCQualifiedId,
/// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
/// object with __weak qualifier.
IncompatibleObjCWeakRef,
/// Incompatible - We reject this conversion outright, it is invalid to
/// represent it in the AST.
Incompatible
};
/// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
/// assignment conversion type specified by ConvTy. This returns true if the
/// conversion was invalid or false if the conversion was accepted.
bool DiagnoseAssignmentResult(AssignConvertType ConvTy,
SourceLocation Loc,
QualType DstType, QualType SrcType,
Expr *SrcExpr, AssignmentAction Action,
bool *Complained = nullptr);
/// IsValueInFlagEnum - Determine if a value is allowed as part of a flag
/// enum. If AllowMask is true, then we also allow the complement of a valid
/// value, to be used as a mask.
bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
bool AllowMask) const;
/// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
/// integer not in the range of enum values.
void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
Expr *SrcExpr);
/// CheckAssignmentConstraints - Perform type checking for assignment,
/// argument passing, variable initialization, and function return values.
/// C99 6.5.16.
AssignConvertType CheckAssignmentConstraints(SourceLocation Loc,
QualType LHSType,
QualType RHSType);
/// Check assignment constraints and optionally prepare for a conversion of
/// the RHS to the LHS type. The conversion is prepared for if ConvertRHS
/// is true.
AssignConvertType CheckAssignmentConstraints(QualType LHSType,
ExprResult &RHS,
CastKind &Kind,
bool ConvertRHS = true);
/// Check assignment constraints for an assignment of RHS to LHSType.
///
/// \param LHSType The destination type for the assignment.
/// \param RHS The source expression for the assignment.
/// \param Diagnose If \c true, diagnostics may be produced when checking
/// for assignability. If a diagnostic is produced, \p RHS will be
/// set to ExprError(). Note that this function may still return
/// without producing a diagnostic, even for an invalid assignment.
/// \param DiagnoseCFAudited If \c true, the target is a function parameter
/// in an audited Core Foundation API and does not need to be checked
/// for ARC retain issues.
/// \param ConvertRHS If \c true, \p RHS will be updated to model the
/// conversions necessary to perform the assignment. If \c false,
/// \p Diagnose must also be \c false.
AssignConvertType CheckSingleAssignmentConstraints(
QualType LHSType, ExprResult &RHS, bool Diagnose = true,
bool DiagnoseCFAudited = false, bool ConvertRHS = true);
// If the lhs type is a transparent union, check whether we
// can initialize the transparent union with the given expression.
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType,
ExprResult &RHS);
bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType);
bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType);
// Overloads of PerformImplicitConversion, differing in how the conversion
// sequence to apply is determined (computed, passed in, or standard-only).
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit = false);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit,
ImplicitConversionSequence& ICS);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const ImplicitConversionSequence& ICS,
AssignmentAction Action,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const StandardConversionSequence& SCS,
AssignmentAction Action,
CheckedConversionKind CCK);
ExprResult PerformQualificationConversion(
Expr *E, QualType Ty, ExprValueKind VK = VK_RValue,
CheckedConversionKind CCK = CCK_ImplicitConversion);
/// the following "Check" methods will return a valid/converted QualType
/// or a null QualType (indicating an error diagnostic was issued).
/// type checking binary operators (subroutines of CreateBuiltinBinOp).
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType CheckPointerToMemberOperands( // C++ 5.5
ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK,
SourceLocation OpLoc, bool isIndirect);
QualType CheckMultiplyDivideOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign,
bool IsDivide);
QualType CheckRemainderOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
bool IsCompAssign = false);
QualType CheckAdditionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr);
QualType CheckSubtractionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
QualType* CompLHSTy = nullptr);
QualType CheckShiftOperands( // C99 6.5.7
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, bool IsCompAssign = false);
QualType CheckCompareOperands( // C99 6.5.8/9
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckBitwiseOperands( // C99 6.5.[10...12]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckLogicalOperands( // C99 6.5.[13,14]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
// CheckAssignmentOperands is used for both simple and compound assignment.
// For simple assignment, pass both expressions and a null converted type.
// For compound assignment, pass both expressions and the converted type.
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2]
Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType);
ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opcode, Expr *Op);
ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opcode,
Expr *LHS, Expr *RHS);
ExprResult checkPseudoObjectRValue(Expr *E);
Expr *recreateSyntacticForm(PseudoObjectExpr *E);
QualType CheckConditionalOperands( // C99 6.5.15
ExprResult &Cond, ExprResult &LHS, ExprResult &RHS,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc);
QualType CXXCheckConditionalOperands( // C++ 5.16
ExprResult &cond, ExprResult &lhs, ExprResult &rhs,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc);
QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2,
bool ConvertArgs = true);
/// Convenience overload of FindCompositePointerType that operates on
/// ExprResults: unwraps them, delegates to the Expr*& overload (which may
/// rewrite the expressions when ConvertArgs is true), and stores the
/// possibly-updated expressions back into the results.
QualType FindCompositePointerType(SourceLocation Loc,
ExprResult &E1, ExprResult &E2,
bool ConvertArgs = true) {
Expr *LHS = E1.get();
Expr *RHS = E2.get();
QualType Composite = FindCompositePointerType(Loc, LHS, RHS, ConvertArgs);
E1 = LHS;
E2 = RHS;
return Composite;
}
QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS,
SourceLocation QuestionLoc);
bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr,
SourceLocation QuestionLoc);
void DiagnoseAlwaysNonNullPointer(Expr *E,
Expr::NullPointerConstantKind NullType,
bool IsEqual, SourceRange Range);
/// type checking for vector binary operators.
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool IsCompAssign,
bool AllowBothBool, bool AllowBoolConversion);
QualType GetSignedVectorType(QualType V);
QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc);
bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType);
bool isLaxVectorConversion(QualType srcType, QualType destType);
/// type checking declaration initializers (C99 6.7.8)
bool CheckForConstantInitializer(Expr *e, QualType t);
// type checking C++ declaration initializers (C++ [dcl.init]).
/// ReferenceCompareResult - Expresses the result of comparing two
/// types (cv1 T1 and cv2 T2) to determine their compatibility for the
/// purposes of initialization by reference (C++ [dcl.init.ref]p4).
enum ReferenceCompareResult {
/// Ref_Incompatible - The two types are incompatible, so direct
/// reference binding is not possible.
Ref_Incompatible = 0,
/// Ref_Related - The two types are reference-related, which means
/// that their unqualified forms (T1 and T2) are either the same
/// or T1 is a base class of T2.
Ref_Related,
/// Ref_Compatible - The two types are reference-compatible.
Ref_Compatible
};
ReferenceCompareResult CompareReferenceRelationship(SourceLocation Loc,
QualType T1, QualType T2,
bool &DerivedToBase,
bool &ObjCConversion,
bool &ObjCLifetimeConversion);
ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType,
Expr *CastExpr, CastKind &CastKind,
ExprValueKind &VK, CXXCastPath &Path);
/// Force an expression with unknown-type to an expression of the
/// given type.
ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);
/// Type-check an expression that's being passed to an
/// __unknown_anytype parameter.
ExprResult checkUnknownAnyArg(SourceLocation callLoc,
Expr *result, QualType &paramType);
// CheckVectorCast - check type constraints for vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size.
// returns true if the cast is invalid
bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
CastKind &Kind);
/// Prepare `SplattedExpr` for a vector splat operation, adding
/// implicit casts if necessary.
ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr);
// CheckExtVectorCast - check type constraints for extended vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size,
// or vectors and the element type of that vector.
// returns the cast expr
ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr,
CastKind &Kind);
ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type,
SourceLocation LParenLoc,
Expr *CastExpr,
SourceLocation RParenLoc);
/// Result of an ARC pointer-conversion check (see CheckObjCConversion).
enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error };
/// Checks for invalid conversions and casts between
/// retainable pointers and other pointer kinds for ARC and Weak.
ARCConversionResult CheckObjCConversion(SourceRange castRange,
QualType castType, Expr *&op,
CheckedConversionKind CCK,
bool Diagnose = true,
bool DiagnoseCFAudited = false,
BinaryOperatorKind Opc = BO_PtrMemD
);
Expr *stripARCUnbridgedCast(Expr *e);
void diagnoseARCUnbridgedCast(Expr *e);
bool CheckObjCARCUnavailableWeakConversion(QualType castType,
QualType ExprType);
/// checkRetainCycles - Check whether an Objective-C message send
/// might create an obvious retain cycle.
void checkRetainCycles(ObjCMessageExpr *msg);
void checkRetainCycles(Expr *receiver, Expr *argument);
void checkRetainCycles(VarDecl *Var, Expr *Init);
/// checkUnsafeAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained type.
bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS);
/// checkUnsafeExprAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained expression.
void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS);
/// CheckMessageArgumentTypes - Check types in an Obj-C message send.
/// \param Method - May be null.
/// \param [out] ReturnType - The return type of the send.
/// \return true iff there were any incompatible types.
bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType,
MultiExprArg Args, Selector Sel,
ArrayRef<SourceLocation> SelectorLocs,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage, SourceLocation lbrac,
SourceLocation rbrac, SourceRange RecRange,
QualType &ReturnType, ExprValueKind &VK);
/// Determine the result of a message send expression based on
/// the type of the receiver, the method expected to receive the message,
/// and the form of the message send.
QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage);
/// If the given expression involves a message send to a method
/// with a related result type, emit a note describing what happened.
void EmitRelatedResultTypeNote(const Expr *E);
/// Given that we had incompatible pointer types in a return
/// statement, check whether we're in a method with a related result
/// type, and if so, emit a note describing what happened.
void EmitRelatedResultTypeNoteForReturn(QualType destType);
/// The result of parsing and analyzing a condition expression (for 'if',
/// 'while', 'for', 'switch', etc.): the condition variable (if any), the
/// converted condition expression, and — for constexpr conditions — its
/// known constant value.
class ConditionResult {
// Condition variable declared in the condition, or null.
Decl *ConditionVar;
// The (already converted) condition expression.
FullExprArg Condition;
// True if this result represents an error.
bool Invalid;
// True if the condition's value was evaluated at compile time.
bool HasKnownValue;
// The compile-time value; meaningful only when HasKnownValue is true.
bool KnownValue;
friend class Sema;
// NOTE: the member initializers below rely on declaration order:
// KnownValue's initializer reads HasKnownValue, which is initialized
// first because it is declared first. Do not reorder these members.
ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition,
bool IsConstexpr)
: ConditionVar(ConditionVar), Condition(Condition), Invalid(false),
HasKnownValue(IsConstexpr && Condition.get() &&
!Condition.get()->isValueDependent()),
KnownValue(HasKnownValue &&
!!Condition.get()->EvaluateKnownConstInt(S.Context)) {}
explicit ConditionResult(bool Invalid)
: ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid),
HasKnownValue(false), KnownValue(false) {}
public:
ConditionResult() : ConditionResult(false) {}
bool isInvalid() const { return Invalid; }
/// Returns the condition variable (may be null) and condition expression.
std::pair<VarDecl *, Expr *> get() const {
return std::make_pair(cast_or_null<VarDecl>(ConditionVar),
Condition.get());
}
/// Returns the compile-time value of the condition, or None if it was not
/// evaluated as a constant.
llvm::Optional<bool> getKnownValue() const {
if (!HasKnownValue)
return None;
return KnownValue;
}
};
/// Factory for a ConditionResult in the error (invalid) state.
static ConditionResult ConditionError() {
  ConditionResult Err(/*Invalid=*/true);
  return Err;
}
/// The syntactic context a condition expression appears in; selects the
/// conversion applied to it (boolean, contextual constant, or integral).
enum class ConditionKind {
Boolean, ///< A boolean condition, from 'if', 'while', 'for', or 'do'.
ConstexprIf, ///< A constant boolean condition from 'if constexpr'.
Switch ///< An integral condition for a 'switch' statement.
};
ConditionResult ActOnCondition(Scope *S, SourceLocation Loc,
Expr *SubExpr, ConditionKind CK);
ConditionResult ActOnConditionVariable(Decl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);
ExprResult CheckConditionVariable(VarDecl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond);
/// CheckBooleanCondition - Diagnose problems involving the use of
/// the given expression as a boolean condition (e.g. in an if
/// statement). Also performs the standard function and array
/// decays, possibly changing the input variable.
///
/// \param Loc - A location associated with the condition, e.g. the
/// 'if' keyword.
/// \return true iff there were any errors
ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E,
bool IsConstexpr = false);
/// DiagnoseAssignmentAsCondition - Given that an expression is
/// being used as a boolean condition, warn if it's an assignment.
void DiagnoseAssignmentAsCondition(Expr *E);
/// Redundant parentheses over an equality comparison can indicate
/// that the user intended an assignment used as condition.
void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE);
/// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid.
ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false);
/// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
/// the specified width and sign. If an overflow occurs, detect it and emit
/// the specified diagnostic.
void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
unsigned NewWidth, bool NewSign,
SourceLocation Loc, unsigned DiagID);
/// Checks that the Objective-C declaration is declared in the global scope.
/// Emits an error and marks the declaration as invalid if it's not declared
/// in the global scope.
bool CheckObjCDeclScope(Decl *D);
/// Abstract base class used for diagnosing integer constant
/// expression violations.
class VerifyICEDiagnoser {
public:
/// When true, suppress diagnostics.
bool Suppress;

VerifyICEDiagnoser(bool S = false) : Suppress(S) {}

/// Emit the diagnostic for an expression that is not an ICE.
virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) = 0;
/// Emit the diagnostic for an expression that folded to a constant;
/// overridable, with a default implementation provided out of line.
virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR);
virtual ~VerifyICEDiagnoser() {}
};
/// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
/// and reports the appropriate diagnostics. Returns false on success.
/// Can optionally return the value of the expression.
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
VerifyICEDiagnoser &Diagnoser,
bool AllowFold = true);
/// Overload that reports failures with the diagnostic identified by
/// \p DiagID instead of a custom diagnoser.
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
unsigned DiagID,
bool AllowFold = true);
/// Overload using the default diagnostics; the value is returned through
/// \p Result when non-null.
ExprResult VerifyIntegerConstantExpression(Expr *E,
llvm::APSInt *Result = nullptr);
/// VerifyBitField - verifies that a bit field expression is an ICE and has
/// the correct width, and that the field type is valid.
/// Returns false on success.
/// Can optionally return whether the bit-field is of width 0
ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
QualType FieldTy, bool IsMsStruct,
Expr *BitWidth, bool *ZeroWidth = nullptr);
private:
// Nesting depth of '#pragma clang force_cuda_host_device' regions; see
// PushForceCUDAHostDevice/PopForceCUDAHostDevice below.
unsigned ForceCUDAHostDeviceDepth = 0;
public:
/// Increments our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. So long as this count is greater
/// than zero, all functions encountered will be __host__ __device__.
void PushForceCUDAHostDevice();
/// Decrements our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. Returns false if the count is 0
/// before decrementing, so you can emit an error.
bool PopForceCUDAHostDevice();
/// Diagnostics that are emitted only if we discover that the given function
/// must be codegen'ed. Because handling these correctly adds overhead to
/// compilation, this is currently only enabled for CUDA compilations.
llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>,
std::vector<PartialDiagnosticAt>>
DeviceDeferredDiags;
/// A pair of a canonical FunctionDecl and a SourceLocation. When used as the
/// key in a hashtable, both the FD and location are hashed.
struct FunctionDeclAndLoc {
CanonicalDeclPtr<FunctionDecl> FD;
SourceLocation Loc;
};
/// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a
/// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the
/// same deferred diag twice.
llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags;
/// An inverse call graph, mapping known-emitted functions to one of their
/// known-emitted callers (plus the location of the call).
///
/// Functions that we can tell a priori must be emitted aren't added to this
/// map.
llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>,
/* Caller = */ FunctionDeclAndLoc>
DeviceKnownEmittedFns;
/// A partial call graph maintained during CUDA/OpenMP device code compilation
/// to support deferred diagnostics.
///
/// Functions are only added here if, at the time they're considered, they are
/// not known-emitted. As soon as we discover that a function is
/// known-emitted, we remove it and everything it transitively calls from this
/// set and add those functions to DeviceKnownEmittedFns.
llvm::DenseMap</* Caller = */ CanonicalDeclPtr<FunctionDecl>,
/* Callees = */ llvm::MapVector<CanonicalDeclPtr<FunctionDecl>,
SourceLocation>>
DeviceCallGraph;
class DeviceDiagBuilder {
public:
enum Kind {
/// Emit no diagnostics.
K_Nop,
/// Emit the diagnostic immediately (i.e., behave like Sema::Diag()).
K_Immediate,
/// Emit the diagnostic immediately, and, if it's a warning or error, also
/// emit a call stack showing how this function can be reached by an a
/// priori known-emitted function.
K_ImmediateWithCallStack,
/// Create a deferred diagnostic, which is emitted only if the function
/// it's attached to is codegen'ed. Also emit a call stack as with
/// K_ImmediateWithCallStack.
K_Deferred
};
DeviceDiagBuilder(Kind K, SourceLocation Loc, unsigned DiagID,
FunctionDecl *Fn, Sema &S);
DeviceDiagBuilder(DeviceDiagBuilder &&D);
DeviceDiagBuilder(const DeviceDiagBuilder &) = default;
~DeviceDiagBuilder();
/// Convertible to bool: True if we immediately emitted an error, false if
/// we didn't emit an error or we created a deferred error.
///
/// Example usage:
///
/// if (DeviceDiagBuilder(...) << foo << bar)
/// return ExprError();
///
/// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably
/// want to use these instead of creating a DeviceDiagBuilder yourself.
operator bool() const { return ImmediateDiag.hasValue(); }
template <typename T>
friend const DeviceDiagBuilder &operator<<(const DeviceDiagBuilder &Diag,
const T &Value) {
if (Diag.ImmediateDiag.hasValue())
*Diag.ImmediateDiag << Value;
else if (Diag.PartialDiagId.hasValue())
Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second
<< Value;
return Diag;
}
private:
Sema &S;
SourceLocation Loc;
unsigned DiagID;
FunctionDecl *Fn;
bool ShowCallStack;
// Invariant: At most one of these Optionals has a value.
// FIXME: Switch these to a Variant once that exists.
llvm::Optional<SemaDiagnosticBuilder> ImmediateDiag;
llvm::Optional<unsigned> PartialDiagId;
};
/// Indicate that this function (and thus everything it transtively calls)
/// will be codegen'ed, and emit any deferred diagnostics on this function and
/// its (transitive) callees.
void markKnownEmitted(
Sema &S, FunctionDecl *OrigCaller, FunctionDecl *OrigCallee,
SourceLocation OrigLoc,
const llvm::function_ref<bool(Sema &, FunctionDecl *)> IsKnownEmitted);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current context
/// is "used as device code".
///
/// - If CurContext is a __host__ function, does not emit any diagnostics.
/// - If CurContext is a __device__ or __global__ function, emits the
/// diagnostics immediately.
/// - If CurContext is a __host__ __device__ function and we are compiling for
/// the device, creates a diagnostic which is emitted if and when we realize
/// that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in CUDA device code.
/// if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget())
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current context
/// is "used as host code".
///
/// Same as CUDADiagIfDeviceCode, with "host" and "device" switched.
DeviceDiagBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurContext is a `declare target` function or it is known that the
/// function is emitted for the device, emits the diagnostics immediately.
/// - If CurContext is a non-`declare target` function and we are compiling
/// for the device, creates a diagnostic which is emitted if and when we
/// realize that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in NVPTX device code.
/// if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported))
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID);
DeviceDiagBuilder targetDiag(SourceLocation Loc, unsigned DiagID);
/// Classification of a function by its CUDA host/device attributes.
enum CUDAFunctionTarget {
CFT_Device, // A __device__ function.
CFT_Global, // A __global__ kernel function.
CFT_Host, // A __host__ function; also returned for a null decl (see IdentifyCUDATarget).
CFT_HostDevice, // A __host__ __device__ function.
CFT_InvalidTarget // Presumably an inconsistent attribute combination — confirm in IdentifyCUDATarget.
};
/// Determines whether the given function is a CUDA device/host/kernel/etc.
/// function.
///
/// Use this rather than examining the function's attributes yourself -- you
/// will get it wrong. Returns CFT_Host if D is null.
CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D,
bool IgnoreImplicitHDAttr = false);
CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs);
/// Gets the CUDA target for the current context.
///
/// If CurContext is not a FunctionDecl, IdentifyCUDATarget receives null and
/// yields CFT_Host.
CUDAFunctionTarget CurrentCUDATarget() {
  FunctionDecl *CurFn = dyn_cast<FunctionDecl>(CurContext);
  return IdentifyCUDATarget(CurFn);
}
// CUDA function call preference. Must be ordered numerically from
// worst to best.
enum CUDAFunctionPreference {
CFP_Never, // Invalid caller/callee combination.
CFP_WrongSide, // Calls from host-device to host or device
// function that do not match current compilation
// mode.
CFP_HostDevice, // Any calls to host/device functions.
CFP_SameSide, // Calls from host-device to host or device
// function matching current compilation mode.
CFP_Native, // host-to-host or device-to-device calls.
};
/// Identifies relative preference of a given Caller/Callee
/// combination, based on their host/device attributes.
/// \param Caller function which needs address of \p Callee.
/// nullptr in case of global context.
/// \param Callee target function
///
/// \returns preference value for particular Caller/Callee combination.
CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller,
const FunctionDecl *Callee);
/// Determines whether Caller may invoke Callee, based on their CUDA
/// host/device attributes. Returns false if the call is not allowed.
///
/// Note: Will return true for CFP_WrongSide calls. These may appear in
/// semantically correct CUDA programs, but only if they're never codegen'ed.
bool IsAllowedCUDACall(const FunctionDecl *Caller,
const FunctionDecl *Callee) {
return IdentifyCUDAPreference(Caller, Callee) != CFP_Never;
}
/// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD,
/// depending on FD and the current compilation settings.
void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD,
const LookupResult &Previous);
public:
/// Check whether we're allowed to call Callee from the current context.
///
/// - If the call is never allowed in a semantically-correct program
/// (CFP_Never), emits an error and returns false.
///
/// - If the call is allowed in semantically-correct programs, but only if
/// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to
/// be emitted if and when the caller is codegen'ed, and returns true.
///
/// Will only create deferred diagnostics for a given SourceLocation once,
/// so you can safely call this multiple times without generating duplicate
/// deferred errors.
///
/// - Otherwise, returns true without emitting any diagnostics.
bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee);
/// Set __device__ or __host__ __device__ attributes on the given lambda
/// operator() method.
///
/// CUDA lambdas declared inside __device__ or __global__ functions inherit
/// the __device__ attribute. Similarly, lambdas inside __host__ __device__
/// functions become __host__ __device__ themselves.
void CUDASetLambdaAttrs(CXXMethodDecl *Method);
/// Finds a function in \p Matches with highest calling priority
/// from \p Caller context and erases all functions with lower
/// calling priority.
void EraseUnwantedCUDAMatches(
const FunctionDecl *Caller,
SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches);
/// Given a implicit special member, infer its CUDA target from the
/// calls it needs to make to underlying base/field special members.
/// \param ClassDecl the class for which the member is being created.
/// \param CSM the kind of special member.
/// \param MemberDecl the special member itself.
/// \param ConstRHS true if this is a copy operation with a const object on
/// its RHS.
/// \param Diagnose true if this call should emit diagnostics.
/// \return true if there was an error inferring.
/// The result of this call is implicit CUDA target attribute(s) attached to
/// the member declaration.
bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
CXXSpecialMember CSM,
CXXMethodDecl *MemberDecl,
bool ConstRHS,
bool Diagnose);
/// \return true if \p CD can be considered empty according to CUDA
/// (E.2.3.1 in CUDA 7.5 Programming guide).
bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD);
bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD);
// \brief Checks that initializers of \p Var satisfy CUDA restrictions. In
// case of error emits appropriate diagnostic and invalidates \p Var.
//
// \details CUDA allows only empty constructors as initializers for global
// variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all
// __shared__ variables whether they are local or not (they all are implicitly
// static in CUDA). One exception is that CUDA allows constant initializers
// for __constant__ and __device__ variables.
void checkAllowedCUDAInitializer(VarDecl *VD);
/// Check whether NewFD is a valid overload for CUDA. Emits
/// diagnostics and invalidates NewFD if not.
void checkCUDATargetOverload(FunctionDecl *NewFD,
const LookupResult &Previous);
/// Copies target attributes from the template TD to the function FD.
void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD);
/// Returns the name of the launch configuration function. This is the name
/// of the function that will be called to configure kernel call, with the
/// parameters specified via <<<>>>.
std::string getCudaConfigureFuncName() const;
/// \name Code completion
//@{
/// Describes the context in which code completion occurs.
enum ParserCompletionContext {
/// Code completion occurs at top-level or namespace context.
PCC_Namespace,
/// Code completion occurs within a class, struct, or union.
PCC_Class,
/// Code completion occurs within an Objective-C interface, protocol,
/// or category.
PCC_ObjCInterface,
/// Code completion occurs within an Objective-C implementation or
/// category implementation
PCC_ObjCImplementation,
/// Code completion occurs within the list of instance variables
/// in an Objective-C interface, protocol, category, or implementation.
PCC_ObjCInstanceVariableList,
/// Code completion occurs following one or more template
/// headers.
PCC_Template,
/// Code completion occurs following one or more template
/// headers within a class.
PCC_MemberTemplate,
/// Code completion occurs within an expression.
PCC_Expression,
/// Code completion occurs within a statement, which may
/// also be an expression or a declaration.
PCC_Statement,
/// Code completion occurs at the beginning of the
/// initialization statement (or expression) in a for loop.
PCC_ForInit,
/// Code completion occurs within the condition of an if,
/// while, switch, or for statement.
PCC_Condition,
/// Code completion occurs within the body of a function on a
/// recovery path, where we do not have a specific handle on our position
/// in the grammar.
PCC_RecoveryInFunction,
/// Code completion occurs where only a type is permitted.
PCC_Type,
/// Code completion occurs in a parenthesized expression, which
/// might also be a type cast.
PCC_ParenthesizedExpression,
/// Code completion occurs within a sequence of declaration
/// specifiers within a function, method, or block.
PCC_LocalDeclarationSpecifiers
};
void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path);
void CodeCompleteOrdinaryName(Scope *S,
ParserCompletionContext CompletionContext);
void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
bool AllowNonIdentifiers,
bool AllowNestedNameSpecifiers);
struct CodeCompleteExpressionData;
void CodeCompleteExpression(Scope *S,
const CodeCompleteExpressionData &Data);
void CodeCompleteExpression(Scope *S, QualType PreferredType,
bool IsParenthesized = false);
void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase,
SourceLocation OpLoc, bool IsArrow,
bool IsBaseExprStatement,
QualType PreferredType);
void CodeCompletePostfixExpression(Scope *S, ExprResult LHS,
QualType PreferredType);
void CodeCompleteTag(Scope *S, unsigned TagSpec);
void CodeCompleteTypeQualifiers(DeclSpec &DS);
void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D,
const VirtSpecifiers *VS = nullptr);
void CodeCompleteBracketDeclarator(Scope *S);
void CodeCompleteCase(Scope *S);
/// Reports signatures for a call to CodeCompleteConsumer and returns the
/// preferred type for the current argument. Returned type can be null.
QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type,
SourceLocation Loc,
ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl,
CXXScopeSpec SS,
ParsedType TemplateTypeTy,
ArrayRef<Expr *> ArgExprs,
IdentifierInfo *II,
SourceLocation OpenParLoc);
void CodeCompleteInitializer(Scope *S, Decl *D);
void CodeCompleteAfterIf(Scope *S);
void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS,
bool EnteringContext, QualType BaseType);
void CodeCompleteUsing(Scope *S);
void CodeCompleteUsingDirective(Scope *S);
void CodeCompleteNamespaceDecl(Scope *S);
void CodeCompleteNamespaceAliasDecl(Scope *S);
void CodeCompleteOperatorName(Scope *S);
void CodeCompleteConstructorInitializer(
Decl *Constructor,
ArrayRef<CXXCtorInitializer *> Initializers);
void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
bool AfterAmpersand);
void CodeCompleteObjCAtDirective(Scope *S);
void CodeCompleteObjCAtVisibility(Scope *S);
void CodeCompleteObjCAtStatement(Scope *S);
void CodeCompleteObjCAtExpression(Scope *S);
void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS);
void CodeCompleteObjCPropertyGetter(Scope *S);
void CodeCompleteObjCPropertySetter(Scope *S);
void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
bool IsParameter);
void CodeCompleteObjCMessageReceiver(Scope *S);
void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression);
void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
bool IsSuper = false);
void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
ObjCInterfaceDecl *Super = nullptr);
void CodeCompleteObjCForCollection(Scope *S,
DeclGroupPtrTy IterationVar);
void CodeCompleteObjCSelector(Scope *S,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCProtocolReferences(
ArrayRef<IdentifierLocPair> Protocols);
void CodeCompleteObjCProtocolDecl(Scope *S);
void CodeCompleteObjCInterfaceDecl(Scope *S);
void CodeCompleteObjCSuperclass(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationDecl(Scope *S);
void CodeCompleteObjCInterfaceCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCPropertyDefinition(Scope *S);
void CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
IdentifierInfo *PropertyName);
void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod,
ParsedType ReturnType);
void CodeCompleteObjCMethodDeclSelector(Scope *S,
bool IsInstanceMethod,
bool AtParameterName,
ParsedType ReturnType,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName,
SourceLocation ClassNameLoc,
bool IsBaseExprStatement);
void CodeCompletePreprocessorDirective(bool InConditional);
void CodeCompleteInPreprocessorConditionalExclusion(Scope *S);
void CodeCompletePreprocessorMacroName(bool IsDefinition);
void CodeCompletePreprocessorExpression();
void CodeCompletePreprocessorMacroArgument(Scope *S,
IdentifierInfo *Macro,
MacroInfo *MacroInfo,
unsigned Argument);
void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled);
void CodeCompleteNaturalLanguage();
void CodeCompleteAvailabilityPlatformName();
void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator,
CodeCompletionTUInfo &CCTUInfo,
SmallVectorImpl<CodeCompletionResult> &Results);
//@}
//===--------------------------------------------------------------------===//
// Extra semantic analysis beyond the C type system
public:
SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL,
unsigned ByteNo) const;
private:
void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
const ArraySubscriptExpr *ASE=nullptr,
bool AllowOnePastEnd=true, bool IndexNegated=false);
void CheckArrayAccess(const Expr *E);
// Used to grab the relevant information from a FormatAttr and a
// FunctionDeclaration.
struct FormatStringInfo {
unsigned FormatIdx; // Which argument carries the format string (indexing base per FormatAttr — see getFormatStringInfo).
unsigned FirstDataArg; // First argument consumed by the format conversions.
bool HasVAListArg; // Presumably: function takes a va_list rather than variadic args — confirm in getFormatStringInfo.
};
static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
FormatStringInfo *FSI);
bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc,
ArrayRef<const Expr *> Args);
bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto);
void CheckConstructorCall(FunctionDecl *FDecl,
ArrayRef<const Expr *> Args,
const FunctionProtoType *Proto,
SourceLocation Loc);
void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
const Expr *ThisArg, ArrayRef<const Expr *> Args,
bool IsMemberFunction, SourceLocation Loc, SourceRange Range,
VariadicCallType CallType);
bool CheckObjCString(Expr *Arg);
ExprResult CheckOSLogFormatStringArg(Expr *Arg);
ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl,
unsigned BuiltinID, CallExpr *TheCall);
bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
unsigned MaxWidth);
bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call);
bool SemaBuiltinUnorderedCompare(CallExpr *TheCall);
bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs);
bool SemaBuiltinVSX(CallExpr *TheCall);
bool SemaBuiltinOSLogFormat(CallExpr *TheCall);
public:
// Used by C++ template instantiation.
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall);
ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
private:
bool SemaBuiltinPrefetch(CallExpr *TheCall);
bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall);
bool SemaBuiltinAssume(CallExpr *TheCall);
bool SemaBuiltinAssumeAligned(CallExpr *TheCall);
bool SemaBuiltinLongjmp(CallExpr *TheCall);
bool SemaBuiltinSetjmp(CallExpr *TheCall);
ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult);
ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult);
ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult,
AtomicExpr::AtomicOp Op);
ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult,
bool IsDelete);
bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
llvm::APSInt &Result);
bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low,
int High, bool RangeIsError = true);
bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
unsigned Multiple);
bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
int ArgNum, unsigned ExpectedFieldNum,
bool AllowName);
public:
/// The flavor of format string being checked; names mirror the
/// corresponding C / Darwin formatting APIs.
enum FormatStringType {
FST_Scanf, // scanf-family.
FST_Printf, // printf-family.
FST_NSString, // NSString +stringWithFormat:-style.
FST_Strftime, // strftime-style.
FST_Strfmon, // strfmon-style.
FST_Kprintf, // Kernel printf extensions.
FST_FreeBSDKPrintf, // FreeBSD kernel printf extensions.
FST_OSTrace, // os_trace format.
FST_OSLog, // os_log format.
FST_Unknown // Unrecognized format kind.
};
static FormatStringType GetFormatStringType(const FormatAttr *Format);
bool FormatStringHasSArg(const StringLiteral *FExpr);
static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx);
private:
bool CheckFormatArguments(const FormatAttr *Format,
ArrayRef<const Expr *> Args,
bool IsCXXMember,
VariadicCallType CallType,
SourceLocation Loc, SourceRange Range,
llvm::SmallBitVector &CheckedVarArgs);
bool CheckFormatArguments(ArrayRef<const Expr *> Args,
bool HasVAListArg, unsigned format_idx,
unsigned firstDataArg, FormatStringType Type,
VariadicCallType CallType,
SourceLocation Loc, SourceRange range,
llvm::SmallBitVector &CheckedVarArgs);
void CheckAbsoluteValueFunction(const CallExpr *Call,
const FunctionDecl *FDecl);
void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl);
void CheckMemaccessArguments(const CallExpr *Call,
unsigned BId,
IdentifierInfo *FnName);
void CheckStrlcpycatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckStrncatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
SourceLocation ReturnLoc,
bool isObjCMethod = false,
const AttrVec *Attrs = nullptr,
const FunctionDecl *FD = nullptr);
public:
void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS);
private:
void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation());
void CheckBoolLikeConversion(Expr *E, SourceLocation CC);
void CheckForIntOverflow(Expr *E);
void CheckUnsequencedOperations(Expr *E);
/// Perform semantic checks on a completed expression. This will either
/// be a full-expression or a default argument expression.
void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(),
bool IsConstexpr = false);
void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field,
Expr *Init);
/// Check if there is a field shadowing.
void CheckShadowInheritedFields(const SourceLocation &Loc,
DeclarationName FieldName,
const CXXRecordDecl *RD,
bool DeclIsField = true);
/// Check if the given expression contains 'break' or 'continue'
/// statement that produces control flow different from GCC.
void CheckBreakContinueBinding(Expr *E);
/// Check whether receiver is mutable ObjC container which
/// attempts to add itself into the container
void CheckObjCCircularContainer(ObjCMessageExpr *Message);
void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE);
void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
bool DeleteWasArrayForm);
public:
/// Register a magic integral constant to be used as a type tag.
void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
uint64_t MagicValue, QualType Type,
bool LayoutCompatible, bool MustBeNull);
/// Type information associated with a registered type-tag magic value.
struct TypeTagData {
TypeTagData() {}
TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) :
Type(Type), LayoutCompatible(LayoutCompatible),
MustBeNull(MustBeNull)
{}
// The type that the magic value identifies.
QualType Type;
/// If true, \c Type should be compared with other expression's types for
/// layout-compatibility.
unsigned LayoutCompatible : 1;
// Presumably: the tagged argument is required to be null — confirm against
// RegisterTypeTagForDatatype's MustBeNull parameter.
unsigned MustBeNull : 1;
};
/// A pair of ArgumentKind identifier and magic value. This uniquely
/// identifies the magic value.
typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;
private:
/// A map from magic value to type information.
std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>>
TypeTagForDatatypeMagicValues;
/// Peform checks on a call of a function with argument_with_type_tag
/// or pointer_with_type_tag attributes.
void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
const ArrayRef<const Expr *> ExprArgs,
SourceLocation CallSiteLoc);
/// Check if we are taking the address of a packed field
/// as this may be a problem if the pointer value is dereferenced.
void CheckAddressOfPackedMember(Expr *rhs);
/// The parser's current scope.
///
/// The parser maintains this state here.
Scope *CurScope;
mutable IdentifierInfo *Ident_super;
mutable IdentifierInfo *Ident___float128;
/// Nullability type specifiers.
IdentifierInfo *Ident__Nonnull = nullptr;
IdentifierInfo *Ident__Nullable = nullptr;
IdentifierInfo *Ident__Null_unspecified = nullptr;
IdentifierInfo *Ident_NSError = nullptr;
/// The handler for the FileChanged preprocessor events.
///
/// Used for diagnostics that implement custom semantic analysis for #include
/// directives, like -Wpragma-pack.
sema::SemaPPCallbacks *SemaPPCallbackHandler;
protected:
friend class Parser;
friend class InitializationSequence;
friend class ASTReader;
friend class ASTDeclReader;
friend class ASTWriter;
public:
/// Retrieve the keyword associated
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability);
/// The struct behind the CFErrorRef pointer.
RecordDecl *CFError = nullptr;
/// Retrieve the identifier "NSError".
IdentifierInfo *getNSErrorIdent();
/// Retrieve the parser's current scope.
///
/// This routine must only be used when it is certain that semantic analysis
/// and the parser are in precisely the same context, which is not the case
/// when, e.g., we are performing any kind of template instantiation.
/// Therefore, the only safe places to use this scope are in the parser
/// itself and in routines directly invoked from the parser and *never* from
/// template substitution or instantiation.
Scope *getCurScope() const { return CurScope; }
// Delegates to the parser's current scope; presumably advances the
// Microsoft-ABI mangling number — confirm in Scope::incrementMSManglingNumber.
void incrementMSManglingNumber() const {
return CurScope->incrementMSManglingNumber();
}
IdentifierInfo *getSuperIdentifier() const;
IdentifierInfo *getFloat128Identifier() const;
Decl *getObjCDeclContext() const;
/// Returns OriginalLexicalContext when one has been recorded, otherwise the
/// current semantic context.
DeclContext *getCurLexicalContext() const {
  if (OriginalLexicalContext)
    return OriginalLexicalContext;
  return CurContext;
}
/// Returns the current lexical context for Objective-C purposes; a category
/// is mapped to its class interface.
const DeclContext *getCurObjCLexicalContext() const {
  const DeclContext *Ctx = getCurLexicalContext();
  // A category implicitly has the attribute of the interface.
  if (const ObjCCategoryDecl *Category = dyn_cast<ObjCCategoryDecl>(Ctx))
    return Category->getClassInterface();
  return Ctx;
}
/// To be used for checking whether the arguments being passed to
/// function exceeds the number of parameters expected for it.
static bool TooManyArguments(size_t NumParams, size_t NumArgs,
                             bool PartialOverloading = false) {
  // During code completion we may be just after a comma, in which case the
  // in-progress argument counts as one extra.
  size_t EffectiveArgs = NumArgs;
  if (PartialOverloading && NumArgs > 0)
    ++EffectiveArgs;
  return EffectiveArgs > NumParams;
}
// Emitting members of dllexported classes is delayed until the class
// (including field initializers) is fully parsed.
SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses;
private:
/// RAII object that swaps Sema's pending delayed-class state (exception-spec
/// checks, defaulted-member exception specs, and DLL-export classes) out on
/// construction and swaps it back in on destruction — presumably so a nested
/// parse region starts with a clean slate.
class SavePendingParsedClassStateRAII {
public:
SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); }
~SavePendingParsedClassStateRAII() {
// The guarded region must have fully drained everything it queued before
// the saved state is restored.
assert(S.DelayedOverridingExceptionSpecChecks.empty() &&
"there shouldn't be any pending delayed exception spec checks");
assert(S.DelayedEquivalentExceptionSpecChecks.empty() &&
"there shouldn't be any pending delayed exception spec checks");
assert(S.DelayedDefaultedMemberExceptionSpecs.empty() &&
"there shouldn't be any pending delayed defaulted member "
"exception specs");
assert(S.DelayedDllExportClasses.empty() &&
"there shouldn't be any pending delayed DLL export classes");
swapSavedState();
}
private:
Sema &S;
// Saved copies of Sema's pending lists; exchanged with the live ones by
// swapSavedState().
decltype(DelayedOverridingExceptionSpecChecks)
SavedOverridingExceptionSpecChecks;
decltype(DelayedEquivalentExceptionSpecChecks)
SavedEquivalentExceptionSpecChecks;
decltype(DelayedDefaultedMemberExceptionSpecs)
SavedDefaultedMemberExceptionSpecs;
decltype(DelayedDllExportClasses) SavedDllExportClasses;
// Symmetric swap between the saved state and Sema's live state, so one
// function serves both save (ctor) and restore (dtor).
void swapSavedState() {
SavedOverridingExceptionSpecChecks.swap(
S.DelayedOverridingExceptionSpecChecks);
SavedEquivalentExceptionSpecChecks.swap(
S.DelayedEquivalentExceptionSpecChecks);
SavedDefaultedMemberExceptionSpecs.swap(
S.DelayedDefaultedMemberExceptionSpecs);
SavedDllExportClasses.swap(S.DelayedDllExportClasses);
}
};
/// Helper class that collects misaligned member designations and
/// their location info for delayed diagnostics.
struct MisalignedMember {
  Expr *E;             ///< Expression designating the member access.
  RecordDecl *RD;      ///< Record containing the member.
  ValueDecl *MD;       ///< The member itself.
  CharUnits Alignment; ///< Alignment recorded for the access.

  MisalignedMember() : E(), RD(), MD(), Alignment() {}
  MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD,
                   CharUnits Alignment)
      : E(E), RD(RD), MD(MD), Alignment(Alignment) {}
  explicit MisalignedMember(Expr *E)
      : MisalignedMember(E, nullptr, nullptr, CharUnits()) {}

  /// Entries are identified by their expression only.
  /// Const-qualified (fix): the original non-const operator== could not be
  /// invoked through const references, which equality comparison never needs
  /// to forbid.
  bool operator==(const MisalignedMember &m) const { return this->E == m.E; }
};
/// Small set of gathered accesses to potentially misaligned members
/// due to the packed attribute.
SmallVector<MisalignedMember, 4> MisalignedMembers;
/// Adds an expression to the set of gathered misaligned members.
void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment);
public:
/// Diagnoses the current set of gathered accesses. This typically
/// happens at full expression level. The set is cleared after emitting the
/// diagnostics.
void DiagnoseMisalignedMembers();
/// This function checks if the expression is in the sef of potentially
/// misaligned members and it is converted to some pointer type T with lower
/// or equal alignment requirements. If so it removes it. This is used when
/// we do not want to diagnose such misaligned access (e.g. in conversions to
/// void*).
void DiscardMisalignedMemberAddress(const Type *T, Expr *E);
/// This function calls Action when it determines that E designates a
/// misaligned member due to the packed attribute. This is used to emit
/// local diagnostics like in reference binding.
void RefersToMemberWithReducedAlignment(
Expr *E,
llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)>
Action);
};
/// RAII object that enters a new expression evaluation context.
///
/// Pushes an ExpressionEvaluationContext onto Sema in the constructor and
/// pops it in the destructor; the push can be suppressed (ShouldEnter), in
/// which case the pop is suppressed symmetrically via the Entered flag.
class EnterExpressionEvaluationContext {
Sema &Actions;
// Tracks whether a context was actually pushed, so the destructor only pops
// what the constructor pushed.
bool Entered = true;
public:
// Push NewContext unless ShouldEnter is false.
EnterExpressionEvaluationContext(
Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
Decl *LambdaContextDecl = nullptr,
Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
Sema::ExpressionEvaluationContextRecord::EK_Other,
bool ShouldEnter = true)
: Actions(Actions), Entered(ShouldEnter) {
if (Entered)
Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
ExprContext);
}
// Variant that reuses the enclosing lambda context decl; always enters.
EnterExpressionEvaluationContext(
Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
Sema::ReuseLambdaContextDecl_t,
Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
Sema::ExpressionEvaluationContextRecord::EK_Other)
: Actions(Actions) {
Actions.PushExpressionEvaluationContext(
NewContext, Sema::ReuseLambdaContextDecl, ExprContext);
}
enum InitListTag { InitList };
// Variant for braced init lists: only pushes an UnevaluatedList context, and
// only when currently unevaluated in C++11 or later.
EnterExpressionEvaluationContext(Sema &Actions, InitListTag,
bool ShouldEnter = true)
: Actions(Actions), Entered(false) {
// In C++11 onwards, narrowing checks are performed on the contents of
// braced-init-lists, even when they occur within unevaluated operands.
// Therefore we still need to instantiate constexpr functions used in such
// a context.
if (ShouldEnter && Actions.isUnevaluatedContext() &&
Actions.getLangOpts().CPlusPlus11) {
Actions.PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::UnevaluatedList);
Entered = true;
}
}
~EnterExpressionEvaluationContext() {
if (Entered)
Actions.PopExpressionEvaluationContext();
}
};
DeductionFailureInfo
MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK,
sema::TemplateDeductionInfo &Info);
/// Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate {
/// The cached token stream to be replayed when parsing resumes.
CachedTokens Toks;
/// The template function declaration to be late parsed.
Decl *D;
};
} // end namespace clang
namespace llvm {
// Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its
// SourceLocation.
template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> {
using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc;
using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>;
// Empty/tombstone keys reuse the FunctionDecl base sentinels paired with an
// invalid (default) SourceLocation.
static FunctionDeclAndLoc getEmptyKey() {
return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()};
}
static FunctionDeclAndLoc getTombstoneKey() {
return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()};
}
// Combine the decl's base hash with the location's raw encoding; both fields
// contribute, matching the member-wise isEqual below.
static unsigned getHashValue(const FunctionDeclAndLoc &FDL) {
return hash_combine(FDBaseInfo::getHashValue(FDL.FD),
FDL.Loc.getRawEncoding());
}
// Keys are equal only when both the decl and the location match.
static bool isEqual(const FunctionDeclAndLoc &LHS,
const FunctionDeclAndLoc &RHS) {
return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc;
}
};
} // namespace llvm
#endif
|
image.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% IIIII M M AAA GGGG EEEEE %
% I MM MM A A G E %
% I M M M AAAAA G GG EEE %
% I M M A A G G E %
% IIIII M M A A GGGG EEEEE %
% %
% %
% MagickCore Image Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/animate.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/client.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/compress.h"
#include "MagickCore/constitute.h"
#include "MagickCore/delegate.h"
#include "MagickCore/display.h"
#include "MagickCore/draw.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/histogram.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/magic.h"
#include "MagickCore/magick.h"
#include "MagickCore/magick-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/module.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/paint.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/profile.h"
#include "MagickCore/property.h"
#include "MagickCore/quantize.h"
#include "MagickCore/random_.h"
#include "MagickCore/resource_.h"
#include "MagickCore/segment.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/signature-private.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/timer.h"
#include "MagickCore/timer-private.h"
#include "MagickCore/token.h"
#include "MagickCore/token-private.h"
#include "MagickCore/utility.h"
#include "MagickCore/utility-private.h"
#include "MagickCore/version.h"
#include "MagickCore/xwindow-private.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireImage() returns a pointer to an image structure initialized to
% default values.
%
% The format of the AcquireImage method is:
%
% Image *AcquireImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: Many of the image default values are set from this
% structure. For example, filename, compression, depth, background color,
% and others.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AcquireImage(const ImageInfo *image_info,
  ExceptionInfo *exception)
{
  const char
    *option;

  Image
    *image;

  MagickStatusType
    flags;

  /*
    Allocate image structure.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  image=(Image *) AcquireCriticalMemory(sizeof(*image));
  (void) memset(image,0,sizeof(*image));
  /*
    Initialize Image structure: MIFF format, DirectClass storage, build-time
    quantum depth, and sRGB colorspace with its standard primaries and
    D65 white point.
  */
  (void) CopyMagickString(image->magick,"MIFF",MagickPathExtent);
  image->storage_class=DirectClass;
  image->depth=MAGICKCORE_QUANTUM_DEPTH;
  image->colorspace=sRGBColorspace;
  image->rendering_intent=PerceptualIntent;
  image->gamma=1.000f/2.200f;  /* default display gamma, 1/2.2 */
  image->chromaticity.red_primary.x=0.6400f;
  image->chromaticity.red_primary.y=0.3300f;
  image->chromaticity.red_primary.z=0.0300f;
  image->chromaticity.green_primary.x=0.3000f;
  image->chromaticity.green_primary.y=0.6000f;
  image->chromaticity.green_primary.z=0.1000f;
  image->chromaticity.blue_primary.x=0.1500f;
  image->chromaticity.blue_primary.y=0.0600f;
  image->chromaticity.blue_primary.z=0.7900f;
  image->chromaticity.white_point.x=0.3127f;
  image->chromaticity.white_point.y=0.3290f;
  image->chromaticity.white_point.z=0.3583f;
  image->interlace=NoInterlace;
  image->ticks_per_second=UndefinedTicksPerSecond;
  image->compose=OverCompositeOp;
  /*
    Default matte, background, border, and transparent colors.
  */
  (void) QueryColorCompliance(MatteColor,AllCompliance,&image->matte_color,
    exception);
  (void) QueryColorCompliance(BackgroundColor,AllCompliance,
    &image->background_color,exception);
  (void) QueryColorCompliance(BorderColor,AllCompliance,&image->border_color,
    exception);
  (void) QueryColorCompliance(TransparentColor,AllCompliance,
    &image->transparent_color,exception);
  GetTimerInfo(&image->timer);
  image->cache=AcquirePixelCache(0);
  image->channel_mask=DefaultChannels;
  image->channel_map=AcquirePixelChannelMap();
  image->blob=CloneBlobInfo((BlobInfo *) NULL);
  image->timestamp=GetMagickTime();
  image->debug=IsEventLogging();
  image->reference_count=1;
  image->semaphore=AcquireSemaphoreInfo();
  image->signature=MagickCoreSignature;
  if (image_info == (ImageInfo *) NULL)
    return(image);  /* no settings to transfer; return defaults */
  /*
    Transfer image info.
  */
  SetBlobExempt(image,image_info->file != (FILE *) NULL ? MagickTrue :
    MagickFalse);
  (void) CopyMagickString(image->filename,image_info->filename,
    MagickPathExtent);
  (void) CopyMagickString(image->magick_filename,image_info->filename,
    MagickPathExtent);
  (void) CopyMagickString(image->magick,image_info->magick,MagickPathExtent);
  if (image_info->size != (char *) NULL)
    {
      /*
        -size geometry: width/height plus an optional scene offset that is
        stored in image->offset (x/y are then cleared).
      */
      (void) ParseAbsoluteGeometry(image_info->size,&image->extract_info);
      image->columns=image->extract_info.width;
      image->rows=image->extract_info.height;
      image->offset=image->extract_info.x;
      image->extract_info.x=0;
      image->extract_info.y=0;
    }
  if (image_info->extract != (char *) NULL)
    {
      RectangleInfo
        geometry;

      /*
        -extract geometry: only honored when an x or y offset is present.
      */
      (void) memset(&geometry,0,sizeof(geometry));
      flags=ParseAbsoluteGeometry(image_info->extract,&geometry);
      if (((flags & XValue) != 0) || ((flags & YValue) != 0))
        {
          image->extract_info=geometry;
          Swap(image->columns,image->extract_info.width);
          Swap(image->rows,image->extract_info.height);
        }
    }
  image->compression=image_info->compression;
  image->quality=image_info->quality;
  image->endian=image_info->endian;
  image->interlace=image_info->interlace;
  image->units=image_info->units;
  if (image_info->density != (char *) NULL)
    {
      GeometryInfo
        geometry_info;

      /*
        -density: rho is the x resolution; sigma, when given, the y.
      */
      flags=ParseGeometry(image_info->density,&geometry_info);
      if ((flags & RhoValue) != 0)
        image->resolution.x=geometry_info.rho;
      image->resolution.y=image->resolution.x;
      if ((flags & SigmaValue) != 0)
        image->resolution.y=geometry_info.sigma;
    }
  if (image_info->page != (char *) NULL)
    {
      char
        *geometry;

      image->page=image->extract_info;
      geometry=GetPageGeometry(image_info->page);
      (void) ParseAbsoluteGeometry(geometry,&image->page);
      geometry=DestroyString(geometry);
    }
  if (image_info->depth != 0)
    image->depth=image_info->depth;
  image->dither=image_info->dither;
  image->matte_color=image_info->matte_color;
  image->background_color=image_info->background_color;
  image->border_color=image_info->border_color;
  image->transparent_color=image_info->transparent_color;
  image->ping=image_info->ping;
  image->progress_monitor=image_info->progress_monitor;
  image->client_data=image_info->client_data;
  if (image_info->cache != (void *) NULL)
    ClonePixelCacheMethods(image->cache,image_info->cache);
  /*
    Set all global options that map to per-image settings.
  */
  (void) SyncImageSettings(image_info,image,exception);
  /*
    Global options that are only set for new images.
  */
  option=GetImageOption(image_info,"delay");
  if (option != (const char *) NULL)
    {
      GeometryInfo
        geometry_info;

      flags=ParseGeometry(option,&geometry_info);
      if ((flags & GreaterValue) != 0)
        {
          /*
            "delay>": only reduce an existing, larger delay.
          */
          if ((double) image->delay > floor(geometry_info.rho+0.5))
            image->delay=(size_t) CastDoubleToLong(floor(
              geometry_info.rho+0.5));
        }
      else
        if ((flags & LessValue) != 0)
          {
            /*
              "delay<": NOTE(review) this branch assigns ticks_per_second
              from sigma rather than updating image->delay -- long-standing
              behavior; confirm against upstream before changing.
            */
            if ((double) image->delay < floor(geometry_info.rho+0.5))
              image->ticks_per_second=CastDoubleToLong(floor(
                geometry_info.sigma+0.5));
          }
        else
          image->delay=(size_t) CastDoubleToLong(floor(
            geometry_info.rho+0.5));
      if ((flags & SigmaValue) != 0)
        image->ticks_per_second=CastDoubleToLong(floor(
          geometry_info.sigma+0.5));
    }
  option=GetImageOption(image_info,"dispose");
  if (option != (const char *) NULL)
    image->dispose=(DisposeType) ParseCommandOption(MagickDisposeOptions,
      MagickFalse,option);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireImageInfo() allocates the ImageInfo structure.
%
% The format of the AcquireImageInfo method is:
%
% ImageInfo *AcquireImageInfo(void)
%
*/
MagickExport ImageInfo *AcquireImageInfo(void)
{
  ImageInfo
    *info;

  /*
    Allocate an ImageInfo structure and reset it to its default values;
    AcquireCriticalMemory() aborts on allocation failure, so the result is
    never NULL.
  */
  info=(ImageInfo *) AcquireCriticalMemory(sizeof(*info));
  GetImageInfo(info);
  return(info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e N e x t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireNextImage() initializes the next image in a sequence to
% default values. The next member of image points to the newly allocated
% image. If there is a memory shortage, next is assigned NULL.
%
% The format of the AcquireNextImage method is:
%
% void AcquireNextImage(const ImageInfo *image_info,Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: Many of the image default values are set from this
% structure. For example, filename, compression, depth, background color,
% and others.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport void AcquireNextImage(const ImageInfo *image_info,Image *image,
  ExceptionInfo *exception)
{
  Image
    *next;

  /*
    Allocate the next image in the sequence and link it to its predecessor.
    If allocation fails, image->next is left NULL.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  image->next=AcquireImage(image_info,exception);
  next=GetNextImageInList(image);
  if (next == (Image *) NULL)
    return;
  /*
    Seed the filename from the predecessor; the ImageInfo filename, when
    available, takes precedence.
  */
  (void) CopyMagickString(next->filename,image->filename,MagickPathExtent);
  if (image_info != (ImageInfo *) NULL)
    (void) CopyMagickString(next->filename,image_info->filename,
      MagickPathExtent);
  /*
    Share the predecessor's I/O blob and advance the scene number.
  */
  DestroyBlob(next);
  next->blob=ReferenceBlob(image->blob);
  next->endian=image->endian;
  next->scene=image->scene+1;
  next->previous=image;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A p p e n d I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AppendImages() takes all images from the current image pointer to the end
% of the image list and appends them to each other top-to-bottom if the
% stack parameter is true, otherwise left-to-right.
%
% The current gravity setting affects how the image is justified in the
% final image.
%
% The format of the AppendImages method is:
%
% Image *AppendImages(const Image *images,const MagickBooleanType stack,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o stack: A value other than 0 stacks the images top-to-bottom.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AppendImages(const Image *images,
  const MagickBooleanType stack,ExceptionInfo *exception)
{
#define AppendImageTag "Append/Image"

  CacheView
    *append_view;

  Image
    *append_image;

  MagickBooleanType
    homogeneous_colorspace,
    status;

  MagickOffsetType
    n;

  PixelTrait
    alpha_trait;

  RectangleInfo
    geometry;

  const Image
    *next;

  size_t
    depth,
    height,
    number_images,
    width;

  ssize_t
    x_offset,
    y,
    y_offset;

  /*
    Compute maximum area of appended area.
  */
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  alpha_trait=images->alpha_trait;
  number_images=1;
  width=images->columns;
  height=images->rows;
  depth=images->depth;
  homogeneous_colorspace=MagickTrue;
  next=GetNextImageInList(images);
  for ( ; next != (Image *) NULL; next=GetNextImageInList(next))
  {
    /*
      Promote depth/alpha to the maximum found in the list; note any
      colorspace mismatch so the result can be forced to sRGB below.
    */
    if (next->depth > depth)
      depth=next->depth;
    if (next->colorspace != images->colorspace)
      homogeneous_colorspace=MagickFalse;
    if (next->alpha_trait != UndefinedPixelTrait)
      alpha_trait=BlendPixelTrait;
    number_images++;
    if (stack != MagickFalse)
      {
        /*
          Top-to-bottom: widest column wins, heights accumulate.
        */
        if (next->columns > width)
          width=next->columns;
        height+=next->rows;
        continue;
      }
    /*
      Left-to-right: widths accumulate, tallest row wins.
    */
    width+=next->columns;
    if (next->rows > height)
      height=next->rows;
  }
  /*
    Append images.
  */
  append_image=CloneImage(images,width,height,MagickTrue,exception);
  if (append_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(append_image,DirectClass,exception) == MagickFalse)
    {
      append_image=DestroyImage(append_image);
      return((Image *) NULL);
    }
  if (homogeneous_colorspace == MagickFalse)
    (void) SetImageColorspace(append_image,sRGBColorspace,exception);
  append_image->depth=depth;
  append_image->alpha_trait=alpha_trait;
  append_image->page=images->page;
  (void) SetImageBackgroundColor(append_image,exception);
  status=MagickTrue;
  x_offset=0;
  y_offset=0;
  next=images;
  append_view=AcquireAuthenticCacheView(append_image,exception);
  for (n=0; n < (MagickOffsetType) number_images; n++)
  {
    CacheView
      *image_view;

    MagickBooleanType
      proceed;

    /*
      Justify this image within its slot per its gravity setting.
    */
    SetGeometry(append_image,&geometry);
    GravityAdjustGeometry(next->columns,next->rows,next->gravity,&geometry);
    if (stack != MagickFalse)
      x_offset-=geometry.x;
    else
      y_offset-=geometry.y;
    image_view=AcquireVirtualCacheView(next,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(next,next,next->rows,1)
#endif
    for (y=0; y < (ssize_t) next->rows; y++)
    {
      MagickBooleanType
        sync;

      PixelInfo
        pixel;

      const Quantum
        *magick_restrict p;

      Quantum
        *magick_restrict q;

      ssize_t
        x;

      /*
        Cannot break out of a parallel loop; skip remaining rows instead.
      */
      if (status == MagickFalse)
        continue;
      p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception);
      q=QueueCacheViewAuthenticPixels(append_view,x_offset,y+y_offset,
        next->columns,1,exception);
      if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
        {
          status=MagickFalse;
          continue;
        }
      GetPixelInfo(next,&pixel);
      /*
        Copy the row pixel-by-pixel through a PixelInfo so differing
        channel maps between source and destination are reconciled.
      */
      for (x=0; x < (ssize_t) next->columns; x++)
      {
        GetPixelInfoPixel(next,p,&pixel);
        SetPixelViaPixelInfo(append_image,&pixel,q);
        p+=GetPixelChannels(next);
        q+=GetPixelChannels(append_image);
      }
      sync=SyncCacheViewAuthenticPixels(append_view,exception);
      if (sync == MagickFalse)
        status=MagickFalse;
    }
    image_view=DestroyCacheView(image_view);
    /*
      Advance the insertion point for the next image in the sequence.
    */
    if (stack == MagickFalse)
      {
        x_offset+=(ssize_t) next->columns;
        y_offset=0;
      }
    else
      {
        x_offset=0;
        y_offset+=(ssize_t) next->rows;
      }
    proceed=SetImageProgress(append_image,AppendImageTag,n,number_images);
    if (proceed == MagickFalse)
      break;
    next=GetNextImageInList(next);
  }
  append_view=DestroyCacheView(append_view);
  if (status == MagickFalse)
    append_image=DestroyImage(append_image);
  return(append_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C a t c h I m a g e E x c e p t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CatchImageException() returns if no exceptions are found in the image
% sequence, otherwise it determines the most severe exception and reports
% it as a warning or error depending on the severity.
%
% The format of the CatchImageException method is:
%
% ExceptionType CatchImageException(Image *image)
%
% A description of each parameter follows:
%
% o image: An image sequence.
%
*/
MagickExport ExceptionType CatchImageException(Image *image)
{
  ExceptionInfo
    *exception_info;

  ExceptionType
    severity;

  /*
    Acquire a scratch exception, let CatchException() report any pending
    exceptions into it, and return the resulting severity.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  exception_info=AcquireExceptionInfo();
  CatchException(exception_info);
  severity=exception_info->severity;
  exception_info=DestroyExceptionInfo(exception_info);
  return(severity);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l i p I m a g e P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClipImagePath() sets the image clip mask based on any clipping path information
% if it exists.
%
% The format of the ClipImagePath method is:
%
% MagickBooleanType ClipImagePath(Image *image,const char *pathname,
% const MagickBooleanType inside,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o pathname: name of clipping path resource. If name is preceded by #, use
% clipping path numbered by name.
%
% o inside: if non-zero, later operations take effect inside clipping path.
% Otherwise later operations take effect outside clipping path.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  ClipImage() is a convenience wrapper: clip to the first clipping path
  ("#1"), with later operations taking effect inside the path.
*/
MagickExport MagickBooleanType ClipImage(Image *image,ExceptionInfo *exception)
{
  return(ClipImagePath(image,"#1",MagickTrue,exception));
}
MagickExport MagickBooleanType ClipImagePath(Image *image,const char *pathname,
  const MagickBooleanType inside,ExceptionInfo *exception)
{
#define ClipImagePathTag "ClipPath/Image"

  char
    *property;

  const char
    *value;

  Image
    *clip_mask;

  ImageInfo
    *image_info;

  /*
    Locate the named 8BIM clipping-path property on the image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(pathname != NULL);
  property=AcquireString(pathname);
  (void) FormatLocaleString(property,MagickPathExtent,"8BIM:1999,2998:%s",
    pathname);
  value=GetImageProperty(image,property,exception);
  property=DestroyString(property);
  if (value == (const char *) NULL)
    {
      ThrowFileException(exception,OptionError,"NoClipPathDefined",
        image->filename);
      return(MagickFalse);
    }
  /*
    Render the clipping path into a mask image.
  */
  image_info=AcquireImageInfo();
  (void) CopyMagickString(image_info->filename,image->filename,
    MagickPathExtent);
  (void) ConcatenateMagickString(image_info->filename,pathname,
    MagickPathExtent);
  clip_mask=BlobToImage(image_info,value,strlen(value),exception);
  image_info=DestroyImageInfo(image_info);
  if (clip_mask == (Image *) NULL)
    return(MagickFalse);
  if (clip_mask->storage_class == PseudoClass)
    {
      (void) SyncImage(clip_mask,exception);
      if (SetImageStorageClass(clip_mask,DirectClass,exception) == MagickFalse)
        {
          /*
            Fix: destroy the mask before returning; the original code leaked
            clip_mask on this failure path.
          */
          clip_mask=DestroyImage(clip_mask);
          return(MagickFalse);
        }
    }
  /*
    An "outside" clip is realized by negating the mask.
  */
  if (inside == MagickFalse)
    (void) NegateImage(clip_mask,MagickFalse,exception);
  (void) FormatLocaleString(clip_mask->magick_filename,MagickPathExtent,
    "8BIM:1999,2998:%s\nPS",pathname);
  /*
    Install the mask as the image's write mask; SetImageMask() clones it,
    so the local copy is destroyed afterwards.
  */
  (void) SetImageMask(image,WritePixelMask,clip_mask,exception);
  image->mask_trait=UpdatePixelTrait;
  clip_mask=DestroyImage(clip_mask);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneImage() copies an image and returns the copy as a new image object.
%
% If the specified columns and rows are 0, an exact copy of the image is
% returned, otherwise the pixel data is undefined and must be initialized
% with the QueueAuthenticPixels() and SyncAuthenticPixels() methods. On
% failure, a NULL image is returned and exception describes the reason for the
% failure.
%
% The format of the CloneImage method is:
%
% Image *CloneImage(const Image *image,const size_t columns,
% const size_t rows,const MagickBooleanType orphan,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the cloned image.
%
% o rows: the number of rows in the cloned image.
%
% o detach: With a value other than 0, the cloned image is detached from
% its parent I/O stream.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *CloneImage(const Image *image,const size_t columns,
  const size_t rows,const MagickBooleanType detach,ExceptionInfo *exception)
{
  Image
    *clone_image;

  double
    scale;

  size_t
    length;

  /*
    Clone the image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((image->columns == 0) || (image->rows == 0))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CorruptImageError,
        "NegativeOrZeroImageSize","`%s'",image->filename);
      return((Image *) NULL);
    }
  clone_image=(Image *) AcquireCriticalMemory(sizeof(*clone_image));
  (void) memset(clone_image,0,sizeof(*clone_image));
  clone_image->signature=MagickCoreSignature;
  clone_image->storage_class=image->storage_class;
  clone_image->number_channels=image->number_channels;
  clone_image->number_meta_channels=image->number_meta_channels;
  clone_image->metacontent_extent=image->metacontent_extent;
  clone_image->colorspace=image->colorspace;
  clone_image->alpha_trait=image->alpha_trait;
  clone_image->channels=image->channels;
  clone_image->mask_trait=image->mask_trait;
  clone_image->columns=image->columns;
  clone_image->rows=image->rows;
  clone_image->dither=image->dither;
  /*
    Deep-copy ancillary data: image info, profiles, properties, artifacts.
  */
  clone_image->image_info=CloneImageInfo(image->image_info);
  (void) CloneImageProfiles(clone_image,image);
  (void) CloneImageProperties(clone_image,image);
  (void) CloneImageArtifacts(clone_image,image);
  GetTimerInfo(&clone_image->timer);
  if (image->ascii85 != (void *) NULL)
    Ascii85Initialize(clone_image);
  clone_image->extent=image->extent;
  clone_image->magick_columns=image->magick_columns;
  clone_image->magick_rows=image->magick_rows;
  clone_image->type=image->type;
  clone_image->channel_mask=image->channel_mask;
  clone_image->channel_map=ClonePixelChannelMap(image->channel_map);
  (void) CopyMagickString(clone_image->magick_filename,image->magick_filename,
    MagickPathExtent);
  (void) CopyMagickString(clone_image->magick,image->magick,MagickPathExtent);
  (void) CopyMagickString(clone_image->filename,image->filename,
    MagickPathExtent);
  clone_image->progress_monitor=image->progress_monitor;
  clone_image->client_data=image->client_data;
  clone_image->reference_count=1;
  clone_image->next=image->next;
  clone_image->previous=image->previous;
  clone_image->list=NewImageList();
  if (detach == MagickFalse)
    clone_image->blob=ReferenceBlob(image->blob);  /* share the I/O stream */
  else
    {
      /*
        Detached clone: no list links and a private blob.
      */
      clone_image->next=NewImageList();
      clone_image->previous=NewImageList();
      clone_image->blob=CloneBlobInfo((BlobInfo *) NULL);
    }
  clone_image->ping=image->ping;
  clone_image->debug=IsEventLogging();
  clone_image->semaphore=AcquireSemaphoreInfo();
  if (image->colormap != (PixelInfo *) NULL)
    {
      /*
        Allocate and copy the image colormap.
      */
      clone_image->colors=image->colors;
      length=(size_t) image->colors;
      clone_image->colormap=(PixelInfo *) AcquireQuantumMemory(length+1,
        sizeof(*clone_image->colormap));
      if (clone_image->colormap == (PixelInfo *) NULL)
        {
          clone_image=DestroyImage(clone_image);
          ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
        }
      (void) memcpy(clone_image->colormap,image->colormap,length*
        sizeof(*clone_image->colormap));
    }
  if ((columns == 0) || (rows == 0))
    {
      /*
        Exact copy: share the pixel cache by reference and return.
      */
      if (image->montage != (char *) NULL)
        (void) CloneString(&clone_image->montage,image->montage);
      if (image->directory != (char *) NULL)
        (void) CloneString(&clone_image->directory,image->directory);
      clone_image->cache=ReferencePixelCache(image->cache);
      return(clone_image);
    }
  /*
    Resized clone: scale page and tile offsets proportionally; the pixel
    data itself is left undefined for the caller to initialize.
  */
  scale=1.0;
  if (image->columns != 0)
    scale=(double) columns/(double) image->columns;
  clone_image->page.width=(size_t) CastDoubleToLong(floor(scale*
    image->page.width+0.5));
  clone_image->page.x=CastDoubleToLong(ceil(scale*image->page.x-0.5));
  clone_image->tile_offset.x=CastDoubleToLong(ceil(scale*
    image->tile_offset.x-0.5));
  scale=1.0;
  if (image->rows != 0)
    scale=(double) rows/(double) image->rows;
  clone_image->page.height=(size_t) CastDoubleToLong(floor(scale*
    image->page.height+0.5));
  clone_image->page.y=CastDoubleToLong(ceil(scale*image->page.y-0.5));
  clone_image->tile_offset.y=CastDoubleToLong(ceil(scale*
    image->tile_offset.y-0.5));
  clone_image->cache=ClonePixelCache(image->cache);
  if (SetImageExtent(clone_image,columns,rows,exception) == MagickFalse)
    clone_image=DestroyImage(clone_image);
  return(clone_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneImageInfo() makes a copy of the given image info structure. If
% NULL is specified, a new image info structure is created initialized to
% default values.
%
% The format of the CloneImageInfo method is:
%
% ImageInfo *CloneImageInfo(const ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport ImageInfo *CloneImageInfo(const ImageInfo *image_info)
{
  ImageInfo
    *clone_info;

  /*
    Start from a default-initialized ImageInfo; a NULL source returns it
    unchanged.
  */
  clone_info=AcquireImageInfo();
  if (image_info == (ImageInfo *) NULL)
    return(clone_info);
  clone_info->compression=image_info->compression;
  clone_info->temporary=image_info->temporary;
  clone_info->adjoin=image_info->adjoin;
  clone_info->antialias=image_info->antialias;
  clone_info->scene=image_info->scene;
  clone_info->number_scenes=image_info->number_scenes;
  clone_info->depth=image_info->depth;
  /*
    String members are deep-copied via CloneString().
  */
  if (image_info->size != (char *) NULL)
    (void) CloneString(&clone_info->size,image_info->size);
  if (image_info->extract != (char *) NULL)
    (void) CloneString(&clone_info->extract,image_info->extract);
  if (image_info->scenes != (char *) NULL)
    (void) CloneString(&clone_info->scenes,image_info->scenes);
  if (image_info->page != (char *) NULL)
    (void) CloneString(&clone_info->page,image_info->page);
  clone_info->interlace=image_info->interlace;
  clone_info->endian=image_info->endian;
  clone_info->units=image_info->units;
  clone_info->quality=image_info->quality;
  if (image_info->sampling_factor != (char *) NULL)
    (void) CloneString(&clone_info->sampling_factor,
      image_info->sampling_factor);
  if (image_info->server_name != (char *) NULL)
    (void) CloneString(&clone_info->server_name,image_info->server_name);
  if (image_info->font != (char *) NULL)
    (void) CloneString(&clone_info->font,image_info->font);
  if (image_info->texture != (char *) NULL)
    (void) CloneString(&clone_info->texture,image_info->texture);
  if (image_info->density != (char *) NULL)
    (void) CloneString(&clone_info->density,image_info->density);
  clone_info->pointsize=image_info->pointsize;
  clone_info->fuzz=image_info->fuzz;
  clone_info->matte_color=image_info->matte_color;
  clone_info->background_color=image_info->background_color;
  clone_info->border_color=image_info->border_color;
  clone_info->transparent_color=image_info->transparent_color;
  clone_info->dither=image_info->dither;
  clone_info->monochrome=image_info->monochrome;
  clone_info->colorspace=image_info->colorspace;
  clone_info->type=image_info->type;
  clone_info->orientation=image_info->orientation;
  clone_info->ping=image_info->ping;
  clone_info->verbose=image_info->verbose;
  clone_info->progress_monitor=image_info->progress_monitor;
  clone_info->client_data=image_info->client_data;
  /*
    The pixel cache is shared by reference, not copied.
  */
  clone_info->cache=image_info->cache;
  if (image_info->cache != (void *) NULL)
    clone_info->cache=ReferencePixelCache(image_info->cache);
  if (image_info->profile != (void *) NULL)
    clone_info->profile=(void *) CloneStringInfo((StringInfo *)
      image_info->profile);
  SetImageInfoFile(clone_info,image_info->file);
  SetImageInfoBlob(clone_info,image_info->blob,image_info->length);
  clone_info->stream=image_info->stream;
  clone_info->custom_stream=image_info->custom_stream;
  (void) CopyMagickString(clone_info->magick,image_info->magick,
    MagickPathExtent);
  (void) CopyMagickString(clone_info->unique,image_info->unique,
    MagickPathExtent);
  (void) CopyMagickString(clone_info->filename,image_info->filename,
    MagickPathExtent);
  clone_info->channel=image_info->channel;
  (void) CloneImageOptions(clone_info,image_info);
  clone_info->debug=IsEventLogging();
  clone_info->signature=image_info->signature;
  return(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o p y I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CopyImagePixels() copies pixels from the source image as defined by the
% geometry to the destination image at the specified offset.
%
% The format of the CopyImagePixels method is:
%
% MagickBooleanType CopyImagePixels(Image *image,const Image *source_image,
% const RectangleInfo *geometry,const OffsetInfo *offset,
% ExceptionInfo *exception);
%
% A description of each parameter follows:
%
% o image: the destination image.
%
% o source_image: the source image.
%
% o geometry: define the dimensions of the source pixel rectangle.
%
% o offset: define the offset in the destination image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType CopyImagePixels(Image *image,
  const Image *source_image,const RectangleInfo *geometry,
  const OffsetInfo *offset,ExceptionInfo *exception)
{
#define CopyImageTag "Copy/Image"

  CacheView
    *image_view,
    *source_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(source_image != (Image *) NULL);
  assert(geometry != (RectangleInfo *) NULL);
  assert(offset != (OffsetInfo *) NULL);
  /*
    Reject rectangles that do not fit inside the destination image.
  */
  if ((offset->x < 0) || (offset->y < 0) ||
      ((ssize_t) (offset->x+geometry->width) > (ssize_t) image->columns) ||
      ((ssize_t) (offset->y+geometry->height) > (ssize_t) image->rows))
    ThrowBinaryException(OptionError,"GeometryDoesNotContainImage",
      image->filename);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /*
    Copy image pixels.
  */
  status=MagickTrue;
  progress=0;
  source_view=AcquireVirtualCacheView(source_image,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,source_image,geometry->height,1)
#endif
  for (y=0; y < (ssize_t) geometry->height; y++)
  {
    MagickBooleanType
      sync;

    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    Quantum
      *magick_restrict q;

    /*
      Cannot break out of a parallel loop; skip remaining rows instead.
    */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(source_view,geometry->x,y+geometry->y,
      geometry->width,1,exception);
    q=QueueCacheViewAuthenticPixels(image_view,offset->x,y+offset->y,
      geometry->width,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) geometry->width; x++)
    {
      ssize_t
        i;

      /*
        Copy only channels that are updatable in the destination and
        defined in the source.
      */
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait source_traits=GetPixelChannelTraits(source_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            ((traits & UpdatePixelTrait) == 0) ||
            (source_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(image,channel,p[i],q);
      }
      p+=GetPixelChannels(source_image);
      q+=GetPixelChannels(image);
    }
    sync=SyncCacheViewAuthenticPixels(image_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        /*
          NOTE(review): progress is scaled against image->rows even though
          the loop runs geometry->height iterations -- confirm intended.
        */
        proceed=SetImageProgress(image,CopyImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  source_view=DestroyCacheView(source_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImage() dereferences an image, deallocating memory associated with
% the image if the reference count becomes zero.
%
% The format of the DestroyImage method is:
%
% Image *DestroyImage(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport Image *DestroyImage(Image *image)
{
  MagickBooleanType
    destroy;
  /*
    Dereference image: drop one reference under the image semaphore and
    tear the image down only when the count reaches zero.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  destroy=MagickFalse;
  LockSemaphoreInfo(image->semaphore);
  image->reference_count--;
  if (image->reference_count == 0)
    destroy=MagickTrue;
  UnlockSemaphoreInfo(image->semaphore);
  if (destroy == MagickFalse)
    return((Image *) NULL);  /* other references remain; nothing is freed */
  /*
    Destroy image: release the pixel cache, channel map, strings, profiles,
    properties, artifacts, blob, and semaphore before freeing the structure.
  */
  DestroyImagePixels(image);
  image->channel_map=DestroyPixelChannelMap(image->channel_map);
  if (image->montage != (char *) NULL)
    image->montage=DestroyString(image->montage);
  if (image->directory != (char *) NULL)
    image->directory=DestroyString(image->directory);
  if (image->colormap != (PixelInfo *) NULL)
    image->colormap=(PixelInfo *) RelinquishMagickMemory(image->colormap);
  if (image->geometry != (char *) NULL)
    image->geometry=DestroyString(image->geometry);
  DestroyImageProfiles(image);
  DestroyImageProperties(image);
  DestroyImageArtifacts(image);
  if (image->ascii85 != (Ascii85Info *) NULL)
    image->ascii85=(Ascii85Info *) RelinquishMagickMemory(image->ascii85);
  if (image->image_info != (ImageInfo *) NULL)
    image->image_info=DestroyImageInfo(image->image_info);
  DestroyBlob(image);
  if (image->semaphore != (SemaphoreInfo *) NULL)
    RelinquishSemaphoreInfo(&image->semaphore);
  /*
    Invalidate the signature so stale pointers trip the assert() checks.
  */
  image->signature=(~MagickCoreSignature);
  image=(Image *) RelinquishMagickMemory(image);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImageInfo() deallocates memory associated with an ImageInfo
% structure.
%
% The format of the DestroyImageInfo method is:
%
% ImageInfo *DestroyImageInfo(ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport ImageInfo *DestroyImageInfo(ImageInfo *image_info)
{
  /*
    Release every heap-allocated member of the image info, then the
    structure itself.  Each member is reset via its Destroy*() helper so
    the pointer is not left dangling while the struct is still live.
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  if (image_info->size != (char *) NULL)
    image_info->size=DestroyString(image_info->size);
  if (image_info->extract != (char *) NULL)
    image_info->extract=DestroyString(image_info->extract);
  if (image_info->scenes != (char *) NULL)
    image_info->scenes=DestroyString(image_info->scenes);
  if (image_info->page != (char *) NULL)
    image_info->page=DestroyString(image_info->page);
  if (image_info->sampling_factor != (char *) NULL)
    image_info->sampling_factor=DestroyString(
      image_info->sampling_factor);
  if (image_info->server_name != (char *) NULL)
    image_info->server_name=DestroyString(
      image_info->server_name);
  if (image_info->font != (char *) NULL)
    image_info->font=DestroyString(image_info->font);
  if (image_info->texture != (char *) NULL)
    image_info->texture=DestroyString(image_info->texture);
  if (image_info->density != (char *) NULL)
    image_info->density=DestroyString(image_info->density);
  if (image_info->cache != (void *) NULL)
    image_info->cache=DestroyPixelCache(image_info->cache);
  if (image_info->profile != (StringInfo *) NULL)
    image_info->profile=(void *) DestroyStringInfo((StringInfo *)
      image_info->profile);
  DestroyImageOptions(image_info);
  /*
    Invalidate the signature so stale pointers trip the assert() checks.
  */
  image_info->signature=(~MagickCoreSignature);
  image_info=(ImageInfo *) RelinquishMagickMemory(image_info);
  return(image_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D i s a s s o c i a t e I m a g e S t r e a m %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DisassociateImageStream() disassociates the image stream. It checks if the
% blob of the specified image is referenced by other images. If the reference
% count is higher than 1, a new blob is assigned to the specified image.
%
% The format of the DisassociateImageStream method is:
%
% void DisassociateImageStream(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void DisassociateImageStream(Image *image)
{
  /*
    Detach the image from a shared blob; the actual reference-count check
    and re-assignment is delegated to DisassociateBlob().
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  DisassociateBlob(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageInfo() initializes image_info to default values.
%
% The format of the GetImageInfo method is:
%
% void GetImageInfo(ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport void GetImageInfo(ImageInfo *image_info)
{
  char
    *synchronize;
  ExceptionInfo
    *exception;
  /*
    File and image dimension members: zero the entire structure first,
    then set only the members whose defaults are non-zero.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image_info != (ImageInfo *) NULL);
  (void) memset(image_info,0,sizeof(*image_info));
  image_info->adjoin=MagickTrue;
  image_info->interlace=NoInterlace;
  image_info->channel=DefaultChannels;
  image_info->quality=UndefinedCompressionQuality;
  image_info->antialias=MagickTrue;
  image_info->dither=MagickTrue;
  /*
    Honor the MAGICK_SYNCHRONIZE environment variable, if set.
  */
  synchronize=GetEnvironmentValue("MAGICK_SYNCHRONIZE");
  if (synchronize != (const char *) NULL)
    {
      image_info->synchronize=IsStringTrue(synchronize);
      synchronize=DestroyString(synchronize);
    }
  /*
    Resolve the default colors with a temporary exception info; query
    failures are intentionally ignored.
  */
  exception=AcquireExceptionInfo();
  (void) QueryColorCompliance(BackgroundColor,AllCompliance,
    &image_info->background_color,exception);
  (void) QueryColorCompliance(BorderColor,AllCompliance,
    &image_info->border_color,exception);
  (void) QueryColorCompliance(MatteColor,AllCompliance,&image_info->matte_color,
    exception);
  (void) QueryColorCompliance(TransparentColor,AllCompliance,
    &image_info->transparent_color,exception);
  exception=DestroyExceptionInfo(exception);
  image_info->debug=IsEventLogging();
  image_info->signature=MagickCoreSignature;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e I n f o F i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageInfoFile() returns the image info file member.
%
% The format of the GetImageInfoFile method is:
%
% FILE *GetImageInfoFile(const ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport FILE *GetImageInfoFile(const ImageInfo *image_info)
{
  /* Accessor: return the stdio stream stored in the image info. */
  return(image_info->file);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageMask() returns the mask associated with the image.
%
% The format of the GetImageMask method is:
%
% Image *GetImageMask(const Image *image,const PixelMask type,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o type: the mask type, ReadPixelMask or WritePixelMask.
%
*/
MagickExport Image *GetImageMask(const Image *image,const PixelMask type,
  ExceptionInfo *exception)
{
  CacheView
    *mask_view,
    *image_view;
  Image
    *mask_image;
  MagickBooleanType
    status;
  ssize_t
    y;
  /*
    Get image mask: materialize the requested mask channel as a new
    grayscale image, or return NULL when the image carries no such mask.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  switch (type)
  {
    case ReadPixelMask:
    {
      if ((image->channels & ReadMaskChannel) == 0)
        return((Image *) NULL);
      break;
    }
    case WritePixelMask:
    {
      if ((image->channels & WriteMaskChannel) == 0)
        return((Image *) NULL);
      break;
    }
    default:
    {
      /* anything else is treated as the composite mask */
      if ((image->channels & CompositeMaskChannel) == 0)
        return((Image *) NULL);
      break;
    }
  }
  mask_image=AcquireImage((ImageInfo *) NULL,exception);
  status=SetImageExtent(mask_image,image->columns,image->rows,exception);
  if (status == MagickFalse)
    return(DestroyImage(mask_image));
  status=MagickTrue;
  mask_image->alpha_trait=UndefinedPixelTrait;  /* mask has no alpha */
  (void) SetImageColorspace(mask_image,GRAYColorspace,exception);
  image_view=AcquireVirtualCacheView(image,exception);
  mask_view=AcquireAuthenticCacheView(mask_image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *magick_restrict p;
    Quantum
      *magick_restrict q;
    ssize_t
      x;
    if (status == MagickFalse)
      continue;  /* an earlier row failed; skip the rest */
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewAuthenticPixels(mask_view,0,y,mask_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /*
        Copy the selected mask channel into the gray channel.
      */
      switch (type)
      {
        case ReadPixelMask:
        {
          SetPixelGray(mask_image,GetPixelReadMask(image,p),q);
          break;
        }
        case WritePixelMask:
        {
          SetPixelGray(mask_image,GetPixelWriteMask(image,p),q);
          break;
        }
        default:
        {
          SetPixelGray(mask_image,GetPixelCompositeMask(image,p),q);
          break;
        }
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(mask_image);
    }
    if (SyncCacheViewAuthenticPixels(mask_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  mask_view=DestroyCacheView(mask_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    mask_image=DestroyImage(mask_image);  /* yields NULL on failure */
  return(mask_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e R e f e r e n c e C o u n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageReferenceCount() returns the image reference count.
%
% The format of the GetImageReferenceCount method is:
%
% ssize_t GetImageReferenceCount(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport ssize_t GetImageReferenceCount(Image *image)
{
  ssize_t
    count;

  /*
    Snapshot the reference count under the image semaphore so the read is
    consistent with concurrent ReferenceImage()/DestroyImage() calls.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  LockSemaphoreInfo(image->semaphore);
  count=image->reference_count;
  UnlockSemaphoreInfo(image->semaphore);
  return(count);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e V i r t u a l P i x e l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageVirtualPixelMethod() gets the "virtual pixels" method for the
% image. A virtual pixel is any pixel access that is outside the boundaries
% of the image cache.
%
% The format of the GetImageVirtualPixelMethod() method is:
%
% VirtualPixelMethod GetImageVirtualPixelMethod(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport VirtualPixelMethod GetImageVirtualPixelMethod(const Image *image)
{
  /*
    Thin wrapper: the virtual-pixel method lives in the pixel cache, so
    delegate to GetPixelCacheVirtualMethod().
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  return(GetPixelCacheVirtualMethod(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I n t e r p r e t I m a g e F i l e n a m e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InterpretImageFilename() interprets embedded characters in an image filename.
% The filename length is returned.
%
% The format of the InterpretImageFilename method is:
%
% size_t InterpretImageFilename(const ImageInfo *image_info,Image *image,
% const char *format,int value,char *filename,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image_info: the image info.
%
% o image: the image.
%
% o format: A filename describing the format to use to write the numeric
% argument. Only the first numeric format identifier is replaced.
%
% o value: Numeric value to substitute into format filename.
%
% o filename: return the formatted filename in this character buffer.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport size_t InterpretImageFilename(const ImageInfo *image_info,
  Image *image,const char *format,int value,char *filename,
  ExceptionInfo *exception)
{
  char
    *q;
  const char
    *p;
  int
    c;
  MagickBooleanType
    canonical;
  ssize_t
    field_width,
    offset;
  /*
    Expand %d/%o/%x scene-number specifiers and %[filename:...] options
    embedded in 'format' into 'filename'; the expanded length is returned.
    'offset' tracks the cumulative length difference between 'format' and
    'filename' so positions computed from 'p' (which walks 'format') can be
    mapped into 'filename'.
  */
  canonical=MagickFalse;
  offset=0;
  (void) CopyMagickString(filename,format,MagickPathExtent);
  if (IsStringTrue(GetImageOption(image_info,"filename:literal")) != MagickFalse)
    return(strlen(filename));  /* caller opted out of interpretation */
  for (p=strchr(format,'%'); p != (char *) NULL; p=strchr(p+1,'%'))
  {
    q=(char *) p+1;
    if (*q == '%')
      {
        /*
          "%%" is an escaped percent sign; skip it here, collapsed below.
        */
        p=q+1;
        continue;
      }
    field_width=0;
    if (*q == '0')
      field_width=(ssize_t) strtol(q,&q,10);  /* e.g. "%03d" */
    switch (*q)
    {
      case 'd':
      case 'o':
      case 'x':
      {
        /*
          Numeric scene specifier: format 'value' into place, then append
          the remainder of the format string.
        */
        q++;
        c=(*q);
        *q='\0';  /* temporarily terminate the specifier for formatting */
        (void) FormatLocaleString(filename+(p-format-offset),(size_t)
          (MagickPathExtent-(p-format-offset)),p,value);
        offset+=(4-field_width);  /* NOTE(review): presumably accounts for
          the expanded width of the specifier -- confirm the constant 4 */
        *q=c;
        (void) ConcatenateMagickString(filename,q,MagickPathExtent);
        canonical=MagickTrue;
        if (*(q-1) != '%')
          break;
        p++;
        break;
      }
      case '[':
      {
        char
          pattern[MagickPathExtent];
        const char
          *option;
        char
          *r;
        ssize_t
          i;
        ssize_t
          depth;
        /*
          Image option.
        */
        if (strchr(p,']') == (char *) NULL)
          break;  /* unterminated %[...]: leave it verbatim */
        depth=1;
        r=q+1;
        /*
          Copy the bracket contents, honoring nested '['...']' pairs.
        */
        for (i=0; (i < (MagickPathExtent-1L)) && (*r != '\0'); i++)
        {
          if (*r == '[')
            depth++;
          if (*r == ']')
            depth--;
          if (depth <= 0)
            break;
          pattern[i]=(*r++);
        }
        pattern[i]='\0';
        if (LocaleNCompare(pattern,"filename:",9) != 0)
          break;  /* only "filename:" options are interpreted here */
        /*
          Resolve the option: image property, then artifact, then option.
        */
        option=(const char *) NULL;
        if (image != (Image *) NULL)
          option=GetImageProperty(image,pattern,exception);
        if ((option == (const char *) NULL) && (image != (Image *) NULL))
          option=GetImageArtifact(image,pattern);
        if ((option == (const char *) NULL) &&
            (image_info != (ImageInfo *) NULL))
          option=GetImageOption(image_info,pattern);
        if (option == (const char *) NULL)
          break;
        q--;
        c=(*q);
        *q='\0';  /* temporarily terminate at the '%' for the copy */
        (void) CopyMagickString(filename+(p-format-offset),option,(size_t)
          (MagickPathExtent-(p-format-offset)));
        offset+=strlen(pattern)-strlen(option)+3;  /* 3 = strlen("%[]") */
        *q=c;
        (void) ConcatenateMagickString(filename,r+1,MagickPathExtent);
        canonical=MagickTrue;
        if (*(q-1) != '%')
          break;
        p++;
        break;
      }
      default:
        break;
    }
  }
  if (canonical == MagickFalse)
    (void) CopyMagickString(filename,format,MagickPathExtent);
  else
    /*
      Collapse "%%" escapes to a single '%'.
    */
    for (q=filename; *q != '\0'; q++)
      if ((*q == '%') && (*(q+1) == '%'))
        (void) CopyMagickString(q,q+1,(size_t) (MagickPathExtent-(q-filename)));
  return(strlen(filename));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s H i g h D y n a m i c R a n g e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsHighDynamicRangeImage() returns MagickTrue if any pixel component is
% non-integer or exceeds the bounds of the quantum depth (e.g. for Q16,
% 0..65535).
%
% The format of the IsHighDynamicRangeImage method is:
%
% MagickBooleanType IsHighDynamicRangeImage(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsHighDynamicRangeImage(const Image *image,
  ExceptionInfo *exception)
{
#if !defined(MAGICKCORE_HDRI_SUPPORT)
  /*
    Integer quantum build: pixel values cannot leave the quantum range.
  */
  (void) image;
  (void) exception;
  return(MagickFalse);
#else
  CacheView
    *image_view;
  MagickBooleanType
    status;
  ssize_t
    y;
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    status stays MagickTrue while every channel of every pixel is an
    integral value within [0, QuantumRange]; it flips to MagickFalse as
    soon as any channel is fractional or out of range.
  */
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *p;
    ssize_t
      x;
    if (status == MagickFalse)
      continue;  /* another row already decided the answer */
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        i;
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          pixel;
        PixelTrait
          traits;
        traits=GetPixelChannelTraits(image,(PixelChannel) i);
        if (traits == UndefinedPixelTrait)
          continue;
        pixel=(double) p[i];
        /*
          Out of range or fractional => high dynamic range.
        */
        if ((pixel < 0.0) || (pixel > QuantumRange) ||
            (pixel != (double) ((QuantumAny) pixel)))
          break;
      }
      p+=GetPixelChannels(image);
      if (i < (ssize_t) GetPixelChannels(image))
        status=MagickFalse;  /* channel loop broke early */
    }
    if (x < (ssize_t) image->columns)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  /*
    status == MagickTrue means all pixels were in range, i.e. not HDR.
  */
  return(status != MagickFalse ? MagickFalse : MagickTrue);
#endif
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s I m a g e O b j e c t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsImageObject() returns MagickTrue if the image sequence contains a valid
% set of image objects.
%
% The format of the IsImageObject method is:
%
% MagickBooleanType IsImageObject(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType IsImageObject(const Image *image)
{
  const Image
    *next;

  /*
    Walk the image list; every frame must carry a valid core signature
    for the sequence to be considered a valid set of image objects.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  for (next=image; next != (Image *) NULL; next=GetNextImageInList(next))
  {
    if (next->signature != MagickCoreSignature)
      return(MagickFalse);
  }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s T a i n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsTaintImage() returns MagickTrue if any pixel in the image has been altered
% since it was first constituted.
%
% The format of the IsTaintImage method is:
%
% MagickBooleanType IsTaintImage(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType IsTaintImage(const Image *image)
{
  char
    filename[MagickPathExtent],
    magick[MagickPathExtent];

  const Image
    *next;

  /*
    An image list is tainted when any frame is flagged as altered, or when
    a frame's magick/filename no longer matches those of the first frame.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  (void) CopyMagickString(magick,image->magick,MagickPathExtent);
  (void) CopyMagickString(filename,image->filename,MagickPathExtent);
  for (next=image; next != (Image *) NULL; next=GetNextImageInList(next))
    if ((next->taint != MagickFalse) ||
        (LocaleCompare(next->magick,magick) != 0) ||
        (LocaleCompare(next->filename,filename) != 0))
      return(MagickTrue);
  return(MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o d i f y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ModifyImage() ensures that there is only a single reference to the image
% to be modified, updating the provided image pointer to point to a clone of
% the original image if necessary.
%
% The format of the ModifyImage method is:
%
% MagickBooleanType ModifyImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ModifyImage(Image **image,
  ExceptionInfo *exception)
{
  Image
    *clone_image;
  /*
    Ensure the caller holds the only reference to *image: when the image
    is shared (reference count > 1), replace *image with a private clone
    and drop one reference on the original.
    Fix: check the CloneImage() result.  Previously a failed clone still
    decremented the shared image's reference count and stored NULL into
    *image while returning MagickTrue, handing callers a NULL image.
  */
  assert(image != (Image **) NULL);
  assert(*image != (Image *) NULL);
  assert((*image)->signature == MagickCoreSignature);
  if ((*image)->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",(*image)->filename);
  if (GetImageReferenceCount(*image) <= 1)
    return(MagickTrue);  /* already exclusively owned */
  clone_image=CloneImage(*image,0,0,MagickTrue,exception);
  if (clone_image == (Image *) NULL)
    return(MagickFalse);  /* clone failed: leave *image and its count intact */
  LockSemaphoreInfo((*image)->semaphore);
  (*image)->reference_count--;
  UnlockSemaphoreInfo((*image)->semaphore);
  *image=clone_image;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e w M a g i c k I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NewMagickImage() creates a blank image canvas of the specified size and
% background color.
%
% The format of the NewMagickImage method is:
%
% Image *NewMagickImage(const ImageInfo *image_info,const size_t width,
% const size_t height,const PixelInfo *background,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o width: the image width.
%
% o height: the image height.
%
% o background: the image color.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *NewMagickImage(const ImageInfo *image_info,
  const size_t width,const size_t height,const PixelInfo *background,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;
  Image
    *image;
  MagickBooleanType
    status;
  ssize_t
    y;
  assert(image_info != (const ImageInfo *) NULL);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image_info->signature == MagickCoreSignature);
  assert(background != (const PixelInfo *) NULL);
  /*
    The canvas inherits colorspace, alpha trait, fuzz, and depth from the
    background fill color.
  */
  image=AcquireImage(image_info,exception);
  image->columns=width;
  image->rows=height;
  image->colorspace=background->colorspace;
  image->alpha_trait=background->alpha_trait;
  image->fuzz=background->fuzz;
  image->depth=background->depth;
  /*
    Fill every row with the background color.
  */
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;
    ssize_t
      x;
    if (status == MagickFalse)
      continue;  /* a previous row failed; skip the rest */
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelViaPixelInfo(image,background,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    image=DestroyImage(image);  /* single reference, so this yields NULL */
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e f e r e n c e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReferenceImage() increments the reference count associated with an image
% returning a pointer to the image.
%
% The format of the ReferenceImage method is:
%
% Image *ReferenceImage(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport Image *ReferenceImage(Image *image)
{
  /*
    Bump the reference count under the image semaphore and return the same
    image pointer; pairs with DestroyImage(), which decrements the count.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  LockSemaphoreInfo(image->semaphore);
  image->reference_count++;
  UnlockSemaphoreInfo(image->semaphore);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s e t I m a g e P a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetImagePage() resets the image page canvas and position.
%
% The format of the ResetImagePage method is:
%
% MagickBooleanType ResetImagePage(Image *image,const char *page)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o page: the relative page specification.
%
*/
MagickExport MagickBooleanType ResetImagePage(Image *image,const char *page)
{
  MagickStatusType
    flags;

  RectangleInfo
    canvas;

  /*
    Reset the page canvas and position from the relative page
    specification; always succeeds.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  flags=ParseAbsoluteGeometry(page,&canvas);
  if ((flags & WidthValue) != 0)
    {
      /*
        A width without a height implies a square canvas.
      */
      image->page.width=canvas.width;
      image->page.height=((flags & HeightValue) != 0) ? canvas.height :
        canvas.width;
    }
  if ((flags & AspectValue) != 0)
    {
      /*
        Aspect form: the offsets adjust the current page position.
      */
      if ((flags & XValue) != 0)
        image->page.x+=canvas.x;
      if ((flags & YValue) != 0)
        image->page.y+=canvas.y;
    }
  else
    {
      /*
        Absolute offsets; grow a zero-sized canvas to cover the image.
      */
      if ((flags & XValue) != 0)
        {
          image->page.x=canvas.x;
          if ((image->page.width == 0) && (canvas.x > 0))
            image->page.width=image->columns+canvas.x;
        }
      if ((flags & YValue) != 0)
        {
          image->page.y=canvas.y;
          if ((image->page.height == 0) && (canvas.y > 0))
            image->page.height=image->rows+canvas.y;
        }
    }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s e t I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetImagePixels() resets the image pixels, that is, all the pixel components
% are zeroed.
%
% The format of the ResetImagePixels method is:
%
% MagickBooleanType ResetImagePixels(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ResetImagePixels(Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;
  MagickBooleanType
    status;
  size_t
    length;
  ssize_t
    y;
  void
    *pixels;
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  /*
    Fast path: when the whole pixel cache is available in memory, a single
    memset() clears it.
  */
  pixels=AcquirePixelCachePixels(image,&length,exception);
  if (pixels != (void *) NULL)
    {
      /*
        Reset in-core image pixels.
      */
      (void) memset(pixels,0,length);
      return(MagickTrue);
    }
  /*
    Reset image pixels row by row through the cache view (e.g. disk cache).
  */
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;
    ssize_t
      x;
    if (status == MagickFalse)
      continue;  /* a previous row failed; skip the rest */
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      (void) memset(q,0,GetPixelChannels(image)*sizeof(Quantum));
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e A l p h a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageAlpha() sets the alpha levels of the image.
%
% The format of the SetImageAlpha method is:
%
% MagickBooleanType SetImageAlpha(Image *image,const Quantum alpha,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o alpha: the level of transparency: 0 is fully transparent and QuantumRange
% is fully opaque.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageAlpha(Image *image,const Quantum alpha,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;
  MagickBooleanType
    status;
  ssize_t
    y;
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  /*
    Enable blending so the alpha channel participates in compositing.
  */
  image->alpha_trait=BlendPixelTrait;
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;
    ssize_t
      x;
    if (status == MagickFalse)
      continue;  /* a previous row failed; skip the rest */
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /*
        Only set alpha where the write mask permits it.
      */
      if (GetPixelWriteMask(image,q) > (QuantumRange/2))
        SetPixelAlpha(image,alpha,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e B a c k g r o u n d C o l o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageBackgroundColor() initializes the image pixels to the image
% background color. The background color is defined by the background_color
% member of the image structure.
%
% The format of the SetImageBackgroundColor method is:
%
% MagickBooleanType SetImageBackgroundColor(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageBackgroundColor(Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;
  MagickBooleanType
    status;
  PixelInfo
    background;
  ssize_t
    y;
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /*
    Activate the alpha channel when the background color carries alpha but
    the image does not yet have one.
  */
  if ((image->background_color.alpha_trait != UndefinedPixelTrait) &&
      (image->alpha_trait == UndefinedPixelTrait))
    (void) SetImageAlphaChannel(image,OnAlphaChannel,exception);
  ConformPixelInfo(image,&image->background_color,&background,exception);
  /*
    Set image background color.
  */
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;
    ssize_t
      x;
    if (status == MagickFalse)
      continue;  /* a previous row failed; skip the rest */
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelViaPixelInfo(image,&background,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C h a n n e l M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageChannelMask() sets the image channel mask from the specified channel
% mask.
%
% The format of the SetImageChannelMask method is:
%
% ChannelType SetImageChannelMask(Image *image,
% const ChannelType channel_mask)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel_mask: the channel mask.
%
*/
MagickExport ChannelType SetImageChannelMask(Image *image,
  const ChannelType channel_mask)
{
  /* Thin wrapper: delegate to SetPixelChannelMask() and pass through its
     result. */
  return(SetPixelChannelMask(image,channel_mask));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C o l o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageColor() set the entire image canvas to the specified color.
%
% The format of the SetImageColor method is:
%
% MagickBooleanType SetImageColor(Image *image,const PixelInfo *color,
%        ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
%    o color: the image color.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageColor(Image *image,
  const PixelInfo *color,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Set the entire image canvas to the specified color; the image adopts the
    color's colorspace, alpha trait, fuzz, and depth.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  assert(color != (const PixelInfo *) NULL);
  image->colorspace=color->colorspace;
  image->alpha_trait=color->alpha_trait;
  image->fuzz=color->fuzz;
  image->depth=color->depth;
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    /* Once any row fails, remaining iterations become no-ops. */
    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelViaPixelInfo(image,color,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e S t o r a g e C l a s s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageStorageClass() sets the image class: DirectClass for true color
% images or PseudoClass for colormapped images.
%
% The format of the SetImageStorageClass method is:
%
% MagickBooleanType SetImageStorageClass(Image *image,
% const ClassType storage_class,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o storage_class: The image class.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageStorageClass(Image *image,
  const ClassType storage_class,ExceptionInfo *exception)
{
  /*
    Record the new storage class (DirectClass or PseudoClass) and
    re-synchronize the pixel cache with the image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  image->storage_class=storage_class;
  return(SyncImagePixelCache(image,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageExtent() sets the image size (i.e. columns & rows).
%
% The format of the SetImageExtent method is:
%
% MagickBooleanType SetImageExtent(Image *image,const size_t columns,
% const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: The image width in pixels.
%
% o rows: The image height in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageExtent(Image *image,const size_t columns,
  const size_t rows,ExceptionInfo *exception)
{
  /*
    Resize the canvas geometry (columns x rows), clamping the image depth to
    the supported range, then resync the pixel cache.
  */
  size_t
    depth_limit;

  if ((columns == 0) || (rows == 0))
    ThrowBinaryException(ImageError,"NegativeOrZeroImageSize",image->filename);
  image->columns=columns;
  image->rows=rows;
  if (image->depth == 0)
    {
      /* A zero depth is invalid; warn and fall back to 8 bits. */
      (void) ThrowMagickException(exception,GetMagickModule(),ImageError,
        "ImageDepthNotSupported","`%s'",image->filename);
      image->depth=8;
    }
  depth_limit=8*sizeof(MagickSizeType);
  if (image->depth > depth_limit)
    {
      /* Depth cannot exceed the widest supported pixel quantum. */
      (void) ThrowMagickException(exception,GetMagickModule(),ImageError,
        "ImageDepthNotSupported","`%s'",image->filename);
      image->depth=depth_limit;
    }
  return(SyncImagePixelCache(image,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
%   S e t I m a g e I n f o                                                   %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageInfo() initializes the 'magick' field of the ImageInfo structure.
% It is set to a type of image format based on the prefix or suffix of the
% filename. For example, 'ps:image' returns PS indicating a Postscript image.
% JPEG is returned for this filename: 'image.jpg'. The filename prefix has
%  precedence over the suffix.  Use an optional index enclosed in brackets
% after a file name to specify a desired scene of a multi-resolution image
% format like Photo CD (e.g. img0001.pcd[4]). A True (non-zero) return value
% indicates success.
%
% The format of the SetImageInfo method is:
%
% MagickBooleanType SetImageInfo(ImageInfo *image_info,
% const unsigned int frames,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o frames: the number of images you intend to write.
%
% o exception: return any errors or warnings in this structure.
%
*/
static const MagickInfo *SetImageInfoFromExtension(ImageInfo *image_info,
  const char *component,char *magic,ExceptionInfo *exception)
{
  const MagickInfo
    *magick_info;

  MagickFormatType
    format_type;

  ssize_t
    i;

  static const char
    *format_type_formats[] =
    {
      "AUTOTRACE",
      "BROWSE",
      "DCRAW",
      "EDIT",
      "LAUNCH",
      "MPEG:DECODE",
      "MPEG:ENCODE",
      "PRINT",
      "PS:ALPHA",
      "PS:CMYK",
      "PS:COLOR",
      "PS:GRAY",
      "PS:MONO",
      "SCAN",
      "SHOW",
      "WIN",
      (char *) NULL
    };

  /*
    Treat the filename component as a user specified image format: upper-case
    it into 'magic' and classify it.
  */
  (void) CopyMagickString(magic,component,MagickPathExtent);
  LocaleUpper(magic);
  /*
    Look for explicit image formats: first ask the coder registry, then fall
    back on the table of known delegate-style formats.
  */
  format_type=UndefinedFormatType;
  magick_info=GetMagickInfo(magic,exception);
  if ((magick_info != (const MagickInfo *) NULL) &&
      (magick_info->format_type != UndefinedFormatType))
    format_type=magick_info->format_type;
  for (i=0; format_type_formats[i] != (char *) NULL; i++)
  {
    if (format_type != UndefinedFormatType)
      break;
    /* Cheap first-character check before the full case-insensitive compare. */
    if ((*magic == *format_type_formats[i]) &&
        (LocaleCompare(magic,format_type_formats[i]) == 0))
      format_type=ExplicitFormatType;
  }
  if (format_type == ExplicitFormatType)
    image_info->affirm=MagickTrue;
  if ((format_type == UndefinedFormatType) ||
      (format_type == ExplicitFormatType))
    (void) CopyMagickString(image_info->magick,magic,MagickPathExtent);
  if (LocaleCompare(magic,"RGB") == 0)
    image_info->affirm=MagickFalse; /* maybe SGI disguised as RGB */
  return(magick_info);
}
MagickExport MagickBooleanType SetImageInfo(ImageInfo *image_info,
  const unsigned int frames,ExceptionInfo *exception)
{
  char
    component[MagickPathExtent],
    magic[MagickPathExtent],
    path[MagickPathExtent],
    *q;

  const MagicInfo
    *magic_info;

  const MagickInfo
    *magick_info;

  ExceptionInfo
    *sans_exception;

  Image
    *image;

  MagickBooleanType
    status;

  const char
    *p;

  ssize_t
    count;

  /*
    Determine the image format for image_info->filename: first from an
    explicit 'format:' prefix or a filename extension, then (when frames == 0
    and nothing was affirmed) by sniffing the file's leading magic bytes.
    Look for 'image.format' in filename.
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  *component='\0';
  GetPathComponent(image_info->filename,SubimagePath,component);
  if (*component != '\0')
    {
      /*
        Look for scene specification (e.g. img0001.pcd[4]).
      */
      if (IsSceneGeometry(component,MagickFalse) == MagickFalse)
        {
          /* Not a scene list; a geometry string selects an extract region. */
          if (IsGeometry(component) != MagickFalse)
            (void) CloneString(&image_info->extract,component);
        }
      else
        {
          size_t
            first,
            last;

          /*
            Parse the comma/dash separated scene list (e.g. [2,4-7]) to find
            the lowest scene and the highest scene referenced.
          */
          (void) CloneString(&image_info->scenes,component);
          image_info->scene=StringToUnsignedLong(image_info->scenes);
          image_info->number_scenes=image_info->scene;
          p=image_info->scenes;
          for (q=(char *) image_info->scenes; *q != '\0'; p++)
          {
            /* Skip whitespace and the separating commas between entries. */
            while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == ','))
              p++;
            first=(size_t) strtol(p,&q,10);
            last=first;
            while (isspace((int) ((unsigned char) *q)) != 0)
              q++;
            if (*q == '-')
              last=(size_t) strtol(q+1,&q,10);
            if (first > last)
              Swap(first,last);
            if (first < image_info->scene)
              image_info->scene=first;
            if (last > image_info->number_scenes)
              image_info->number_scenes=last;
            p=q;
          }
          /* Convert the inclusive [scene,last] span into a scene count. */
          image_info->number_scenes-=image_info->scene-1;
        }
    }
  *component='\0';
  if (*image_info->magick == '\0')
    GetPathComponent(image_info->filename,ExtensionPath,component);
  if (*component != '\0')
    {
      /*
        Base path sans any compression extension.
      */
      GetPathComponent(image_info->filename,BasePathSansCompressExtension,path);
      GetPathComponent(path,ExtensionPath,component);
    }
  image_info->affirm=MagickFalse;
  sans_exception=AcquireExceptionInfo();
  /* Infer the format from the extension, unless it contains glob chars. */
  if ((*component != '\0') && (IsGlob(component) == MagickFalse))
    magick_info=SetImageInfoFromExtension(image_info,component,magic,
      sans_exception);
  /*
    Look for explicit 'format:image' in filename.
  */
  *magic='\0';
  GetPathComponent(image_info->filename,MagickPath,magic);
  if (*magic == '\0')
    {
      /* No explicit prefix: keep whatever format was inferred so far. */
      (void) CopyMagickString(magic,image_info->magick,MagickPathExtent);
      magick_info=GetMagickInfo(magic,sans_exception);
      if (frames == 0)
        GetPathComponent(image_info->filename,CanonicalPath,component);
      else
        GetPathComponent(image_info->filename,SubcanonicalPath,component);
      (void) CopyMagickString(image_info->filename,component,MagickPathExtent);
    }
  else
    {
      const DelegateInfo
        *delegate_info;

      /*
        User specified image format.
      */
      LocaleUpper(magic);
      magick_info=GetMagickInfo(magic,sans_exception);
      delegate_info=(const DelegateInfo *) NULL;
      if (magick_info == (const MagickInfo *) NULL)
        {
          /* Unknown coder: maybe a delegate handles this format. */
          delegate_info=GetDelegateInfo(magic,"*",sans_exception);
          if (delegate_info == (const DelegateInfo *) NULL)
            delegate_info=GetDelegateInfo("*",magic,sans_exception);
          if ((delegate_info == (const DelegateInfo *) NULL) &&
              ((*component != '\0') && (IsGlob(component) == MagickFalse)))
            {
              /*
                Retry in case GetMagickInfo loaded a custom module.
              */
              magick_info=SetImageInfoFromExtension(image_info,component,magic,
                sans_exception);
            }
        }
      if (((magick_info != (const MagickInfo *) NULL) ||
           (delegate_info != (const DelegateInfo *) NULL)) &&
          (IsMagickConflict(magic) == MagickFalse))
        {
          /* The prefix names a real coder or delegate: honor it. */
          image_info->affirm=MagickTrue;
          (void) CopyMagickString(image_info->magick,magic,MagickPathExtent);
          GetPathComponent(image_info->filename,CanonicalPath,component);
          (void) CopyMagickString(image_info->filename,component,
            MagickPathExtent);
        }
    }
  sans_exception=DestroyExceptionInfo(sans_exception);
  /* Reset endianness when the chosen coder does not support it. */
  if ((magick_info == (const MagickInfo *) NULL) ||
      (GetMagickEndianSupport(magick_info) == MagickFalse))
    image_info->endian=UndefinedEndian;
  if ((image_info->adjoin != MagickFalse) && (frames > 1))
    {
      /*
        Test for multiple image support (e.g. image%02d.png).
      */
      (void) InterpretImageFilename(image_info,(Image *) NULL,
        image_info->filename,(int) image_info->scene,component,exception);
      if ((LocaleCompare(component,image_info->filename) != 0) &&
          (strchr(component,'%') == (char *) NULL))
        image_info->adjoin=MagickFalse;
    }
  if ((image_info->adjoin != MagickFalse) && (frames > 0))
    {
      /*
        Some image formats do not support multiple frames per file.
      */
      magick_info=GetMagickInfo(magic,exception);
      if (magick_info != (const MagickInfo *) NULL)
        if (GetMagickAdjoin(magick_info) == MagickFalse)
          image_info->adjoin=MagickFalse;
    }
  if (image_info->affirm != MagickFalse)
    return(MagickTrue);
  if (frames == 0)
    {
      unsigned char
        *magick;

      size_t
        magick_size;

      /*
        Determine the image format from the first few bytes of the file.
      */
      magick_size=GetMagicPatternExtent(exception);
      if (magick_size == 0)
        return(MagickFalse);
      image=AcquireImage(image_info,exception);
      (void) CopyMagickString(image->filename,image_info->filename,
        MagickPathExtent);
      status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
      if (status == MagickFalse)
        {
          image=DestroyImage(image);
          return(MagickFalse);
        }
      if ((IsBlobSeekable(image) == MagickFalse) ||
          (IsBlobExempt(image) != MagickFalse))
        {
          /*
            Copy image to seekable temporary file.
          */
          *component='\0';
          status=ImageToFile(image,component,exception);
          (void) CloseBlob(image);
          if (status == MagickFalse)
            {
              (void) RelinquishUniqueFileResource(component);
              image=DestroyImage(image);
              return(MagickFalse);
            }
          SetImageInfoFile(image_info,(FILE *) NULL);
          (void) CopyMagickString(image->filename,component,MagickPathExtent);
          status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
          if (status == MagickFalse)
            {
              (void) RelinquishUniqueFileResource(component);
              image=DestroyImage(image);
              return(MagickFalse);
            }
          (void) CopyMagickString(image_info->filename,component,
            MagickPathExtent);
          image_info->temporary=MagickTrue;
        }
      magick=(unsigned char *) AcquireQuantumMemory(1,magick_size);
      if (magick == (unsigned char *) NULL)
        {
          (void) CloseBlob(image);
          image=DestroyImage(image);
          return(MagickFalse);
        }
      (void) memset(magick,0,magick_size);
      /* Peek at the header bytes, then restore the blob position. */
      count=ReadBlob(image,magick_size,magick);
      (void) SeekBlob(image,-((MagickOffsetType) count),SEEK_CUR);
      (void) CloseBlob(image);
      image=DestroyImage(image);
      /*
        Check magic cache.
      */
      sans_exception=AcquireExceptionInfo();
      magic_info=GetMagicInfo(magick,(size_t) count,sans_exception);
      magick=(unsigned char *) RelinquishMagickMemory(magick);
      if ((magic_info != (const MagicInfo *) NULL) &&
          (GetMagicName(magic_info) != (char *) NULL))
        {
          /*
            Try to use magick_info that was determined earlier by the extension
          */
          if ((magick_info != (const MagickInfo *) NULL) &&
              (GetMagickUseExtension(magick_info) != MagickFalse) &&
              (LocaleCompare(magick_info->magick_module,GetMagicName(
                magic_info)) == 0))
            (void) CopyMagickString(image_info->magick,magick_info->name,
              MagickPathExtent);
          else
            {
              /* Otherwise trust the magic-byte match over the extension. */
              (void) CopyMagickString(image_info->magick,GetMagicName(
                magic_info),MagickPathExtent);
              magick_info=GetMagickInfo(image_info->magick,sans_exception);
            }
          if ((magick_info == (const MagickInfo *) NULL) ||
              (GetMagickEndianSupport(magick_info) == MagickFalse))
            image_info->endian=UndefinedEndian;
          sans_exception=DestroyExceptionInfo(sans_exception);
          return(MagickTrue);
        }
      magick_info=GetMagickInfo(image_info->magick,sans_exception);
      if ((magick_info == (const MagickInfo *) NULL) ||
          (GetMagickEndianSupport(magick_info) == MagickFalse))
        image_info->endian=UndefinedEndian;
      sans_exception=DestroyExceptionInfo(sans_exception);
    }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e I n f o B l o b %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageInfoBlob() sets the image info blob member.
%
% The format of the SetImageInfoBlob method is:
%
% void SetImageInfoBlob(ImageInfo *image_info,const void *blob,
% const size_t length)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o blob: the blob.
%
% o length: the blob length.
%
*/
MagickExport void SetImageInfoBlob(ImageInfo *image_info,const void *blob,
  const size_t length)
{
  /*
    Point the image info at an in-memory blob of the given length; the data
    is referenced, not copied.
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  image_info->length=length;
  image_info->blob=(void *) blob;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e I n f o C u s t o m S t r e a m %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageInfoCustomStream() sets the image info custom stream handlers.
%
% The format of the SetImageInfoCustomStream method is:
%
% void SetImageInfoCustomStream(ImageInfo *image_info,
% CustomStreamInfo *custom_stream)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o custom_stream: your custom stream methods.
%
*/
MagickExport void SetImageInfoCustomStream(ImageInfo *image_info,
  CustomStreamInfo *custom_stream)
{
  /*
    Attach the caller's custom stream handlers to the image info.
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  image_info->custom_stream=custom_stream;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e I n f o F i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageInfoFile() sets the image info file member.
%
% The format of the SetImageInfoFile method is:
%
% void SetImageInfoFile(ImageInfo *image_info,FILE *file)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o file: the file.
%
*/
MagickExport void SetImageInfoFile(ImageInfo *image_info,FILE *file)
{
  /*
    Associate an already-open stdio stream with the image info.
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    {
      (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
        image_info->filename);
    }
  image_info->file=file;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageMask() associates a mask with the image. The mask must be the same
% dimensions as the image.
%
% The format of the SetImageMask method is:
%
% MagickBooleanType SetImageMask(Image *image,const PixelMask type,
% const Image *mask,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o type: the mask type, ReadPixelMask or WritePixelMask.
%
% o mask: the image mask.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageMask(Image *image,const PixelMask type,
  const Image *mask,ExceptionInfo *exception)
{
  CacheView
    *mask_view,
    *image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Set image mask: copy the mask image's intensity into the requested mask
    channel (read, write, or composite) of every pixel.  A NULL mask removes
    the corresponding mask channel instead.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (mask == (const Image *) NULL)
    {
      /*
        Remove the mask: clear only the matching channel flag, then resync
        the pixel cache.
      */
      switch (type)
      {
        case ReadPixelMask:
        {
          image->channels=(ChannelType) (image->channels & ~ReadMaskChannel);
          break;
        }
        case WritePixelMask:
        {
          image->channels=(ChannelType) (image->channels & ~WriteMaskChannel);
          /*
            Bug fix: this break was missing, so removing a write mask fell
            through and also cleared CompositeMaskChannel (cf. the matching
            switch in SetImageRegionMask).
          */
          break;
        }
        default:
        {
          image->channels=(ChannelType) (image->channels & ~CompositeMaskChannel);
          break;
        }
      }
      return(SyncImagePixelCache(image,exception));
    }
  /*
    Enable the matching mask channel before populating it.
  */
  switch (type)
  {
    case ReadPixelMask:
    {
      image->channels=(ChannelType) (image->channels | ReadMaskChannel);
      break;
    }
    case WritePixelMask:
    {
      image->channels=(ChannelType) (image->channels | WriteMaskChannel);
      break;
    }
    default:
    {
      image->channels=(ChannelType) (image->channels | CompositeMaskChannel);
      break;
    }
  }
  if (SyncImagePixelCache(image,exception) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  /* Permit writes to the mask channel while it is being filled. */
  image->mask_trait=UpdatePixelTrait;
  mask_view=AcquireVirtualCacheView(mask,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(mask,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(mask_view,0,y,mask->columns,1,exception);
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        intensity;

      /* Pixels outside the mask's extent get zero intensity. */
      intensity=0.0;
      if ((x < (ssize_t) mask->columns) && (y < (ssize_t) mask->rows))
        intensity=GetPixelIntensity(mask,p);
      switch (type)
      {
        case ReadPixelMask:
        {
          SetPixelReadMask(image,ClampToQuantum(intensity),q);
          break;
        }
        case WritePixelMask:
        {
          SetPixelWriteMask(image,ClampToQuantum(intensity),q);
          break;
        }
        default:
        {
          SetPixelCompositeMask(image,ClampToQuantum(intensity),q);
          break;
        }
      }
      p+=GetPixelChannels(mask);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image->mask_trait=UndefinedPixelTrait;
  mask_view=DestroyCacheView(mask_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e R e g i o n M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageRegionMask() associates a mask with the image as defined by the
% specified region.
%
% The format of the SetImageRegionMask method is:
%
% MagickBooleanType SetImageRegionMask(Image *image,const PixelMask type,
% const RectangleInfo *region,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o type: the mask type, ReadPixelMask or WritePixelMask.
%
% o geometry: the mask region.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageRegionMask(Image *image,
  const PixelMask type,const RectangleInfo *region,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Set image mask as defined by the region: pixels inside the rectangle get
    mask value 0, pixels outside get QuantumRange.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (region == (const RectangleInfo *) NULL)
    {
      /*
        A NULL region removes the mask: clear the matching channel flag and
        resync the pixel cache.
      */
      switch (type)
      {
        case ReadPixelMask:
        {
          image->channels=(ChannelType) (image->channels & ~ReadMaskChannel);
          break;
        }
        case WritePixelMask:
        {
          image->channels=(ChannelType) (image->channels & ~WriteMaskChannel);
          break;
        }
        default:
        {
          image->channels=(ChannelType) (image->channels & ~CompositeMaskChannel);
          break;
        }
      }
      return(SyncImagePixelCache(image,exception));
    }
  /*
    Enable the matching mask channel before writing mask values.
  */
  switch (type)
  {
    case ReadPixelMask:
    {
      image->channels=(ChannelType) (image->channels | ReadMaskChannel);
      break;
    }
    case WritePixelMask:
    {
      image->channels=(ChannelType) (image->channels | WriteMaskChannel);
      break;
    }
    default:
    {
      image->channels=(ChannelType) (image->channels | CompositeMaskChannel);
      break;
    }
  }
  if (SyncImagePixelCache(image,exception) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  /* Permit writes to the mask channel while it is being filled. */
  image->mask_trait=UpdatePixelTrait;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      Quantum
        pixel;

      /* Inside the rectangle the mask is 0; elsewhere QuantumRange. */
      pixel=QuantumRange;
      if (((x >= region->x) && (x < (region->x+(ssize_t) region->width))) &&
          ((y >= region->y) && (y < (region->y+(ssize_t) region->height))))
        pixel=(Quantum) 0;
      switch (type)
      {
        case ReadPixelMask:
        {
          SetPixelReadMask(image,pixel,q);
          break;
        }
        case WritePixelMask:
        {
          SetPixelWriteMask(image,pixel,q);
          break;
        }
        default:
        {
          SetPixelCompositeMask(image,pixel,q);
          break;
        }
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image->mask_trait=UndefinedPixelTrait;
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e V i r t u a l P i x e l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageVirtualPixelMethod() sets the "virtual pixels" method for the
% image and returns the previous setting. A virtual pixel is any pixel access
% that is outside the boundaries of the image cache.
%
% The format of the SetImageVirtualPixelMethod() method is:
%
% VirtualPixelMethod SetImageVirtualPixelMethod(Image *image,
% const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: choose the type of virtual pixel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport VirtualPixelMethod SetImageVirtualPixelMethod(Image *image,
  const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception)
{
  /*
    Record the virtual pixel policy on the image's pixel cache and hand the
    previous setting back to the caller.
  */
  VirtualPixelMethod
    previous_method;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  previous_method=SetPixelCacheVirtualMethod(image,virtual_pixel_method,
    exception);
  return(previous_method);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S m u s h I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SmushImages() takes all images from the current image pointer to the end
% of the image list and smushes them to each other top-to-bottom if the
% stack parameter is true, otherwise left-to-right.
%
%  The current gravity setting now affects how the image is justified in the
% final image.
%
% The format of the SmushImages method is:
%
% Image *SmushImages(const Image *images,const MagickBooleanType stack,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o stack: A value other than 0 stacks the images top-to-bottom.
%
% o offset: minimum distance in pixels between images.
%
% o exception: return any errors or warnings in this structure.
%
*/
static ssize_t SmushXGap(const Image *smush_image,const Image *images,
  const ssize_t offset,ExceptionInfo *exception)
{
  CacheView
    *left_view,
    *right_view;

  const Image
    *left_image,
    *right_image;

  RectangleInfo
    left_geometry,
    right_geometry;

  const Quantum
    *p;

  ssize_t
    i,
    y;

  size_t
    gap;

  ssize_t
    x;

  /*
    Measure how far the current image (right) can slide left toward its
    predecessor by scanning runs of fully transparent pixels on the two
    facing edges; return that slack minus the requested minimum offset.
  */
  if (images->previous == (Image *) NULL)
    return(0);
  right_image=images;
  SetGeometry(smush_image,&right_geometry);
  GravityAdjustGeometry(right_image->columns,right_image->rows,
    right_image->gravity,&right_geometry);
  left_image=images->previous;
  SetGeometry(smush_image,&left_geometry);
  GravityAdjustGeometry(left_image->columns,left_image->rows,
    left_image->gravity,&left_geometry);
  /* 'gap' starts at the widest possible slack and only shrinks. */
  gap=right_image->columns;
  left_view=AcquireVirtualCacheView(left_image,exception);
  right_view=AcquireVirtualCacheView(right_image,exception);
  for (y=0; y < (ssize_t) smush_image->rows; y++)
  {
    /* Count trailing transparent pixels on the left image's right edge. */
    for (x=(ssize_t) left_image->columns-1; x > 0; x--)
    {
      p=GetCacheViewVirtualPixels(left_view,x,left_geometry.y+y,1,1,exception);
      if ((p == (const Quantum *) NULL) ||
          (GetPixelAlpha(left_image,p) != TransparentAlpha) ||
          ((left_image->columns-x-1) >= gap))
        break;
    }
    i=(ssize_t) left_image->columns-x-1;
    /* Count leading transparent pixels on the right image's left edge. */
    for (x=0; x < (ssize_t) right_image->columns; x++)
    {
      p=GetCacheViewVirtualPixels(right_view,x,right_geometry.y+y,1,1,
        exception);
      if ((p == (const Quantum *) NULL) ||
          (GetPixelAlpha(right_image,p) != TransparentAlpha) ||
          ((x+i) >= (ssize_t) gap))
        break;
    }
    /* The row with the least combined slack limits the whole gap. */
    if ((x+i) < (ssize_t) gap)
      gap=(size_t) (x+i);
  }
  right_view=DestroyCacheView(right_view);
  left_view=DestroyCacheView(left_view);
  if (y < (ssize_t) smush_image->rows)
    return(offset);
  return((ssize_t) gap-offset);
}
static ssize_t SmushYGap(const Image *smush_image,const Image *images,
  const ssize_t offset,ExceptionInfo *exception)
{
  CacheView
    *bottom_view,
    *top_view;

  const Image
    *bottom_image,
    *top_image;

  RectangleInfo
    bottom_geometry,
    top_geometry;

  const Quantum
    *p;

  ssize_t
    i,
    x;

  size_t
    gap;

  ssize_t
    y;

  /*
    Vertical analogue of SmushXGap: measure how far the current image
    (bottom) can slide up toward its predecessor by scanning runs of fully
    transparent pixels on the two facing edges.
  */
  if (images->previous == (Image *) NULL)
    return(0);
  bottom_image=images;
  SetGeometry(smush_image,&bottom_geometry);
  GravityAdjustGeometry(bottom_image->columns,bottom_image->rows,
    bottom_image->gravity,&bottom_geometry);
  top_image=images->previous;
  SetGeometry(smush_image,&top_geometry);
  GravityAdjustGeometry(top_image->columns,top_image->rows,top_image->gravity,
    &top_geometry);
  /* 'gap' starts at the largest possible slack and only shrinks. */
  gap=bottom_image->rows;
  top_view=AcquireVirtualCacheView(top_image,exception);
  bottom_view=AcquireVirtualCacheView(bottom_image,exception);
  for (x=0; x < (ssize_t) smush_image->columns; x++)
  {
    /* Count trailing transparent pixels on the top image's bottom edge. */
    for (y=(ssize_t) top_image->rows-1; y > 0; y--)
    {
      p=GetCacheViewVirtualPixels(top_view,top_geometry.x+x,y,1,1,exception);
      if ((p == (const Quantum *) NULL) ||
          (GetPixelAlpha(top_image,p) != TransparentAlpha) ||
          ((top_image->rows-y-1) >= gap))
        break;
    }
    i=(ssize_t) top_image->rows-y-1;
    /* Count leading transparent pixels on the bottom image's top edge. */
    for (y=0; y < (ssize_t) bottom_image->rows; y++)
    {
      p=GetCacheViewVirtualPixels(bottom_view,bottom_geometry.x+x,y,1,1,
        exception);
      if ((p == (const Quantum *) NULL) ||
          (GetPixelAlpha(bottom_image,p) != TransparentAlpha) ||
          ((y+i) >= (ssize_t) gap))
        break;
    }
    /* The column with the least combined slack limits the whole gap. */
    if ((y+i) < (ssize_t) gap)
      gap=(size_t) (y+i);
  }
  bottom_view=DestroyCacheView(bottom_view);
  top_view=DestroyCacheView(top_view);
  if (x < (ssize_t) smush_image->columns)
    return(offset);
  return((ssize_t) gap-offset);
}
MagickExport Image *SmushImages(const Image *images,
  const MagickBooleanType stack,const ssize_t offset,ExceptionInfo *exception)
{
#define SmushImageTag  "Smush/Image"

  const Image
    *image;

  Image
    *smush_image;

  MagickBooleanType
    proceed,
    status;

  MagickOffsetType
    n;

  PixelTrait
    alpha_trait;

  RectangleInfo
    geometry;

  const Image
    *next;

  size_t
    height,
    number_images,
    width;

  ssize_t
    x_offset,
    y_offset;

  /*
    Compute maximum area of smushed area: sum extents along the smush axis
    (top-to-bottom when 'stack', else left-to-right) and take the maximum on
    the other axis.
  */
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=images;
  alpha_trait=image->alpha_trait;
  number_images=1;
  width=image->columns;
  height=image->rows;
  next=GetNextImageInList(image);
  for ( ; next != (Image *) NULL; next=GetNextImageInList(next))
  {
    /* Any image with alpha forces the result to blend alpha. */
    if (next->alpha_trait != UndefinedPixelTrait)
      alpha_trait=BlendPixelTrait;
    number_images++;
    if (stack != MagickFalse)
      {
        if (next->columns > width)
          width=next->columns;
        height+=next->rows;
        if (next->previous != (Image *) NULL)
          height+=offset;
        continue;
      }
    width+=next->columns;
    if (next->previous != (Image *) NULL)
      width+=offset;
    if (next->rows > height)
      height=next->rows;
  }
  /*
    Smush images.
  */
  smush_image=CloneImage(image,width,height,MagickTrue,exception);
  if (smush_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(smush_image,DirectClass,exception) == MagickFalse)
    {
      smush_image=DestroyImage(smush_image);
      return((Image *) NULL);
    }
  smush_image->alpha_trait=alpha_trait;
  (void) SetImageBackgroundColor(smush_image,exception);
  status=MagickTrue;
  x_offset=0;
  y_offset=0;
  for (n=0; n < (MagickOffsetType) number_images; n++)
  {
    SetGeometry(smush_image,&geometry);
    GravityAdjustGeometry(image->columns,image->rows,image->gravity,&geometry);
    /* Pull each image back by the transparent slack measured by Smush?Gap. */
    if (stack != MagickFalse)
      {
        x_offset-=geometry.x;
        y_offset-=SmushYGap(smush_image,image,offset,exception);
      }
    else
      {
        x_offset-=SmushXGap(smush_image,image,offset,exception);
        y_offset-=geometry.y;
      }
    status=CompositeImage(smush_image,image,OverCompositeOp,MagickTrue,x_offset,
      y_offset,exception);
    proceed=SetImageProgress(image,SmushImageTag,n,number_images);
    if (proceed == MagickFalse)
      break;
    /* Advance the running offset along the smush axis. */
    if (stack == MagickFalse)
      {
        x_offset+=(ssize_t) image->columns;
        y_offset=0;
      }
    else
      {
        x_offset=0;
        y_offset+=(ssize_t) image->rows;
      }
    image=GetNextImageInList(image);
  }
  /* Trim the canvas to the extent actually covered after smushing. */
  if (stack == MagickFalse)
    smush_image->columns=(size_t) x_offset;
  else
    smush_image->rows=(size_t) y_offset;
  if (status == MagickFalse)
    smush_image=DestroyImage(smush_image);
  return(smush_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t r i p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% StripImage() strips an image of all profiles and comments.
%
% The format of the StripImage method is:
%
% MagickBooleanType StripImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType StripImage(Image *image,ExceptionInfo *exception)
{
  /*
    Strip the image of all profiles plus the comment and date properties, and
    set an artifact asking the PNG encoder to exclude ancillary chunks.
  */
  magick_unreferenced(exception);
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  DestroyImageProfiles(image);
  (void) DeleteImageProperty(image,"comment");
  (void) DeleteImageProperty(image,"date:create");
  (void) DeleteImageProperty(image,"date:modify");
  return(SetImageArtifact(image,"png:exclude-chunk",
    "bKGD,caNv,cHRM,eXIf,gAMA,iCCP,iTXt,pHYs,sRGB,tEXt,zCCP,zTXt,date"));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncImage() initializes the red, green, and blue intensities of each pixel
% as defined by the colormap index.
%
% The format of the SyncImage method is:
%
% MagickBooleanType SyncImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline Quantum PushColormapIndex(Image *image,const Quantum index,
  MagickBooleanType *range_exception)
{
  /*
    Validate a colormap index: in-range indexes pass through unchanged;
    an out-of-range index raises the caller's range flag and maps to
    colormap entry 0.
  */
  if ((size_t) index >= image->colors)
    {
      *range_exception=MagickTrue;
      return((Quantum) 0);
    }
  return(index);
}
MagickExport MagickBooleanType SyncImage(Image *image,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    range_exception,  /* set when any pixel's index falls outside the colormap */
    status,
    taint;  /* saved and restored: syncing pixels is not a user modification */

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (image->ping != MagickFalse)
    return(MagickTrue);  /* ping images carry no pixel data to sync */
  if (image->storage_class != PseudoClass)
    return(MagickFalse);  /* only colormapped images have indexes to expand */
  assert(image->colormap != (PixelInfo *) NULL);
  range_exception=MagickFalse;
  status=MagickTrue;
  taint=image->taint;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  /*
    NOTE(review): range_exception and status are written by multiple threads
    without synchronization; each only ever transitions to one value
    (MagickTrue / MagickFalse respectively), so the race looks benign —
    confirm against the project's threading conventions.
  */
  #pragma omp parallel for schedule(static) shared(range_exception,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      index;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;  /* a prior row failed; skip remaining iterations cheaply */
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* replace this pixel's channel values with its colormap entry */
      index=PushColormapIndex(image,GetPixelIndex(image,q),&range_exception);
      SetPixelViaPixelInfo(image,image->colormap+(ssize_t) index,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  image->taint=taint;
  /* image->ping is always false here (checked above); condition kept as-is */
  if ((image->ping == MagickFalse) && (range_exception != MagickFalse))
    (void) ThrowMagickException(exception,GetMagickModule(),
      CorruptImageWarning,"InvalidColormapIndex","`%s'",image->filename);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S y n c I m a g e S e t t i n g s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncImageSettings() syncs any image_info global options into per-image
% attributes.
%
% Note: in IMv6 free form 'options' were always mapped into 'artifacts', so
% that operations and coders can find such settings. In IMv7 if a desired
% per-image artifact is not set, then it will directly look for a global
% option as a fallback, as such this copy is no longer needed, only the
% link set up.
%
% The format of the SyncImageSettings method is:
%
% MagickBooleanType SyncImageSettings(const ImageInfo *image_info,
% Image *image,ExceptionInfo *exception)
% MagickBooleanType SyncImagesSettings(const ImageInfo *image_info,
% Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SyncImagesSettings(ImageInfo *image_info,
  Image *images,ExceptionInfo *exception)
{
  /*
    Propagate global image_info options onto every image in the sequence,
    then delete the one-shot "page" option so it is not applied again.
  */
  Image
    *next;

  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  next=images;
  while (next != (Image *) NULL)
  {
    (void) SyncImageSettings(image_info,next,exception);
    next=GetNextImageInList(next);
  }
  (void) DeleteImageOption(image_info,"page");
  return(MagickTrue);
}
MagickExport MagickBooleanType SyncImageSettings(const ImageInfo *image_info,
  Image *image,ExceptionInfo *exception)
{
  const char
    *option;  /* current global option's value; NULL when the option is unset */

  GeometryInfo
    geometry_info;

  MagickStatusType
    flags;

  ResolutionType
    units;

  /*
    Sync image options: for each global image_info option that is defined,
    overwrite the corresponding per-image attribute.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  option=GetImageOption(image_info,"background");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&image->background_color,
      exception);
  option=GetImageOption(image_info,"black-point-compensation");
  if (option != (const char *) NULL)
    image->black_point_compensation=(MagickBooleanType) ParseCommandOption(
      MagickBooleanOptions,MagickFalse,option);
  option=GetImageOption(image_info,"blue-primary");
  if (option != (const char *) NULL)
    {
      /* geometry rho,sigma -> x,y; a single value applies to both axes */
      flags=ParseGeometry(option,&geometry_info);
      image->chromaticity.blue_primary.x=geometry_info.rho;
      image->chromaticity.blue_primary.y=geometry_info.sigma;
      if ((flags & SigmaValue) == 0)
        image->chromaticity.blue_primary.y=image->chromaticity.blue_primary.x;
    }
  option=GetImageOption(image_info,"bordercolor");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&image->border_color,
      exception);
  /* FUTURE: do not sync compose to per-image compose setting here */
  option=GetImageOption(image_info,"compose");
  if (option != (const char *) NULL)
    image->compose=(CompositeOperator) ParseCommandOption(MagickComposeOptions,
      MagickFalse,option);
  /* -- */
  option=GetImageOption(image_info,"compress");
  if (option != (const char *) NULL)
    image->compression=(CompressionType) ParseCommandOption(
      MagickCompressOptions,MagickFalse,option);
  option=GetImageOption(image_info,"debug");
  if (option != (const char *) NULL)
    image->debug=(MagickBooleanType) ParseCommandOption(MagickBooleanOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"density");
  if (option != (const char *) NULL)
    {
      flags=ParseGeometry(option,&geometry_info);
      image->resolution.x=geometry_info.rho;
      image->resolution.y=geometry_info.sigma;
      if ((flags & SigmaValue) == 0)
        image->resolution.y=image->resolution.x;
    }
  option=GetImageOption(image_info,"depth");
  if (option != (const char *) NULL)
    image->depth=StringToUnsignedLong(option);
  option=GetImageOption(image_info,"endian");
  if (option != (const char *) NULL)
    image->endian=(EndianType) ParseCommandOption(MagickEndianOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"filter");
  if (option != (const char *) NULL)
    image->filter=(FilterType) ParseCommandOption(MagickFilterOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"fuzz");
  if (option != (const char *) NULL)
    image->fuzz=StringToDoubleInterval(option,(double) QuantumRange+1.0);
  option=GetImageOption(image_info,"gravity");
  if (option != (const char *) NULL)
    image->gravity=(GravityType) ParseCommandOption(MagickGravityOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"green-primary");
  if (option != (const char *) NULL)
    {
      flags=ParseGeometry(option,&geometry_info);
      image->chromaticity.green_primary.x=geometry_info.rho;
      image->chromaticity.green_primary.y=geometry_info.sigma;
      if ((flags & SigmaValue) == 0)
        image->chromaticity.green_primary.y=image->chromaticity.green_primary.x;
    }
  option=GetImageOption(image_info,"intent");
  if (option != (const char *) NULL)
    image->rendering_intent=(RenderingIntent) ParseCommandOption(
      MagickIntentOptions,MagickFalse,option);
  option=GetImageOption(image_info,"intensity");
  if (option != (const char *) NULL)
    image->intensity=(PixelIntensityMethod) ParseCommandOption(
      MagickPixelIntensityOptions,MagickFalse,option);
  option=GetImageOption(image_info,"interlace");
  if (option != (const char *) NULL)
    image->interlace=(InterlaceType) ParseCommandOption(MagickInterlaceOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"interpolate");
  if (option != (const char *) NULL)
    image->interpolate=(PixelInterpolateMethod) ParseCommandOption(
      MagickInterpolateOptions,MagickFalse,option);
  option=GetImageOption(image_info,"loop");
  if (option != (const char *) NULL)
    image->iterations=StringToUnsignedLong(option);
  option=GetImageOption(image_info,"mattecolor");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&image->matte_color,
      exception);
  option=GetImageOption(image_info,"orient");
  if (option != (const char *) NULL)
    image->orientation=(OrientationType) ParseCommandOption(
      MagickOrientationOptions,MagickFalse,option);
  option=GetImageOption(image_info,"page");
  if (option != (const char *) NULL)
    {
      char
        *geometry;

      geometry=GetPageGeometry(option);
      flags=ParseAbsoluteGeometry(geometry,&image->page);
      geometry=DestroyString(geometry);
    }
  option=GetImageOption(image_info,"quality");
  if (option != (const char *) NULL)
    image->quality=StringToUnsignedLong(option);
  option=GetImageOption(image_info,"red-primary");
  if (option != (const char *) NULL)
    {
      flags=ParseGeometry(option,&geometry_info);
      image->chromaticity.red_primary.x=geometry_info.rho;
      image->chromaticity.red_primary.y=geometry_info.sigma;
      if ((flags & SigmaValue) == 0)
        image->chromaticity.red_primary.y=image->chromaticity.red_primary.x;
    }
  /* a defined image_info quality takes precedence over the "quality" option */
  if (image_info->quality != UndefinedCompressionQuality)
    image->quality=image_info->quality;
  option=GetImageOption(image_info,"scene");
  if (option != (const char *) NULL)
    image->scene=StringToUnsignedLong(option);
  option=GetImageOption(image_info,"taint");
  if (option != (const char *) NULL)
    image->taint=(MagickBooleanType) ParseCommandOption(MagickBooleanOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"tile-offset");
  if (option != (const char *) NULL)
    {
      char
        *geometry;

      geometry=GetPageGeometry(option);
      flags=ParseAbsoluteGeometry(geometry,&image->tile_offset);
      geometry=DestroyString(geometry);
    }
  option=GetImageOption(image_info,"transparent-color");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&image->transparent_color,
      exception);
  option=GetImageOption(image_info,"type");
  if (option != (const char *) NULL)
    image->type=(ImageType) ParseCommandOption(MagickTypeOptions,MagickFalse,
      option);
  option=GetImageOption(image_info,"units");
  units=image_info->units;  /* the "units" option overrides image_info->units */
  if (option != (const char *) NULL)
    units=(ResolutionType) ParseCommandOption(MagickResolutionOptions,
      MagickFalse,option);
  if (units != UndefinedResolution)
    {
      /* convert the stored resolution when the units actually change */
      if (image->units != units)
        switch (image->units)
        {
          case PixelsPerInchResolution:
          {
            if (units == PixelsPerCentimeterResolution)
              {
                image->resolution.x/=2.54;
                image->resolution.y/=2.54;
              }
            break;
          }
          case PixelsPerCentimeterResolution:
          {
            if (units == PixelsPerInchResolution)
              {
                /* round to 2 decimal places after the cm -> inch conversion */
                image->resolution.x=(double) ((size_t) (100.0*2.54*
                  image->resolution.x+0.5))/100.0;
                image->resolution.y=(double) ((size_t) (100.0*2.54*
                  image->resolution.y+0.5))/100.0;
              }
            break;
          }
          default:
            break;
        }
      image->units=units;
      /* re-apply "density": its values are expressed in the new units */
      option=GetImageOption(image_info,"density");
      if (option != (const char *) NULL)
        {
          flags=ParseGeometry(option,&geometry_info);
          image->resolution.x=geometry_info.rho;
          image->resolution.y=geometry_info.sigma;
          if ((flags & SigmaValue) == 0)
            image->resolution.y=image->resolution.x;
        }
    }
  option=GetImageOption(image_info,"virtual-pixel");
  if (option != (const char *) NULL)
    (void) SetImageVirtualPixelMethod(image,(VirtualPixelMethod)
      ParseCommandOption(MagickVirtualPixelOptions,MagickFalse,option),
      exception);
  option=GetImageOption(image_info,"white-point");
  if (option != (const char *) NULL)
    {
      flags=ParseGeometry(option,&geometry_info);
      image->chromaticity.white_point.x=geometry_info.rho;
      image->chromaticity.white_point.y=geometry_info.sigma;
      if ((flags & SigmaValue) == 0)
        image->chromaticity.white_point.y=image->chromaticity.white_point.x;
    }
  /*
    Pointer to allow the lookup of pre-image artifact will fallback to a global
    option setting/define. This saves a lot of duplication of global options
    into per-image artifacts, while ensuring only specifically set per-image
    artifacts are preserved when parenthesis ends.
  */
  if (image->image_info != (ImageInfo *) NULL)
    image->image_info=DestroyImageInfo(image->image_info);
  image->image_info=CloneImageInfo(image_info);
  return(MagickTrue);
}
|
fx.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% FFFFF X X %
% F X X %
% FFF X %
% F X X %
% F X X %
% %
% %
% MagickCore Image Special Effects Methods %
% %
% Software Design %
% Cristy %
% October 1996 %
% %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/accelerate-private.h"
#include "MagickCore/annotate.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/decorate.h"
#include "MagickCore/distort.h"
#include "MagickCore/draw.h"
#include "MagickCore/effect.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/fx.h"
#include "MagickCore/fx-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/layer.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/property.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/random_.h"
#include "MagickCore/random-private.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/resize.h"
#include "MagickCore/resource_.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/transform.h"
#include "MagickCore/transform-private.h"
#include "MagickCore/utility.h"
/*
Typedef declarations.
*/
typedef enum
{
BitwiseAndAssignmentOperator = 0xd9U,
BitwiseOrAssignmentOperator,
LeftShiftAssignmentOperator,
RightShiftAssignmentOperator,
PowerAssignmentOperator,
ModuloAssignmentOperator,
PlusAssignmentOperator,
SubtractAssignmentOperator,
MultiplyAssignmentOperator,
DivideAssignmentOperator,
IncrementAssignmentOperator,
DecrementAssignmentOperator,
LeftShiftOperator,
RightShiftOperator,
LessThanEqualOperator,
GreaterThanEqualOperator,
EqualOperator,
NotEqualOperator,
LogicalAndOperator,
LogicalOrOperator,
ExponentialNotation
} FxOperator;
/*
  State for one fx-expression interpreter: the image sequence being
  evaluated, the rewritten expression text, memoization tables, and one
  cache view per image.
*/
struct _FxInfo
{
  const Image
    *images;  /* image sequence the expression is evaluated against */

  char
    *expression;  /* expression text, after compound-operator rewriting */

  FILE
    *file;  /* output stream; AcquireFxInfo() sets this to stderr */

  SplayTreeInfo
    *colors,   /* color names resolved to cloned PixelInfo values */
    *symbols;  /* cached symbol/statistic values (heap-allocated double *) */

  CacheView
    **view;  /* one virtual cache view per image, indexed by list position */

  RandomInfo
    *random_info;

  ExceptionInfo
    *exception;
};
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A c q u i r e F x I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireFxInfo() allocates the FxInfo structure.
%
% The format of the AcquireFxInfo method is:
%
% FxInfo *AcquireFxInfo(Image *images,const char *expression,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o expression: the expression.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate FxInfo *AcquireFxInfo(const Image *images,const char *expression,
  ExceptionInfo *exception)
{
  /*
    Allocate and initialize the FxInfo structure: acquire per-image cache
    views, then rewrite the expression so compound operators become single
    byte tokens and unary negation binds right-to-left.

    Table order is significant: an operator must be substituted before any
    operator that is a prefix of it (e.g. "<<=" before "<<", ">>=" before
    ">>"), preserving the exact order of the original inline sequence.
  */
  static const struct
  {
    const char
      *compound;  /* operator text as it appears in the user's expression */

    FxOperator
      op;  /* single-byte token it is rewritten to */
  } fx_operators[] =
  {
    { "&=", BitwiseAndAssignmentOperator },
    { "|=", BitwiseOrAssignmentOperator },
    { "<<=", LeftShiftAssignmentOperator },
    { ">>=", RightShiftAssignmentOperator },
    { "^=", PowerAssignmentOperator },
    { "%=", ModuloAssignmentOperator },
    { "+=", PlusAssignmentOperator },
    { "-=", SubtractAssignmentOperator },
    { "*=", MultiplyAssignmentOperator },
    { "/=", DivideAssignmentOperator },
    { "++", IncrementAssignmentOperator },
    { "--", DecrementAssignmentOperator },
    { "<<", LeftShiftOperator },
    { ">>", RightShiftOperator },
    { "<=", LessThanEqualOperator },
    { ">=", GreaterThanEqualOperator },
    { "==", EqualOperator },
    { "!=", NotEqualOperator },
    { "&&", LogicalAndOperator },
    { "||", LogicalOrOperator },
    { "**", ExponentialNotation }
  };

  const Image
    *next;

  FxInfo
    *fx_info;

  size_t
    i;

  unsigned char
    fx_op[2];

  fx_info=(FxInfo *) AcquireCriticalMemory(sizeof(*fx_info));
  (void) memset(fx_info,0,sizeof(*fx_info));
  fx_info->exception=AcquireExceptionInfo();
  fx_info->images=images;
  fx_info->colors=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
    RelinquishMagickMemory);
  fx_info->symbols=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
    RelinquishMagickMemory);
  /*
    One virtual cache view per image in the sequence, indexed by list
    position.
  */
  fx_info->view=(CacheView **) AcquireQuantumMemory(GetImageListLength(
    fx_info->images),sizeof(*fx_info->view));
  if (fx_info->view == (CacheView **) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  i=0;
  next=GetFirstImageInList(fx_info->images);
  for ( ; next != (Image *) NULL; next=next->next)
  {
    fx_info->view[i]=AcquireVirtualCacheView(next,exception);
    i++;
  }
  fx_info->random_info=AcquireRandomInfo();
  fx_info->expression=ConstantString(expression);
  fx_info->file=stderr;
  /*
    Convert compound to simple operators.
  */
  fx_op[1]='\0';
  for (i=0; i < (sizeof(fx_operators)/sizeof(fx_operators[0])); i++)
  {
    *fx_op=(unsigned char) fx_operators[i].op;
    (void) SubstituteString(&fx_info->expression,fx_operators[i].compound,
      (char *) fx_op);
  }
  /*
    Force right-to-left associativity for unary negation.
  */
  (void) SubstituteString(&fx_info->expression,"-","-1.0*");
  (void) SubstituteString(&fx_info->expression,"^-1.0*","^-");
  (void) SubstituteString(&fx_info->expression,"E-1.0*","E-");
  (void) SubstituteString(&fx_info->expression,"e-1.0*","e-");
  (void) SubstituteString(&fx_info->expression," ",""); /* compact string */
  return(fx_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y F x I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyFxInfo() deallocates memory associated with an FxInfo structure.
%
% The format of the DestroyFxInfo method is:
%
% ImageInfo *DestroyFxInfo(ImageInfo *fx_info)
%
% A description of each parameter follows:
%
% o fx_info: the fx info.
%
*/
MagickPrivate FxInfo *DestroyFxInfo(FxInfo *fx_info)
{
  /*
    Release every resource owned by the Fx interpreter and the structure
    itself; always returns NULL so callers can overwrite their pointer.
  */
  size_t
    n;

  ssize_t
    i;

  fx_info->exception=DestroyExceptionInfo(fx_info->exception);
  fx_info->expression=DestroyString(fx_info->expression);
  fx_info->symbols=DestroySplayTree(fx_info->symbols);
  fx_info->colors=DestroySplayTree(fx_info->colors);
  n=GetImageListLength(fx_info->images);
  for (i=0; i < (ssize_t) n; i++)
    fx_info->view[i]=DestroyCacheView(fx_info->view[i]);
  fx_info->view=(CacheView **) RelinquishMagickMemory(fx_info->view);
  fx_info->random_info=DestroyRandomInfo(fx_info->random_info);
  fx_info=(FxInfo *) RelinquishMagickMemory(fx_info);
  return(fx_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ F x E v a l u a t e C h a n n e l E x p r e s s i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FxEvaluateChannelExpression() evaluates an expression and returns the
% results.
%
% The format of the FxEvaluateExpression method is:
%
% double FxEvaluateChannelExpression(FxInfo *fx_info,
% const PixelChannel channel,const ssize_t x,const ssize_t y,
% double *alpha,Exceptioninfo *exception)
% double FxEvaluateExpression(FxInfo *fx_info,
% double *alpha,Exceptioninfo *exception)
%
% A description of each parameter follows:
%
% o fx_info: the fx info.
%
% o channel: the channel.
%
% o x,y: the pixel position.
%
% o alpha: the result.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline const double *GetFxSymbolValue(FxInfo *magick_restrict fx_info,
  const char *symbol)
{
  /* Look up a cached symbol value; NULL when the symbol is not cached. */
  const void
    *value;

  value=GetValueFromSplayTree(fx_info->symbols,symbol);
  return((const double *) value);
}
static inline MagickBooleanType SetFxSymbolValue(
  FxInfo *magick_restrict fx_info,const char *magick_restrict symbol,
  double const value)
{
  /*
    Cache `value' under `symbol' in the fx symbol table, overwriting any
    existing entry in place; returns MagickFalse on allocation failure.
  */
  double
    *entry;

  entry=(double *) GetValueFromSplayTree(fx_info->symbols,symbol);
  if (entry == (double *) NULL)
    {
      /* first sighting: allocate storage and insert under a fresh key */
      entry=(double *) AcquireQuantumMemory(1,sizeof(*entry));
      if (entry == (double *) NULL)
        {
          (void) ThrowMagickException(fx_info->exception,GetMagickModule(),
            ResourceLimitError,"MemoryAllocationFailed","`%s'",
            fx_info->images->filename);
          return(MagickFalse);
        }
      *entry=value;
      return(AddValueToSplayTree(fx_info->symbols,ConstantString(symbol),
        entry));
    }
  *entry=value;
  return(MagickTrue);
}
static double FxChannelStatistics(FxInfo *fx_info,Image *image,
  PixelChannel channel,const char *symbol,ExceptionInfo *exception)
{
  ChannelType
    channel_mask;  /* previous mask, restored before returning when changed */

  char
    key[MagickPathExtent];  /* memoization key: image address + channel + symbol */

  const double
    *value;

  double
    statistic;

  register const char
    *p;

  /*
    Compute a channel statistic named by `symbol' (depth, kurtosis, maxima,
    mean, median, minima, skewness, standard_deviation), scaled by
    QuantumScale.  Results are memoized in the fx symbol table so repeated
    per-pixel lookups do not recompute the image statistic.
  */
  channel_mask=UndefinedChannel;
  /* an optional ".<channel>" suffix selects a specific pixel channel */
  for (p=symbol; (*p != '.') && (*p != '\0'); p++) ;
  if (*p == '.')
    {
      ssize_t
        option;

      option=ParseCommandOption(MagickPixelChannelOptions,MagickTrue,p+1);
      if (option >= 0)
        {
          channel=(PixelChannel) option;
          channel_mask=SetPixelChannelMask(image,(ChannelType)
            (1UL << channel));
        }
    }
  (void) FormatLocaleString(key,MagickPathExtent,"%p.%.20g.%s",(void *) image,
    (double) channel,symbol);
  value=GetFxSymbolValue(fx_info,key);
  if (value != (const double *) NULL)
    {
      /* cache hit: restore the channel mask and return the scaled value */
      if (channel_mask != UndefinedChannel)
        (void) SetPixelChannelMask(image,channel_mask);
      return(QuantumScale*(*value));
    }
  statistic=0.0;
  /* symbols are matched by prefix; exactly one branch below applies */
  if (LocaleNCompare(symbol,"depth",5) == 0)
    {
      size_t
        depth;

      depth=GetImageDepth(image,exception);
      statistic=(double) depth;
    }
  if (LocaleNCompare(symbol,"kurtosis",8) == 0)
    {
      double
        kurtosis,
        skewness;

      (void) GetImageKurtosis(image,&kurtosis,&skewness,exception);
      statistic=kurtosis;
    }
  if (LocaleNCompare(symbol,"maxima",6) == 0)
    {
      double
        maxima,
        minima;

      (void) GetImageRange(image,&minima,&maxima,exception);
      statistic=maxima;
    }
  if (LocaleNCompare(symbol,"mean",4) == 0)
    {
      double
        mean,
        standard_deviation;

      (void) GetImageMean(image,&mean,&standard_deviation,exception);
      statistic=mean;
    }
  if (LocaleNCompare(symbol,"median",6) == 0)
    {
      double
        median;

      (void) GetImageMedian(image,&median,exception);
      statistic=median;
    }
  if (LocaleNCompare(symbol,"minima",6) == 0)
    {
      double
        maxima,
        minima;

      (void) GetImageRange(image,&minima,&maxima,exception);
      statistic=minima;
    }
  if (LocaleNCompare(symbol,"skewness",8) == 0)
    {
      double
        kurtosis,
        skewness;

      (void) GetImageKurtosis(image,&kurtosis,&skewness,exception);
      statistic=skewness;
    }
  if (LocaleNCompare(symbol,"standard_deviation",18) == 0)
    {
      double
        mean,
        standard_deviation;

      (void) GetImageMean(image,&mean,&standard_deviation,exception);
      statistic=standard_deviation;
    }
  if (channel_mask != UndefinedChannel)
    (void) SetPixelChannelMask(image,channel_mask);
  if (SetFxSymbolValue(fx_info,key,statistic) == MagickFalse)
    return(0.0);  /* memoization failed (out of memory) */
  return(QuantumScale*statistic);
}
static double
FxEvaluateSubexpression(FxInfo *,const PixelChannel,const ssize_t,
const ssize_t,const char *,const size_t,double *,ExceptionInfo *);
static inline MagickBooleanType IsFxFunction(const char *expression,
  const char *name,const size_t length)
{
  /*
    Return MagickTrue when `expression' begins with the function name
    `name' (length bytes) followed by a character that can start an
    argument list.  The string must extend past `length' bytes.
  */
  int
    c;

  size_t
    i;

  for (i=0; i <= length; i++)
    if (expression[i] == '\0')
      return(MagickFalse);
  /*
    Read the follow-on byte as unsigned char: plain char may be signed and
    passing a negative value (e.g. the substituted operator tokens >= 0xd9)
    to isspace() is undefined behavior per the C standard.
  */
  c=(int) ((unsigned char) expression[length]);
  if ((LocaleNCompare(expression,name,length) == 0) &&
      ((isspace(c) == 0) || (c == '(')))
    return(MagickTrue);
  return(MagickFalse);
}
static MagickOffsetType FxGCD(MagickOffsetType alpha,MagickOffsetType beta)
{
  /* Greatest common divisor of alpha and beta (iterative Euclid). */
  while (beta != 0)
  {
    MagickOffsetType
      residue;

    residue=alpha % beta;
    alpha=beta;
    beta=residue;
  }
  return(alpha);
}
static inline const char *FxSubexpression(const char *expression,
  ExceptionInfo *exception)
{
  /*
    Advance to the ')' that closes the first '(' in `expression' and return
    a pointer to it; when the string ends before the parenthesis closes, an
    UnbalancedParenthesis error is raised and the terminating NUL position
    is returned.
  */
  const char
    *p;

  ssize_t
    level;

  level=0;
  p=expression;
  while (*p != '\0')
  {
    if (*p == '(')
      level++;
    else
      if (*p == ')')
        {
          if (level == 1)
            break;  /* this ')' closes the outermost '(' */
          level--;
        }
    p++;
  }
  if (*p == '\0')
    (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
      "UnbalancedParenthesis","`%s'",expression);
  return(p);
}
static double FxGetSymbol(FxInfo *fx_info,const PixelChannel channel,
const ssize_t x,const ssize_t y,const char *expression,const size_t depth,
ExceptionInfo *exception)
{
char
*q,
symbol[MagickPathExtent];
const char
*p;
const double
*value;
double
alpha,
beta;
Image
*image;
MagickBooleanType
status;
PixelInfo
pixel;
PointInfo
point;
register ssize_t
i;
size_t
level;
p=expression;
i=GetImageIndexInList(fx_info->images);
level=0;
point.x=(double) x;
point.y=(double) y;
if (isalpha((int) ((unsigned char) *(p+1))) == 0)
{
char
*subexpression;
subexpression=AcquireString(expression);
if (strchr("suv",(int) *p) != (char *) NULL)
{
switch (*p)
{
case 's':
default:
{
i=GetImageIndexInList(fx_info->images);
break;
}
case 'u': i=0; break;
case 'v': i=1; break;
}
p++;
if (*p == '[')
{
level++;
q=subexpression;
for (p++; *p != '\0'; )
{
if (*p == '[')
level++;
else
if (*p == ']')
{
level--;
if (level == 0)
break;
}
*q++=(*p++);
}
*q='\0';
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,
depth,&beta,exception);
i=(ssize_t) alpha;
if (*p != '\0')
p++;
}
if (*p == '.')
p++;
}
if ((*p == 'p') && (isalpha((int) ((unsigned char) *(p+1))) == 0))
{
p++;
if (*p == '{')
{
level++;
q=subexpression;
for (p++; *p != '\0'; )
{
if (*p == '{')
level++;
else
if (*p == '}')
{
level--;
if (level == 0)
break;
}
*q++=(*p++);
}
*q='\0';
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,
depth,&beta,exception);
point.x=alpha;
point.y=beta;
if (*p != '\0')
p++;
}
else
if (*p == '[')
{
level++;
q=subexpression;
for (p++; *p != '\0'; )
{
if (*p == '[')
level++;
else
if (*p == ']')
{
level--;
if (level == 0)
break;
}
*q++=(*p++);
}
*q='\0';
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,
depth,&beta,exception);
point.x+=alpha;
point.y+=beta;
if (*p != '\0')
p++;
}
if (*p == '.')
p++;
}
subexpression=DestroyString(subexpression);
}
image=GetImageFromList(fx_info->images,i);
if (image == (Image *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"NoSuchImage","`%s'",expression);
return(0.0);
}
i=GetImageIndexInList(image);
GetPixelInfo(image,&pixel);
status=InterpolatePixelInfo(image,fx_info->view[i],image->interpolate,
point.x,point.y,&pixel,exception);
(void) status;
if ((*p != '\0') && (*(p+1) != '\0') && (*(p+2) != '\0') &&
(LocaleCompare(p,"intensity") != 0) && (LocaleCompare(p,"luma") != 0) &&
(LocaleCompare(p,"luminance") != 0) && (LocaleCompare(p,"hue") != 0) &&
(LocaleCompare(p,"saturation") != 0) &&
(LocaleCompare(p,"lightness") != 0))
{
char
name[MagickPathExtent];
size_t
length;
(void) CopyMagickString(name,p,MagickPathExtent);
length=strlen(name);
for (q=name+length-1; q > name; q--)
{
if (*q == ')')
break;
if (*q == '.')
{
*q='\0';
break;
}
}
q=name;
if ((*q != '\0') && (*(q+1) != '\0') && (*(q+2) != '\0') &&
(GetFxSymbolValue(fx_info,name) == (const double *) NULL))
{
PixelInfo
*color;
color=(PixelInfo *) GetValueFromSplayTree(fx_info->colors,name);
if (color != (PixelInfo *) NULL)
{
pixel=(*color);
p+=length;
}
else
{
MagickBooleanType
status;
status=QueryColorCompliance(name,AllCompliance,&pixel,
fx_info->exception);
if (status != MagickFalse)
{
(void) AddValueToSplayTree(fx_info->colors,
ConstantString(name),ClonePixelInfo(&pixel));
p+=length;
}
}
}
}
(void) CopyMagickString(symbol,p,MagickPathExtent);
StripString(symbol);
if (*symbol == '\0')
{
switch (channel)
{
case RedPixelChannel: return(QuantumScale*pixel.red);
case GreenPixelChannel: return(QuantumScale*pixel.green);
case BluePixelChannel: return(QuantumScale*pixel.blue);
case BlackPixelChannel:
{
if (image->colorspace != CMYKColorspace)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ImageError,"ColorSeparatedImageRequired","`%s'",
image->filename);
return(0.0);
}
return(QuantumScale*pixel.black);
}
case AlphaPixelChannel:
{
if (pixel.alpha_trait == UndefinedPixelTrait)
return(1.0);
alpha=(double) (QuantumScale*pixel.alpha);
return(alpha);
}
case CompositePixelChannel:
{
Quantum
quantum_pixel[MaxPixelChannels];
SetPixelViaPixelInfo(image,&pixel,quantum_pixel);
return(QuantumScale*GetPixelIntensity(image,quantum_pixel));
}
case IndexPixelChannel:
return(0.0);
default:
break;
}
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"UnableToParseExpression","`%s'",p);
return(0.0);
}
switch (*symbol)
{
case 'A':
case 'a':
{
if (LocaleCompare(symbol,"a") == 0)
return((QuantumScale*pixel.alpha));
break;
}
case 'B':
case 'b':
{
if (LocaleCompare(symbol,"b") == 0)
return(QuantumScale*pixel.blue);
break;
}
case 'C':
case 'c':
{
if (IsFxFunction(symbol,"channel",7) != MagickFalse)
{
GeometryInfo
channel_info;
MagickStatusType
flags;
flags=ParseGeometry(symbol+7,&channel_info);
if (image->colorspace == CMYKColorspace)
switch (channel)
{
case CyanPixelChannel:
{
if ((flags & RhoValue) == 0)
return(0.0);
return(channel_info.rho);
}
case MagentaPixelChannel:
{
if ((flags & SigmaValue) == 0)
return(0.0);
return(channel_info.sigma);
}
case YellowPixelChannel:
{
if ((flags & XiValue) == 0)
return(0.0);
return(channel_info.xi);
}
case BlackPixelChannel:
{
if ((flags & PsiValue) == 0)
return(0.0);
return(channel_info.psi);
}
case AlphaPixelChannel:
{
if ((flags & ChiValue) == 0)
return(0.0);
return(channel_info.chi);
}
default:
return(0.0);
}
switch (channel)
{
case RedPixelChannel:
{
if ((flags & RhoValue) == 0)
return(0.0);
return(channel_info.rho);
}
case GreenPixelChannel:
{
if ((flags & SigmaValue) == 0)
return(0.0);
return(channel_info.sigma);
}
case BluePixelChannel:
{
if ((flags & XiValue) == 0)
return(0.0);
return(channel_info.xi);
}
case BlackPixelChannel:
{
if ((flags & ChiValue) == 0)
return(0.0);
return(channel_info.chi);
}
case AlphaPixelChannel:
{
if ((flags & PsiValue) == 0)
return(0.0);
return(channel_info.psi);
}
default:
return(0.0);
}
}
if (LocaleCompare(symbol,"c") == 0)
return(QuantumScale*pixel.red);
break;
}
case 'D':
case 'd':
{
if (LocaleNCompare(symbol,"depth",5) == 0)
return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
break;
}
case 'E':
case 'e':
{
if (LocaleCompare(symbol,"extent") == 0)
{
if (image->extent != 0)
return((double) image->extent);
return((double) GetBlobSize(image));
}
break;
}
case 'G':
case 'g':
{
if (LocaleCompare(symbol,"g") == 0)
return(QuantumScale*pixel.green);
break;
}
case 'K':
case 'k':
{
if (LocaleNCompare(symbol,"kurtosis",8) == 0)
return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
if (LocaleCompare(symbol,"k") == 0)
{
if (image->colorspace != CMYKColorspace)
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"ColorSeparatedImageRequired","`%s'",
image->filename);
return(0.0);
}
return(QuantumScale*pixel.black);
}
break;
}
case 'H':
case 'h':
{
if (LocaleCompare(symbol,"h") == 0)
return((double) image->rows);
if (LocaleCompare(symbol,"hue") == 0)
{
double
hue,
lightness,
saturation;
ConvertRGBToHSL(pixel.red,pixel.green,pixel.blue,&hue,&saturation,
&lightness);
return(hue);
}
break;
}
case 'I':
case 'i':
{
if ((LocaleCompare(symbol,"image.depth") == 0) ||
(LocaleCompare(symbol,"image.minima") == 0) ||
(LocaleCompare(symbol,"image.maxima") == 0) ||
(LocaleCompare(symbol,"image.mean") == 0) ||
(LocaleCompare(symbol,"image.kurtosis") == 0) ||
(LocaleCompare(symbol,"image.skewness") == 0) ||
(LocaleCompare(symbol,"image.standard_deviation") == 0))
return(FxChannelStatistics(fx_info,image,channel,symbol+6,exception));
if (LocaleCompare(symbol,"image.resolution.x") == 0)
return(image->resolution.x);
if (LocaleCompare(symbol,"image.resolution.y") == 0)
return(image->resolution.y);
if (LocaleCompare(symbol,"intensity") == 0)
{
Quantum
quantum_pixel[MaxPixelChannels];
SetPixelViaPixelInfo(image,&pixel,quantum_pixel);
return(QuantumScale*GetPixelIntensity(image,quantum_pixel));
}
if (LocaleCompare(symbol,"i") == 0)
return((double) x);
break;
}
case 'J':
case 'j':
{
if (LocaleCompare(symbol,"j") == 0)
return((double) y);
break;
}
case 'L':
case 'l':
{
if (LocaleCompare(symbol,"lightness") == 0)
{
double
hue,
lightness,
saturation;
ConvertRGBToHSL(pixel.red,pixel.green,pixel.blue,&hue,&saturation,
&lightness);
return(lightness);
}
if (LocaleCompare(symbol,"luma") == 0)
{
double
luma;
luma=0.212656*pixel.red+0.715158*pixel.green+0.072186*pixel.blue;
return(QuantumScale*luma);
}
if (LocaleCompare(symbol,"luminance") == 0)
{
double
luminence;
luminence=0.212656*pixel.red+0.715158*pixel.green+0.072186*pixel.blue;
return(QuantumScale*luminence);
}
break;
}
case 'M':
case 'm':
{
if (LocaleNCompare(symbol,"maxima",6) == 0)
return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
if (LocaleNCompare(symbol,"mean",4) == 0)
return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
if (LocaleNCompare(symbol,"median",6) == 0)
return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
if (LocaleNCompare(symbol,"minima",6) == 0)
return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
if (LocaleCompare(symbol,"m") == 0)
return(QuantumScale*pixel.green);
break;
}
case 'N':
case 'n':
{
if (LocaleCompare(symbol,"n") == 0)
return((double) GetImageListLength(fx_info->images));
break;
}
case 'O':
case 'o':
{
if (LocaleCompare(symbol,"o") == 0)
return(QuantumScale*pixel.alpha);
break;
}
case 'P':
case 'p':
{
if (LocaleCompare(symbol,"page.height") == 0)
return((double) image->page.height);
if (LocaleCompare(symbol,"page.width") == 0)
return((double) image->page.width);
if (LocaleCompare(symbol,"page.x") == 0)
return((double) image->page.x);
if (LocaleCompare(symbol,"page.y") == 0)
return((double) image->page.y);
if (LocaleCompare(symbol,"printsize.x") == 0)
return(PerceptibleReciprocal(image->resolution.x)*image->columns);
if (LocaleCompare(symbol,"printsize.y") == 0)
return(PerceptibleReciprocal(image->resolution.y)*image->rows);
break;
}
case 'Q':
case 'q':
{
if (LocaleCompare(symbol,"quality") == 0)
return((double) image->quality);
break;
}
case 'R':
case 'r':
{
if (LocaleCompare(symbol,"resolution.x") == 0)
return(image->resolution.x);
if (LocaleCompare(symbol,"resolution.y") == 0)
return(image->resolution.y);
if (LocaleCompare(symbol,"r") == 0)
return(QuantumScale*pixel.red);
break;
}
case 'S':
case 's':
{
if (LocaleCompare(symbol,"saturation") == 0)
{
double
hue,
lightness,
saturation;
ConvertRGBToHSL(pixel.red,pixel.green,pixel.blue,&hue,&saturation,
&lightness);
return(saturation);
}
if (LocaleNCompare(symbol,"skewness",8) == 0)
return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
if (LocaleNCompare(symbol,"standard_deviation",18) == 0)
return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
break;
}
case 'T':
case 't':
{
if (LocaleCompare(symbol,"t") == 0)
return((double) GetImageIndexInList(fx_info->images));
break;
}
case 'W':
case 'w':
{
if (LocaleCompare(symbol,"w") == 0)
return((double) image->columns);
break;
}
case 'Y':
case 'y':
{
if (LocaleCompare(symbol,"y") == 0)
return(QuantumScale*pixel.blue);
break;
}
case 'Z':
case 'z':
{
if (LocaleCompare(symbol,"z") == 0)
return((double) GetImageDepth(image,fx_info->exception));
break;
}
default:
break;
}
value=GetFxSymbolValue(fx_info,symbol);
if (value != (const double *) NULL)
return(*value);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"UndefinedVariable","`%s'",symbol);
(void) SetFxSymbolValue(fx_info,symbol,0.0);
return(0.0);
}
/*
  FxOperatorPrecedence() scans an expression and returns a pointer to the
  operator at which the expression should be split for recursive evaluation:
  the outermost operator of weakest binding, honoring left-to-right
  associativity for most operators and right-to-left associativity for
  unary complement, ternary, and assignment.  Returns NULL when no operator
  is found (the expression is a single operand).

  `c' tracks the previous significant character so the scanner can tell a
  binary '+'/'-' from a unary sign and detect implicit multiplication
  (e.g. "2x").  Characters inside '{...}' or '[...]' (tracked by `level')
  are never split points.
*/
static const char *FxOperatorPrecedence(const char *expression,
  ExceptionInfo *exception)
{
  typedef enum
  {
    UndefinedPrecedence,
    NullPrecedence,
    BitwiseComplementPrecedence,
    ExponentPrecedence,
    ExponentialNotationPrecedence,
    MultiplyPrecedence,
    AdditionPrecedence,
    ShiftPrecedence,
    RelationalPrecedence,
    EquivalencyPrecedence,
    BitwiseAndPrecedence,
    BitwiseOrPrecedence,
    LogicalAndPrecedence,
    LogicalOrPrecedence,
    TernaryPrecedence,
    AssignmentPrecedence,
    CommaPrecedence,
    SeparatorPrecedence
  } FxPrecedence;

  FxPrecedence
    precedence,
    target;

  register const char
    *subexpression;

  register int
    c;

  size_t
    level;

  c=(-1);
  level=0;
  subexpression=(const char *) NULL;
  target=NullPrecedence;
  while ((c != '\0') && (*expression != '\0'))
  {
    precedence=UndefinedPrecedence;
    if ((isspace((int) ((unsigned char) *expression)) != 0) || (c == (int) '@'))
      {
        expression++;
        continue;
      }
    /*
      Skip over function names and numeric literals that would otherwise be
      misread as operators (e.g. the '-' in "E-2" scientific notation, or
      hex digits after '#').
    */
    switch (*expression)
    {
      case 'A':
      case 'a':
      {
#if defined(MAGICKCORE_HAVE_ACOSH)
        if (IsFxFunction(expression,"acosh",5) != MagickFalse)
          {
            expression+=5;
            break;
          }
#endif
#if defined(MAGICKCORE_HAVE_ASINH)
        if (IsFxFunction(expression,"asinh",5) != MagickFalse)
          {
            expression+=5;
            break;
          }
#endif
#if defined(MAGICKCORE_HAVE_ATANH)
        if (IsFxFunction(expression,"atanh",5) != MagickFalse)
          {
            expression+=5;
            break;
          }
#endif
        if (IsFxFunction(expression,"atan2",5) != MagickFalse)
          {
            expression+=5;
            break;
          }
        break;
      }
      case 'E':
      case 'e':
      {
        /* cast through unsigned char: c may hold a negative raw char
           (UB for isdigit per CERT STR37-C) */
        if ((isdigit((int) ((unsigned char) c)) != 0) &&
            ((LocaleNCompare(expression,"E+",2) == 0) ||
             (LocaleNCompare(expression,"E-",2) == 0)))
          {
            expression+=2;  /* scientific notation */
            break;
          }
      }
      /* fallthrough */
      case 'J':
      case 'j':
      {
        if ((IsFxFunction(expression,"j0",2) != MagickFalse) ||
            (IsFxFunction(expression,"j1",2) != MagickFalse))
          {
            expression+=2;
            break;
          }
        break;
      }
      case '#':
      {
        while (isxdigit((int) ((unsigned char) *(expression+1))) != 0)
          expression++;
        break;
      }
      default:
        break;
    }
    if ((c == (int) '{') || (c == (int) '['))
      level++;
    else
      if ((c == (int) '}') || (c == (int) ']'))
        level--;
    if (level == 0)
      switch ((unsigned char) *expression)
      {
        case '~':
        case '!':
        {
          precedence=BitwiseComplementPrecedence;
          break;
        }
        case '^':
        case '@':
        {
          precedence=ExponentPrecedence;
          break;
        }
        default:
        {
          /*
            Implicit multiplication: an operand (digit or ')') followed by
            an identifier, '(' or digit -- but never before 'x'/'y' (pixel
            coordinates).
          */
          if (((c != 0) && ((isdigit((int) ((unsigned char) c)) != 0) ||
               (strchr(")",c) != (char *) NULL))) &&
              (((islower((int) ((unsigned char) *expression)) != 0) ||
               (strchr("(",(int) ((unsigned char) *expression)) != (char *) NULL)) ||
               ((isdigit((int) ((unsigned char) c)) == 0) &&
                (isdigit((int) ((unsigned char) *expression)) != 0))) &&
              (strchr("xy",(int) ((unsigned char) *expression)) == (char *) NULL))
            precedence=MultiplyPrecedence;
          break;
        }
        case '*':
        case '/':
        case '%':
        {
          precedence=MultiplyPrecedence;
          break;
        }
        case '+':
        case '-':
        {
          /* binary add/subtract only when preceded by an operand, not
             another operator or '(' (unary sign) */
          if ((strchr("(+-/*%:&^|<>~,",c) == (char *) NULL) ||
              (isalpha((int) ((unsigned char) c)) != 0))
            precedence=AdditionPrecedence;
          break;
        }
        case BitwiseAndAssignmentOperator:
        case BitwiseOrAssignmentOperator:
        case LeftShiftAssignmentOperator:
        case RightShiftAssignmentOperator:
        case PowerAssignmentOperator:
        case ModuloAssignmentOperator:
        case PlusAssignmentOperator:
        case SubtractAssignmentOperator:
        case MultiplyAssignmentOperator:
        case DivideAssignmentOperator:
        case IncrementAssignmentOperator:
        case DecrementAssignmentOperator:
        {
          precedence=AssignmentPrecedence;
          break;
        }
        case LeftShiftOperator:
        case RightShiftOperator:
        {
          precedence=ShiftPrecedence;
          break;
        }
        case '<':
        case LessThanEqualOperator:
        case GreaterThanEqualOperator:
        case '>':
        {
          precedence=RelationalPrecedence;
          break;
        }
        case EqualOperator:
        case NotEqualOperator:
        {
          precedence=EquivalencyPrecedence;
          break;
        }
        case '&':
        {
          precedence=BitwiseAndPrecedence;
          break;
        }
        case '|':
        {
          precedence=BitwiseOrPrecedence;
          break;
        }
        case LogicalAndOperator:
        {
          precedence=LogicalAndPrecedence;
          break;
        }
        case LogicalOrOperator:
        {
          precedence=LogicalOrPrecedence;
          break;
        }
        case ExponentialNotation:
        {
          precedence=ExponentialNotationPrecedence;
          break;
        }
        case ':':
        case '?':
        {
          precedence=TernaryPrecedence;
          break;
        }
        case '=':
        {
          precedence=AssignmentPrecedence;
          break;
        }
        case ',':
        {
          precedence=CommaPrecedence;
          break;
        }
        case ';':
        {
          precedence=SeparatorPrecedence;
          break;
        }
      }
    if ((precedence == BitwiseComplementPrecedence) ||
        (precedence == TernaryPrecedence) ||
        (precedence == AssignmentPrecedence))
      {
        if (precedence > target)
          {
            /*
              Right-to-left associativity.
            */
            target=precedence;
            subexpression=expression;
          }
      }
    else
      if (precedence >= target)
        {
          /*
            Left-to-right associativity.
          */
          target=precedence;
          subexpression=expression;
        }
    if (strchr("(",(int) *expression) != (char *) NULL)
      expression=FxSubexpression(expression,exception);
    c=(int) (*expression++);
  }
  return(subexpression);
}
static double FxEvaluateSubexpression(FxInfo *fx_info,
const PixelChannel channel,const ssize_t x,const ssize_t y,
const char *expression,const size_t depth,double *beta,
ExceptionInfo *exception)
{
#define FxMaxParenthesisDepth 58
#define FxMaxSubexpressionDepth 200
#define FxReturn(value) \
{ \
subexpression=DestroyString(subexpression); \
return(value); \
}
#define FxParseConditional(subexpression,sentinal,p,q) \
{ \
p=subexpression; \
for (q=(char *) p; (*q != (sentinal)) && (*q != '\0'); q++) \
if (*q == '(') \
{ \
for (q++; (*q != ')') && (*q != '\0'); q++); \
if (*q == '\0') \
break; \
} \
if (*q == '\0') \
{ \
(void) ThrowMagickException(exception,GetMagickModule(), \
OptionError,"UnableToParseExpression","`%s'",subexpression); \
FxReturn(0.0); \
} \
if (strlen(q) == 1) \
*(q+1)='\0'; \
*q='\0'; \
}
char
*q,
*subexpression;
double
alpha,
gamma,
sans,
value;
register const char
*p;
*beta=0.0;
sans=0.0;
subexpression=AcquireString(expression);
*subexpression='\0';
if (depth > FxMaxSubexpressionDepth)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"UnableToParseExpression","`%s'",expression);
FxReturn(0.0);
}
if (exception->severity >= ErrorException)
FxReturn(0.0);
while (isspace((int) ((unsigned char) *expression)) != 0)
expression++;
if (*expression == '\0')
FxReturn(0.0);
p=FxOperatorPrecedence(expression,exception);
if (p != (const char *) NULL)
{
(void) CopyMagickString(subexpression,expression,(size_t)
(p-expression+1));
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,depth+1,
beta,exception);
switch ((unsigned char) *p)
{
case '~':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
*beta=(double) (~(size_t) *beta);
FxReturn(*beta);
}
case '!':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(*beta == 0.0 ? 1.0 : 0.0);
}
case '^':
{
*beta=pow(alpha,FxEvaluateSubexpression(fx_info,channel,x,y,++p,
depth+1,beta,exception));
FxReturn(*beta);
}
case '*':
case ExponentialNotation:
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha*(*beta));
}
case '/':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(PerceptibleReciprocal(*beta)*alpha);
}
case '%':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(fmod(alpha,*beta));
}
case '+':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha+(*beta));
}
case '-':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha-(*beta));
}
case BitwiseAndAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=(double) ((size_t) (alpha+0.5) & (size_t) (*beta+0.5));
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case BitwiseOrAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=(double) ((size_t) (alpha+0.5) | (size_t) (*beta+0.5));
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case LeftShiftAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
if ((size_t) (*beta+0.5) >= (8*sizeof(size_t)))
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"ShiftCountOverflow","`%s'",subexpression);
FxReturn(0.0);
}
value=(double) ((size_t) (alpha+0.5) << (size_t) (*beta+0.5));
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case RightShiftAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
if ((size_t) (*beta+0.5) >= (8*sizeof(size_t)))
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"ShiftCountOverflow","`%s'",subexpression);
FxReturn(0.0);
}
value=(double) ((size_t) (alpha+0.5) >> (size_t) (*beta+0.5));
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case PowerAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=pow(alpha,*beta);
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case ModuloAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=fmod(alpha,*beta);
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case PlusAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=alpha+(*beta);
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case SubtractAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=alpha-(*beta);
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case MultiplyAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=alpha*(*beta);
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case DivideAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=alpha*PerceptibleReciprocal(*beta);
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case IncrementAssignmentOperator:
{
if (*subexpression == '\0')
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=alpha+1.0;
if (*subexpression == '\0')
{
if (SetFxSymbolValue(fx_info,p,value) == MagickFalse)
return(0.0);
}
else
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case DecrementAssignmentOperator:
{
if (*subexpression == '\0')
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=alpha-1.0;
if (*subexpression == '\0')
{
if (SetFxSymbolValue(fx_info,p,value) == MagickFalse)
return(0.0);
}
else
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case LeftShiftOperator:
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
if ((size_t) (gamma+0.5) >= (8*sizeof(size_t)))
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"ShiftCountOverflow","`%s'",subexpression);
FxReturn(0.0);
}
*beta=(double) ((size_t) (alpha+0.5) << (size_t) (gamma+0.5));
FxReturn(*beta);
}
case RightShiftOperator:
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
if ((size_t) (gamma+0.5) >= (8*sizeof(size_t)))
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"ShiftCountOverflow","`%s'",subexpression);
FxReturn(0.0);
}
*beta=(double) ((size_t) (alpha+0.5) >> (size_t) (gamma+0.5));
FxReturn(*beta);
}
case '<':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha < *beta ? 1.0 : 0.0);
}
case LessThanEqualOperator:
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha <= *beta ? 1.0 : 0.0);
}
case '>':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha > *beta ? 1.0 : 0.0);
}
case GreaterThanEqualOperator:
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha >= *beta ? 1.0 : 0.0);
}
case EqualOperator:
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(fabs(alpha-(*beta)) < MagickEpsilon ? 1.0 : 0.0);
}
case NotEqualOperator:
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(fabs(alpha-(*beta)) >= MagickEpsilon ? 1.0 : 0.0);
}
case '&':
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
*beta=(double) ((size_t) (alpha+0.5) & (size_t) (gamma+0.5));
FxReturn(*beta);
}
case '|':
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
*beta=(double) ((size_t) (alpha+0.5) | (size_t) (gamma+0.5));
FxReturn(*beta);
}
case LogicalAndOperator:
{
p++;
if (alpha <= 0.0)
{
*beta=0.0;
FxReturn(*beta);
}
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta,
exception);
*beta=(gamma > 0.0) ? 1.0 : 0.0;
FxReturn(*beta);
}
case LogicalOrOperator:
{
p++;
if (alpha > 0.0)
{
*beta=1.0;
FxReturn(*beta);
}
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta,
exception);
*beta=(gamma > 0.0) ? 1.0 : 0.0;
FxReturn(*beta);
}
case '?':
{
(void) CopyMagickString(subexpression,++p,MagickPathExtent-1);
FxParseConditional(subexpression,':',p,q);
if (fabs(alpha) >= MagickEpsilon)
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta,
exception);
else
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,q+1,depth+1,beta,
exception);
FxReturn(gamma);
}
case '=':
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=(*beta);
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case ',':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha);
}
case ';':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(*beta);
}
default:
{
gamma=alpha*FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,
beta,exception);
FxReturn(gamma);
}
}
}
if (strchr("(",(int) *expression) != (char *) NULL)
{
size_t
length;
if (depth >= FxMaxParenthesisDepth)
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"ParenthesisNestedTooDeeply","`%s'",expression);
length=CopyMagickString(subexpression,expression+1,MagickPathExtent);
if (length != 0)
subexpression[length-1]='\0';
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,depth+1,
beta,exception);
FxReturn(gamma);
}
switch (*expression)
{
case '+':
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth+1,
beta,exception);
FxReturn(1.0*gamma);
}
case '-':
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth+1,
beta,exception);
FxReturn(-1.0*gamma);
}
case '~':
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth+1,
beta,exception);
FxReturn((double) (~(size_t) (gamma+0.5)));
}
case 'A':
case 'a':
{
if (IsFxFunction(expression,"abs",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(fabs(alpha));
}
#if defined(MAGICKCORE_HAVE_ACOSH)
if (IsFxFunction(expression,"acosh",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn(acosh(alpha));
}
#endif
if (IsFxFunction(expression,"acos",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(acos(alpha));
}
#if defined(MAGICKCORE_HAVE_J1)
if (IsFxFunction(expression,"airy",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
if (alpha == 0.0)
FxReturn(1.0);
gamma=2.0*j1((MagickPI*alpha))/(MagickPI*alpha);
FxReturn(gamma*gamma);
}
#endif
#if defined(MAGICKCORE_HAVE_ASINH)
if (IsFxFunction(expression,"asinh",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn(asinh(alpha));
}
#endif
if (IsFxFunction(expression,"asin",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(asin(alpha));
}
if (IsFxFunction(expression,"alt",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(((ssize_t) alpha) & 0x01 ? -1.0 : 1.0);
}
if (IsFxFunction(expression,"atan2",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn(atan2(alpha,*beta));
}
#if defined(MAGICKCORE_HAVE_ATANH)
if (IsFxFunction(expression,"atanh",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn(atanh(alpha));
}
#endif
if (IsFxFunction(expression,"atan",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(atan(alpha));
}
if (LocaleCompare(expression,"a") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'B':
case 'b':
{
if (LocaleCompare(expression,"b") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'C':
case 'c':
{
if (IsFxFunction(expression,"ceil",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(ceil(alpha));
}
if (IsFxFunction(expression,"clamp",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
if (alpha < 0.0)
FxReturn(0.0);
if (alpha > 1.0)
FxReturn(1.0);
FxReturn(alpha);
}
if (IsFxFunction(expression,"cosh",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(cosh(alpha));
}
if (IsFxFunction(expression,"cos",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(cos(alpha));
}
if (LocaleCompare(expression,"c") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'D':
case 'd':
{
if (IsFxFunction(expression,"debug",5) != MagickFalse)
{
const char
*type;
size_t
length;
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
switch (fx_info->images->colorspace)
{
case CMYKColorspace:
{
switch (channel)
{
case CyanPixelChannel: type="cyan"; break;
case MagentaPixelChannel: type="magenta"; break;
case YellowPixelChannel: type="yellow"; break;
case AlphaPixelChannel: type="alpha"; break;
case BlackPixelChannel: type="black"; break;
default: type="unknown"; break;
}
break;
}
case GRAYColorspace:
{
switch (channel)
{
case RedPixelChannel: type="gray"; break;
case AlphaPixelChannel: type="alpha"; break;
default: type="unknown"; break;
}
break;
}
default:
{
switch (channel)
{
case RedPixelChannel: type="red"; break;
case GreenPixelChannel: type="green"; break;
case BluePixelChannel: type="blue"; break;
case AlphaPixelChannel: type="alpha"; break;
default: type="unknown"; break;
}
break;
}
}
*subexpression='\0';
length=1;
if (strlen(expression) > 6)
length=CopyMagickString(subexpression,expression+6,
MagickPathExtent);
if (length != 0)
subexpression[length-1]='\0';
if (fx_info->file != (FILE *) NULL)
(void) FormatLocaleFile(fx_info->file,"%s[%.20g,%.20g].%s: "
"%s=%.*g\n",fx_info->images->filename,(double) x,(double) y,type,
subexpression,GetMagickPrecision(),alpha);
FxReturn(alpha);
}
if (IsFxFunction(expression,"do",2) != MagickFalse)
{
size_t
length;
/*
Parse do(expression,condition test).
*/
length=CopyMagickString(subexpression,expression+3,
MagickPathExtent-1);
if (length != 0)
subexpression[length-1]='\0';
FxParseConditional(subexpression,',',p,q);
for (alpha=0.0; ; )
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,q+1,depth+1,beta,
exception);
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,&sans,
exception);
if (fabs(gamma) < MagickEpsilon)
break;
}
FxReturn(alpha);
}
if (IsFxFunction(expression,"drc",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn((alpha/(*beta*(alpha-1.0)+1.0)));
}
break;
}
case 'E':
case 'e':
{
if (LocaleCompare(expression,"epsilon") == 0)
FxReturn(MagickEpsilon);
#if defined(MAGICKCORE_HAVE_ERF)
if (IsFxFunction(expression,"erf",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(erf(alpha));
}
#endif
if (IsFxFunction(expression,"exp",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(exp(alpha));
}
if (LocaleCompare(expression,"e") == 0)
FxReturn(2.7182818284590452354);
break;
}
case 'F':
case 'f':
{
if (IsFxFunction(expression,"floor",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn(floor(alpha));
}
if (IsFxFunction(expression,"for",3) != MagickFalse)
{
double
sans = 0.0;
size_t
length;
/*
Parse for(initialization, condition test, expression).
*/
length=CopyMagickString(subexpression,expression+4,
MagickPathExtent-1);
if (length != 0)
subexpression[length-1]='\0';
FxParseConditional(subexpression,',',p,q);
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,&sans,
exception);
(void) CopyMagickString(subexpression,q+1,MagickPathExtent-1);
FxParseConditional(subexpression,',',p,q);
for (alpha=0.0; ; )
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,&sans,
exception);
if (fabs(gamma) < MagickEpsilon)
break;
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,q+1,depth+1,beta,
exception);
}
FxReturn(alpha);
}
break;
}
case 'G':
case 'g':
{
if (IsFxFunction(expression,"gauss",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn(exp((-alpha*alpha/2.0))/sqrt(2.0*MagickPI));
}
if (IsFxFunction(expression,"gcd",3) != MagickFalse)
{
MagickOffsetType
gcd;
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
gcd=FxGCD((MagickOffsetType) (alpha+0.5),(MagickOffsetType) (*beta+
0.5));
FxReturn((double) gcd);
}
if (LocaleCompare(expression,"g") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'H':
case 'h':
{
if (LocaleCompare(expression,"h") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
if (LocaleCompare(expression,"hue") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
if (IsFxFunction(expression,"hypot",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn(hypot(alpha,*beta));
}
break;
}
case 'K':
case 'k':
{
if (LocaleCompare(expression,"k") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'I':
case 'i':
{
if (IsFxFunction(expression,"if",2) != MagickFalse)
{
double
sans = 0.0;
size_t
length;
length=CopyMagickString(subexpression,expression+3,
MagickPathExtent-1);
if (length != 0)
subexpression[length-1]='\0';
FxParseConditional(subexpression,',',p,q);
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,&sans,
exception);
(void) CopyMagickString(subexpression,q+1,MagickPathExtent-1);
FxParseConditional(subexpression,',',p,q);
if (fabs(alpha) >= MagickEpsilon)
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta,
exception);
else
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,q+1,depth+1,beta,
exception);
FxReturn(alpha);
}
if (LocaleCompare(expression,"intensity") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
if (IsFxFunction(expression,"int",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(floor(alpha));
}
if (IsFxFunction(expression,"isnan",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn((double) !!IsNaN(alpha));
}
if (LocaleCompare(expression,"i") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'J':
case 'j':
{
if (LocaleCompare(expression,"j") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
#if defined(MAGICKCORE_HAVE_J0)
if (IsFxFunction(expression,"j0",2) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2,
depth+1,beta,exception);
FxReturn(j0(alpha));
}
#endif
#if defined(MAGICKCORE_HAVE_J1)
if (IsFxFunction(expression,"j1",2) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2,
depth+1,beta,exception);
FxReturn(j1(alpha));
}
#endif
#if defined(MAGICKCORE_HAVE_J1)
if (IsFxFunction(expression,"jinc",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
if (alpha == 0.0)
FxReturn(1.0);
FxReturn((2.0*j1((MagickPI*alpha))/(MagickPI*alpha)));
}
#endif
break;
}
case 'L':
case 'l':
{
if (IsFxFunction(expression,"ln",2) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2,
depth+1,beta,exception);
FxReturn(log(alpha));
}
if (IsFxFunction(expression,"logtwo",6) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+6,
depth+1,beta,exception);
FxReturn(log10(alpha)/log10(2.0));
}
if (IsFxFunction(expression,"log",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(log10(alpha));
}
if (LocaleCompare(expression,"lightness") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'M':
case 'm':
{
if (LocaleCompare(expression,"MaxRGB") == 0)
FxReturn(QuantumRange);
if (LocaleNCompare(expression,"maxima",6) == 0)
break;
if (IsFxFunction(expression,"max",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(alpha > *beta ? alpha : *beta);
}
if (LocaleNCompare(expression,"minima",6) == 0)
break;
if (IsFxFunction(expression,"min",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(alpha < *beta ? alpha : *beta);
}
if (IsFxFunction(expression,"mod",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(alpha-floor((alpha*PerceptibleReciprocal(*beta)))*(*beta));
}
if (LocaleCompare(expression,"m") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'N':
case 'n':
{
if (IsFxFunction(expression,"not",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn((double) (alpha < MagickEpsilon));
}
if (LocaleCompare(expression,"n") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'O':
case 'o':
{
if (LocaleCompare(expression,"Opaque") == 0)
FxReturn(1.0);
if (LocaleCompare(expression,"o") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'P':
case 'p':
{
if (LocaleCompare(expression,"phi") == 0)
FxReturn(MagickPHI);
if (LocaleCompare(expression,"pi") == 0)
FxReturn(MagickPI);
if (IsFxFunction(expression,"pow",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(pow(alpha,*beta));
}
if (LocaleCompare(expression,"p") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'Q':
case 'q':
{
if (LocaleCompare(expression,"QuantumRange") == 0)
FxReturn(QuantumRange);
if (LocaleCompare(expression,"QuantumScale") == 0)
FxReturn(QuantumScale);
break;
}
case 'R':
case 'r':
{
if (IsFxFunction(expression,"rand",4) != MagickFalse)
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_FxEvaluateSubexpression)
#endif
alpha=GetPseudoRandomValue(fx_info->random_info);
FxReturn(alpha);
}
if (IsFxFunction(expression,"round",5) != MagickFalse)
{
/*
Round the fraction to nearest integer.
*/
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
if ((alpha-floor(alpha)) < (ceil(alpha)-alpha))
FxReturn(floor(alpha));
FxReturn(ceil(alpha));
}
if (LocaleCompare(expression,"r") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'S':
case 's':
{
if (LocaleCompare(expression,"saturation") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
if (IsFxFunction(expression,"sign",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(alpha < 0.0 ? -1.0 : 1.0);
}
if (IsFxFunction(expression,"sinc",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
if (alpha == 0)
FxReturn(1.0);
FxReturn(sin((MagickPI*alpha))/(MagickPI*alpha));
}
if (IsFxFunction(expression,"sinh",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(sinh(alpha));
}
if (IsFxFunction(expression,"sin",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(sin(alpha));
}
if (IsFxFunction(expression,"sqrt",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(sqrt(alpha));
}
if (IsFxFunction(expression,"squish",6) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+6,
depth+1,beta,exception);
FxReturn((1.0/(1.0+exp(-alpha))));
}
if (LocaleCompare(expression,"s") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'T':
case 't':
{
if (IsFxFunction(expression,"tanh",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(tanh(alpha));
}
if (IsFxFunction(expression,"tan",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(tan(alpha));
}
if (LocaleCompare(expression,"Transparent") == 0)
FxReturn(0.0);
if (IsFxFunction(expression,"trunc",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
if (alpha >= 0.0)
FxReturn(floor(alpha));
FxReturn(ceil(alpha));
}
if (LocaleCompare(expression,"t") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'U':
case 'u':
{
if (LocaleCompare(expression,"u") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'V':
case 'v':
{
if (LocaleCompare(expression,"v") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'W':
case 'w':
{
if (IsFxFunction(expression,"while",5) != MagickFalse)
{
size_t
length;
/*
Parse while(condition test, expression).
*/
length=CopyMagickString(subexpression,expression+6,
MagickPathExtent-1);
if (length != 0)
subexpression[length-1]='\0';
FxParseConditional(subexpression,',',p,q);
for (alpha=0.0; ; )
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,&sans,
exception);
if (fabs(gamma) < MagickEpsilon)
break;
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,q+1,depth+1,
beta,exception);
}
FxReturn(alpha);
}
if (LocaleCompare(expression,"w") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'Y':
case 'y':
{
if (LocaleCompare(expression,"y") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'Z':
case 'z':
{
if (LocaleCompare(expression,"z") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
default:
break;
}
subexpression=DestroyString(subexpression);
q=(char *) expression;
alpha=InterpretSiPrefixValue(expression,&q);
if (q == expression)
alpha=FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception);
FxReturn(alpha);
}
MagickPrivate MagickBooleanType FxEvaluateExpression(FxInfo *fx_info,
  double *alpha,ExceptionInfo *exception)
{
  /*
    Evaluate the fx expression once, channel-agnostically: delegate to
    FxEvaluateChannelExpression() for the gray channel at pixel (0,0) and
    store the numeric result in *alpha.
  */
  return(FxEvaluateChannelExpression(fx_info,GrayPixelChannel,0,0,alpha,
    exception));
}
MagickExport MagickBooleanType FxPreprocessExpression(FxInfo *fx_info,
  double *alpha,ExceptionInfo *exception)
{
  /*
    Dry-run the expression to validate it before per-pixel evaluation.
    fx_info->file is temporarily detached for the duration of the
    evaluation — presumably to suppress trace/debug output during this
    preprocessing pass (TODO confirm against FxEvaluateSubexpression) —
    and restored before returning.
  */
  FILE
    *saved_file;

  MagickBooleanType
    status;

  saved_file=fx_info->file;
  fx_info->file=(FILE *) NULL;
  status=FxEvaluateChannelExpression(fx_info,GrayPixelChannel,0,0,alpha,
    exception);
  fx_info->file=saved_file;
  return(status);
}
MagickPrivate MagickBooleanType FxEvaluateChannelExpression(FxInfo *fx_info,
  const PixelChannel channel,const ssize_t x,const ssize_t y,
  double *alpha,ExceptionInfo *exception)
{
  /*
    Evaluate fx_info->expression for one channel of pixel (x,y); the value
    is written to *alpha.  Returns MagickFalse only when evaluation raised
    an OptionError in the exception structure, MagickTrue otherwise.
  */
  double
    beta = 0.0;

  *alpha=FxEvaluateSubexpression(fx_info,channel,x,y,fx_info->expression,0,
    &beta,exception);
  if (exception->severity == OptionError)
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F x I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FxImage() applies a mathematical expression to the specified image.
%
% The format of the FxImage method is:
%
% Image *FxImage(const Image *image,const char *expression,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o expression: A mathematical expression.
%
% o exception: return any errors or warnings in this structure.
%
*/
static FxInfo **DestroyFxThreadSet(FxInfo **fx_info)
{
  /*
    Destroy each per-thread FxInfo in the thread set, then release the
    array itself.  Always returns NULL so callers can write
    fx_info=DestroyFxThreadSet(fx_info).
  */
  size_t
    number_threads;

  ssize_t
    i;

  assert(fx_info != (FxInfo **) NULL);
  /*
    Hoist the loop-invariant resource-limit query out of the loop
    condition; the original re-queried it on every iteration.
  */
  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  for (i=0; i < (ssize_t) number_threads; i++)
    if (fx_info[i] != (FxInfo *) NULL)
      fx_info[i]=DestroyFxInfo(fx_info[i]);
  fx_info=(FxInfo **) RelinquishMagickMemory(fx_info);
  return(fx_info);
}
static FxInfo **AcquireFxThreadSet(const Image *image,const char *expression,
  ExceptionInfo *exception)
{
  /*
    Build one FxInfo per worker thread for parallel expression evaluation.
    An expression of the form "@filename" is read from the named file.
    Returns NULL (with exception set) on any failure; on success the
    caller owns the set and must release it with DestroyFxThreadSet().
  */
  char
    *fx_expression;

  double
    alpha;

  FxInfo
    **fx_info;

  size_t
    number_threads;

  ssize_t
    i;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  fx_info=(FxInfo **) AcquireQuantumMemory(number_threads,sizeof(*fx_info));
  if (fx_info == (FxInfo **) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return((FxInfo **) NULL);
    }
  (void) memset(fx_info,0,number_threads*sizeof(*fx_info));
  if (*expression != '@')
    fx_expression=ConstantString(expression);
  else
    fx_expression=FileToString(expression+1,~0UL,exception);
  if (fx_expression == (char *) NULL)
    {
      /*
        Bug fix: FileToString() returns NULL when the @file cannot be
        read; the original passed that NULL to AcquireFxInfo() and later
        to DestroyString().  Release the (all-NULL) array and bail out;
        FileToString() has already recorded the exception.
      */
      fx_info=(FxInfo **) RelinquishMagickMemory(fx_info);
      return((FxInfo **) NULL);
    }
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    MagickBooleanType
      status;

    fx_info[i]=AcquireFxInfo(image,fx_expression,exception);
    if (fx_info[i] == (FxInfo *) NULL)
      break;
    /* Validate the expression once per thread before per-pixel use. */
    status=FxPreprocessExpression(fx_info[i],&alpha,exception);
    if (status == MagickFalse)
      break;
  }
  fx_expression=DestroyString(fx_expression);
  if (i < (ssize_t) number_threads)
    fx_info=DestroyFxThreadSet(fx_info);  /* partial set: tear it all down */
  return(fx_info);
}
MagickExport Image *FxImage(const Image *image,const char *expression,
  ExceptionInfo *exception)
{
#define FxImageTag "Fx/Image"

  CacheView
    *fx_view,
    *image_view;

  FxInfo
    **magick_restrict fx_info;

  Image
    *fx_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* A NULL expression degenerates to a plain clone of the input image. */
  if (expression == (const char *) NULL)
    return(CloneImage(image,0,0,MagickTrue,exception));
  /* One FxInfo per worker thread; indexed below by OpenMP thread id. */
  fx_info=AcquireFxThreadSet(image,expression,exception);
  if (fx_info == (FxInfo **) NULL)
    return((Image *) NULL);
  fx_image=CloneImage(image,0,0,MagickTrue,exception);
  if (fx_image == (Image *) NULL)
    {
      fx_info=DestroyFxThreadSet(fx_info);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(fx_image,DirectClass,exception) == MagickFalse)
    {
      fx_info=DestroyFxThreadSet(fx_info);
      fx_image=DestroyImage(fx_image);
      return((Image *) NULL);
    }
  /*
    Fx image: evaluate the expression for every channel of every pixel,
    reading from the source view and writing the clamped result into the
    clone.  Rows are processed in parallel when OpenMP is available.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  fx_view=AcquireAuthenticCacheView(fx_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic) shared(progress,status) \
    magick_number_threads(image,fx_image,fx_image->rows,1)
#endif
  for (y=0; y < (ssize_t) fx_image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();  /* selects this thread's private FxInfo */

    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    /* Another row already failed; skip the work but keep the loop legal
       for OpenMP (no break out of a parallel for). */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(fx_view,0,y,fx_image->columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) fx_image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          alpha;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait fx_traits=GetPixelChannelTraits(fx_image,channel);
        /* Skip channels that are undefined in either image. */
        if ((traits == UndefinedPixelTrait) ||
            (fx_traits == UndefinedPixelTrait))
          continue;
        /* Copy-trait channels pass through unchanged. */
        if ((fx_traits & CopyPixelTrait) != 0)
          {
            SetPixelChannel(fx_image,channel,p[i],q);
            continue;
          }
        alpha=0.0;
        /* Expression result is normalized; scale to the quantum range. */
        (void) FxEvaluateChannelExpression(fx_info[id],channel,x,y,&alpha,
          exception);
        q[i]=ClampToQuantum(QuantumRange*alpha);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(fx_image);
    }
    if (SyncCacheViewAuthenticPixels(fx_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        /* NOTE(review): the read of `progress` passed to SetImageProgress
           is outside the atomic region, so the reported value is only
           approximate under contention — appears intentional; confirm. */
        proceed=SetImageProgress(image,FxImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  fx_view=DestroyCacheView(fx_view);
  image_view=DestroyCacheView(image_view);
  fx_info=DestroyFxThreadSet(fx_info);
  /* On any row failure, discard the partial result and return NULL. */
  if (status == MagickFalse)
    fx_image=DestroyImage(fx_image);
  return(fx_image);
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.