source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
axpy_ompacc2.c | // Add example of writing multiple device code using OpenMP 4.0
//
/* change this to do saxpy or daxpy : single precision or double precision*/
#include <omp.h>
#define REAL double
#ifdef __cplusplus
extern "C" {
#endif
/* both the omp version and ompacc version */
extern void axpy_omp(REAL* x, REAL* y, int n, REAL a);
extern void axpy_ompacc(REAL* x, REAL* y, int n, REAL a);
extern double read_timer(); /* in second */
extern double read_timer_ms(); /* in ms */
#ifdef __cplusplus
}
#endif
/* standard one-dev support */
/* y[i] += a * x[i] on a single accelerator device.
 * NOTE(review): "device (gpu0)" is not standard OpenMP 4.0 -- the device
 * clause takes an integer expression; gpu0 is part of the proposal this
 * example illustrates (see the comment block below), so this function
 * documents intent rather than strictly-conforming code. */
void axpy_ompacc(REAL* x, REAL* y, int n, REAL a) {
int i;
/* this one defines both the target device name and data environment to map to,
I think here we need mechanism to tell the compiler the device type (could be multiple) so that compiler can generate the codes of different versions;
we also need to let the runtime know what the target device is so the runtime will chose the right function to call if the code are generated
#pragma omp target device (gpu0) map(x, y)
*/
/* x is input-only (to); y is copied in and back out (tofrom). Array
sections use [start:length] syntax, so [0:n] covers the whole vector. */
#pragma omp target device (gpu0) map(tofrom: y[0:n]) map(to: x[0:n],a,n)
#pragma omp parallel for shared(x, y, n, a) private(i)
for (i = 0; i < n; ++i)
y[i] += a * x[i];
}
/* version 1: use omp parallel, i.e. each host thread responsible for one dev */
/* y += a*x split across all available devices: one host thread drives one
 * device, each offloading its contiguous chunk of the vectors.
 *
 * FIXES vs. the original:
 *  - OpenMP array sections are [lower-bound : length], not [lower : upper];
 *    the original mapped y[starti:endi] (length starti+partsize), over-mapping
 *    past this device's chunk.
 *  - The target loop indexed y[i]/x[i] from 0 instead of from starti, so each
 *    device updated the wrong elements of the mapped region.
 *  - Guard against ndev < 1 (num_threads(0) is invalid). */
void axpy_mdev_v1(REAL* x, REAL* y, int n, REAL a) {
    int ndev = omp_get_num_devices(); /* standard omp call, see ticket 167 */
    if (ndev < 1) ndev = 1;           /* fall back to the host device */
#pragma omp parallel num_threads(ndev)
    {
        int i;
        /* chunking it for each device */
        int devid = omp_get_thread_num();
        int remain = n % ndev;
        int esize = n / ndev;
        int partsize, starti;
        if (devid < remain) { /* each of the first remain devs gets one extra */
            partsize = esize + 1;
            starti = partsize * devid;
        } else {
            partsize = esize;
            starti = esize * devid + remain;
        }
        /* Map exactly partsize elements starting at starti, and index with
         * the starti offset inside the target region. */
#pragma omp target device (devid) map(tofrom: y[starti:partsize]) map(to: x[starti:partsize],a,partsize,starti)
#pragma omp parallel for shared(x, y, partsize, starti, a) private(i)
        for (i = 0; i < partsize; ++i)
            y[starti + i] += a * x[starti + i];
    }
}
|
bcnn_fc_layer.c | /*
* Copyright (c) 2016-present Jean-Noel Braun.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "bcnn_fc_layer.h"
#ifdef BCNN_USE_BLAS
#include "cblas.h"
#endif
#include <bh/bh_log.h>
#include <bh/bh_mem.h>
#include <bh/bh_string.h>
#include "bcnn_activation_layer.h"
#include "bcnn_learner.h"
#include "bcnn_mat.h"
#include "bcnn_net.h"
#include "bcnn_tensor.h"
#include "bcnn_utils.h"
/* Appends a fully-connected (dense) layer to the network graph.
 *
 * Wires the node inputs as [src tensor, weights, biases], creates and fills
 * the weight/bias tensors, allocates the output tensor named dst_id, and
 * registers the forward/backward/update/release callbacks.
 *
 * Parameters:
 *   net         network to extend
 *   output_size number of output neurons
 *   init        weight-filler type used to initialize the weights
 *   activation  activation applied to the layer output
 *   quantize    NOTE(review): unused in this function -- presumably consumed
 *               by int8 paths elsewhere; confirm before removing.
 *   src_id      name of the input tensor (looked up by name unless this is
 *               the first node, which is wired to tensor 0)
 *   dst_id      name given to the output tensor
 *
 * Returns BCNN_SUCCESS; BCNN_CHECK_AND_LOG reports an unknown src_id. */
bcnn_status bcnn_add_fullc_layer(bcnn_net *net, int output_size,
                                 bcnn_filler_type init,
                                 bcnn_activation activation, int quantize,
                                 const char *src_id, const char *dst_id) {
    bcnn_node node = {0};
    bcnn_tensor dst_tensor = {0};
    if (net->num_nodes > 0) {
        int is_src_node_found = 0;
        /* Search newest-first: the most recent tensor with this name wins. */
        for (int i = net->num_tensors - 1; i >= 0; --i) {
            if (strcmp(net->tensors[i].name, src_id) == 0) {
                bcnn_node_add_input(net, &node, i);
                is_src_node_found = 1;
                break;
            }
        }
        BCNN_CHECK_AND_LOG(
            net->log_ctx, is_src_node_found, BCNN_INVALID_PARAMETER,
            "Full-connected layer: invalid input node name %s\n", src_id);
    } else {
        /* First node: wire to tensor 0 (the network input). */
        bcnn_node_add_input(net, &node, 0);
    }
    int input_size = bcnn_tensor_size3d(&net->tensors[node.src[0]]);
    /* Setup weights and biases.
     * FIX: snprintf instead of sprintf so a long src_id cannot overflow the
     * 256-byte name buffers. */
    char weights_name[256];
    snprintf(weights_name, sizeof(weights_name), "%s_w", src_id);
    char biases_name[256];
    snprintf(biases_name, sizeof(biases_name), "%s_b", src_id);
    /* Create weights tensor, shaped to the input volume per output unit. */
    bcnn_tensor weights = {0};
    bcnn_tensor_create(&weights, output_size, net->tensors[node.src[0]].c,
                       net->tensors[node.src[0]].h, net->tensors[node.src[0]].w,
                       1, weights_name, net->mode);
    bcnn_tensor_filler w_filler = {.range = input_size, .type = init};
    bcnn_tensor_fill(&weights, w_filler);
    bcnn_net_add_tensor(net, weights);
    bcnn_node_add_input(net, &node, net->num_tensors - 1);
    /* Create bias tensor (one bias per output unit). */
    bcnn_tensor biases = {0};
    bcnn_tensor_create(&biases, 1, 1, 1, output_size, 1, biases_name,
                       net->mode);
    bcnn_net_add_tensor(net, biases);
    bcnn_node_add_input(net, &node, net->num_tensors - 1);
    /* Setup output tensor. */
    bcnn_tensor_set_shape(&dst_tensor,
                          net->tensors[node.src[0]].n, // batch size
                          output_size,                 // depth
                          1,                           // height
                          1,                           // width
                          1);
    bcnn_tensor_allocate(&dst_tensor, net->mode);
    bh_strfill(&dst_tensor.name, dst_id);
    /* Add tensor to net and record its index as the node output. */
    bcnn_net_add_tensor(net, dst_tensor);
    bcnn_node_add_output(net, &node, net->num_tensors - 1);
    node.type = BCNN_LAYER_FULL_CONNECTED;
    node.param_size = sizeof(bcnn_fullc_param);
    /* NOTE(review): calloc result unchecked -- matches surrounding style. */
    node.param = (bcnn_fullc_param *)calloc(1, node.param_size);
    bcnn_fullc_param *param = (bcnn_fullc_param *)node.param;
    param->activation = activation;
    /* Adam needs per-weight first/second moment buffers. */
    if (net->learner != NULL) {
        if (net->learner->optimizer == BCNN_OPTIM_ADAM) {
            int weights_size = bcnn_tensor_size(&weights);
            param->adam_m = (float *)bh_align_calloc(
                weights_size * sizeof(float), align_offset_);
            param->adam_v = (float *)bh_align_calloc(
                weights_size * sizeof(float), align_offset_);
        }
    }
#ifdef BCNN_USE_CUDA
    if (net->learner != NULL) {
        if (net->learner->optimizer == BCNN_OPTIM_ADAM) {
            int weights_size = bcnn_tensor_size(&weights);
            param->adam_m_gpu =
                bcnn_cuda_memcpy_f32(param->adam_m, weights_size);
            param->adam_v_gpu =
                bcnn_cuda_memcpy_f32(param->adam_v, weights_size);
        }
    }
#endif
    node.forward = bcnn_forward_fullc_layer;
    node.backward = bcnn_backward_fullc_layer;
    node.update = bcnn_update_fullc_layer;
    node.release_param = bcnn_release_param_fullc_layer;
    bcnn_net_add_node(net, node);
    char node_opname[256];
    snprintf(node_opname, 256, BH_LOG_BOLDBLUE "[Dense]" BH_LOG_RESET);
    BCNN_INFO(net->log_ctx,
              "%-48s %-8s (%4d x%4d x%4d) -> %-8s (%4d x%4d x%4d)\n",
              node_opname, net->tensors[node.src[0]].name,
              net->tensors[node.src[0]].w, net->tensors[node.src[0]].h,
              net->tensors[node.src[0]].c, net->tensors[node.dst[0]].name,
              net->tensors[node.dst[0]].w, net->tensors[node.dst[0]].h,
              net->tensors[node.dst[0]].c);
    return BCNN_SUCCESS;
}
/* Forward pass of the fully-connected layer on CPU.
 * Computes dst = activation(W . src + b) for each image in the batch. */
void bcnn_forward_fullc_layer_cpu(bcnn_net *net, bcnn_node *node) {
bcnn_tensor *src_tensor = &net->tensors[node->src[0]];
bcnn_tensor *dst_tensor = &net->tensors[node->dst[0]];
/* src[1] = weights, src[2] = biases (wired in bcnn_add_fullc_layer). */
bcnn_tensor *weights = &net->tensors[node->src[1]];
bcnn_tensor *biases = &net->tensors[node->src[2]];
bcnn_fullc_param *param = (bcnn_fullc_param *)node->param;
int batch_size = dst_tensor->n;
int src_size = bcnn_tensor_size3d(src_tensor);
int dst_size = bcnn_tensor_size3d(dst_tensor);
int sz = bcnn_tensor_size(dst_tensor);
int spatial_size = bcnn_tensor_size2d(src_tensor);
/* Zero the output before the dot-product accumulation below. */
memset(dst_tensor->data, 0, dst_size * batch_size * sizeof(float));
for (int b = 0; b < batch_size; ++b) {
/* Each output channel p is an independent dot product -> parallelize. */
#pragma omp parallel for num_threads(net->num_threads)
for (int p = 0; p < dst_tensor->c; p++) {
float sum = 0.0f;
for (int q = 0; q < src_tensor->c; q++) {
/* Walk weights and input channel-by-channel, spatial_size at a time. */
float *w = weights->data + src_size * p + spatial_size * q;
float *x = src_tensor->data + b * src_size + q * spatial_size;
sum += bcnn_dot(spatial_size, x, w);
}
dst_tensor->data[b * dst_tensor->c + p] = sum;
}
}
/* Add the per-channel bias to every batch item. */
for (int i = 0; i < batch_size; ++i) {
bcnn_axpy(dst_size, 1, biases->data, dst_tensor->data + i * dst_size);
}
// TODO: prelu not supported
bcnn_forward_activation_cpu(dst_tensor->data, sz, NULL,
dst_tensor->w * dst_tensor->h, dst_tensor->c,
param->activation);
return;
}
/* Backward pass of the fully-connected layer on CPU.
 * Order: activation gradient (in place on the output gradient), bias
 * gradient, weight gradient, then the input gradient when the source
 * tensor tracks gradients. */
void bcnn_backward_fullc_layer_cpu(bcnn_net *net, bcnn_node *node) {
bcnn_tensor *src_tensor = &net->tensors[node->src[0]];
bcnn_tensor *dst_tensor = &net->tensors[node->dst[0]];
/* src[1] = weights, src[2] = biases (wired in bcnn_add_fullc_layer). */
bcnn_tensor *weights = &net->tensors[node->src[1]];
bcnn_tensor *biases = &net->tensors[node->src[2]];
bcnn_fullc_param *param = (bcnn_fullc_param *)node->param;
int batch_size = dst_tensor->n;
int src_size = bcnn_tensor_size3d(src_tensor);
int dst_size = bcnn_tensor_size3d(dst_tensor);
int sz = bcnn_tensor_size(dst_tensor);
/* Backprop through the activation, in place on the output gradient. */
bcnn_backward_activation_cpu(dst_tensor->data, dst_tensor->grad_data, sz,
NULL, NULL, dst_tensor->w * dst_tensor->h,
dst_tensor->c, param->activation);
/* Bias gradient: accumulate the output gradient over the batch. */
for (int i = 0; i < batch_size; ++i) {
bcnn_axpy(dst_size, 1, dst_tensor->grad_data + i * dst_size,
biases->grad_data);
}
/* Weight gradient: grad_W += dst_grad^T * src (beta = 1 accumulates). */
#ifdef BCNN_USE_BLAS
cblas_sgemm(CblasRowMajor, CblasTrans, CblasNoTrans, dst_size, src_size,
batch_size, 1.0f, dst_tensor->grad_data, dst_size,
src_tensor->data, src_size, 1.0f, weights->grad_data, src_size);
#else
// Original
bcnn_gemm(net->gemm_ctx, 1, 0, dst_size, src_size, batch_size, 1.0f,
dst_tensor->grad_data, dst_size, src_tensor->data, src_size, 1.0f,
weights->grad_data, src_size, net->num_threads);
#endif
/* Input gradient, only when the source tensor tracks gradients. */
if (src_tensor->grad_data) {
#ifdef BCNN_USE_BLAS
cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans, batch_size,
src_size, dst_size, 1.0f, dst_tensor->grad_data, dst_size,
weights->data, src_size, 1.0f, src_tensor->grad_data,
src_size);
#else
// Original
bcnn_gemm(net->gemm_ctx, 0, 0, batch_size, src_size, dst_size, 1.0f,
dst_tensor->grad_data, dst_size, weights->data, src_size,
1.0f, src_tensor->grad_data, src_size, net->num_threads);
#endif
}
return;
}
#ifdef BCNN_USE_CUDA
/* Forward pass of the fully-connected layer on the GPU: same math as the
 * CPU path, expressed as one batched GEMM plus a per-image bias axpy. */
void bcnn_forward_fullc_layer_gpu(bcnn_net *net, bcnn_node *node) {
bcnn_tensor *src_tensor = &net->tensors[node->src[0]];
bcnn_tensor *dst_tensor = &net->tensors[node->dst[0]];
/* src[1] = weights, src[2] = biases (wired in bcnn_add_fullc_layer). */
bcnn_tensor *weights = &net->tensors[node->src[1]];
bcnn_tensor *biases = &net->tensors[node->src[2]];
bcnn_fullc_param *param = (bcnn_fullc_param *)node->param;
int batch_size = dst_tensor->n;
int src_size = bcnn_tensor_size3d(src_tensor);
int dst_size = bcnn_tensor_size3d(dst_tensor);
int sz = bcnn_tensor_size(dst_tensor);
/* Clear the output, then accumulate src x weights into it via GEMM. */
bcnn_cuda_fill_f32(dst_size * batch_size, 0.0f, dst_tensor->data_gpu, 1);
bcnn_cuda_gemm(0, 1, batch_size, dst_size, src_size, 1,
src_tensor->data_gpu, src_size, weights->data_gpu, src_size,
1, dst_tensor->data_gpu, dst_size);
/* Add the bias vector to every image in the batch. */
for (int i = 0; i < batch_size; ++i) {
bcnn_cuda_axpy(dst_size, 1, biases->data_gpu, 1,
dst_tensor->data_gpu + i * dst_size, 1);
}
bcnn_forward_activation_gpu(dst_tensor->data_gpu, sz, param->activation);
return;
}
/* Backward pass of the fully-connected layer on the GPU; mirrors the CPU
 * path (activation grad, bias grad, weight grad, optional input grad)
 * using the device buffers. */
void bcnn_backward_fullc_layer_gpu(bcnn_net *net, bcnn_node *node) {
bcnn_tensor *src_tensor = &net->tensors[node->src[0]];
bcnn_tensor *dst_tensor = &net->tensors[node->dst[0]];
/* src[1] = weights, src[2] = biases (wired in bcnn_add_fullc_layer). */
bcnn_tensor *weights = &net->tensors[node->src[1]];
bcnn_tensor *biases = &net->tensors[node->src[2]];
bcnn_fullc_param *param = (bcnn_fullc_param *)node->param;
int batch_size = dst_tensor->n;
int src_size = bcnn_tensor_size3d(src_tensor);
int dst_size = bcnn_tensor_size3d(dst_tensor);
int sz = bcnn_tensor_size(dst_tensor);
/* Backprop through the activation, in place on the output gradient. */
bcnn_backward_activation_gpu(
dst_tensor->data_gpu, dst_tensor->grad_data_gpu, sz, param->activation);
/* Bias gradient: accumulate the output gradient over the batch. */
for (int i = 0; i < batch_size; ++i) {
bcnn_cuda_axpy(dst_size, 1, dst_tensor->grad_data_gpu + i * dst_size, 1,
biases->grad_data_gpu, 1);
}
/* Weight gradient (accumulated into grad_data_gpu). */
bcnn_cuda_gemm(1, 0, dst_size, src_size, batch_size, 1,
dst_tensor->grad_data_gpu, dst_size, src_tensor->data_gpu,
src_size, 1, weights->grad_data_gpu, src_size);
/* Input gradient, only when the source tensor tracks gradients. */
if (src_tensor->grad_data_gpu) {
bcnn_cuda_gemm(0, 0, batch_size, src_size, dst_size, 1,
dst_tensor->grad_data_gpu, dst_size, weights->data_gpu,
src_size, 1, src_tensor->grad_data_gpu, src_size);
}
return;
}
#endif
/* Dispatches the forward pass to the GPU or CPU implementation, chosen at
 * compile time via BCNN_USE_CUDA. */
void bcnn_forward_fullc_layer(bcnn_net *net, bcnn_node *node) {
#ifdef BCNN_USE_CUDA
return bcnn_forward_fullc_layer_gpu(net, node);
#else
return bcnn_forward_fullc_layer_cpu(net, node);
#endif
}
/* Dispatches the backward pass to the GPU or CPU implementation, chosen at
 * compile time via BCNN_USE_CUDA. */
void bcnn_backward_fullc_layer(bcnn_net *net, bcnn_node *node) {
#ifdef BCNN_USE_CUDA
return bcnn_backward_fullc_layer_gpu(net, node);
#else
return bcnn_backward_fullc_layer_cpu(net, node);
#endif
}
/* Applies one optimizer step to the layer's weights and biases.
 * Dispatches on net->learner->optimizer; unknown optimizers are a no-op. */
void bcnn_update_fullc_layer(bcnn_net *net, bcnn_node *node) {
    /* FIX: bcnn_add_fullc_layer explicitly tolerates net->learner == NULL
     * (inference-only networks); guard here too instead of dereferencing a
     * NULL learner below. */
    if (net->learner == NULL) {
        return;
    }
    bcnn_tensor *weights = &net->tensors[node->src[1]];
    bcnn_tensor *biases = &net->tensors[node->src[2]];
    bcnn_fullc_param *param = (bcnn_fullc_param *)node->param;
    int batch_size = net->batch_size;
    int weights_size = bcnn_tensor_size(weights);
    int biases_size = bcnn_tensor_size(biases);
    switch (net->learner->optimizer) {
        case BCNN_OPTIM_ADAM: {
#ifdef BCNN_USE_CUDA
            bcnn_adam_update_gpu(
                weights->data_gpu, biases->data_gpu, weights->grad_data_gpu,
                biases->grad_data_gpu, param->adam_m_gpu, param->adam_v_gpu,
                weights_size, biases_size, batch_size, net->learner->seen,
                net->learner->beta1, net->learner->beta2,
                net->learner->learning_rate, net->learner->momentum,
                net->learner->decay);
#else
            bcnn_adam_update_cpu(weights->data, biases->data,
                                 weights->grad_data, biases->grad_data,
                                 param->adam_m, param->adam_v, weights_size,
                                 biases_size, batch_size, net->learner->seen,
                                 net->learner->beta1, net->learner->beta2,
                                 net->learner->learning_rate,
                                 net->learner->momentum, net->learner->decay);
#endif
            break;
        }
        case BCNN_OPTIM_SGD: {
#ifdef BCNN_USE_CUDA
            bcnn_sgd_update_gpu(weights->data_gpu, biases->data_gpu,
                                weights->grad_data_gpu, biases->grad_data_gpu,
                                weights_size, biases_size, batch_size,
                                net->learner->learning_rate,
                                net->learner->momentum, net->learner->decay);
#else
            bcnn_sgd_update_cpu(weights->data, biases->data, weights->grad_data,
                                biases->grad_data, weights_size, biases_size,
                                batch_size, net->learner->learning_rate,
                                net->learner->momentum, net->learner->decay);
#endif
            break;
        }
        default: {
            /* Unknown optimizer: intentionally a no-op. */
            break;
        }
    }
}
/* Frees the optimizer scratch buffers owned by a fully-connected node.
 * NOTE(review): node->param itself is not freed here, matching the original
 * behavior -- presumably released by the generic node teardown; confirm
 * before adding free(node->param). */
void bcnn_release_param_fullc_layer(bcnn_node *node) {
    bcnn_fullc_param *param = (bcnn_fullc_param *)node->param;
    /* FIX: tolerate nodes whose param was never allocated. */
    if (param == NULL) {
        return;
    }
    bh_align_free(param->adam_m);
    param->adam_m = NULL; /* defend against double-free on repeated release */
    bh_align_free(param->adam_v);
    param->adam_v = NULL;
#ifdef BCNN_USE_CUDA
    if (param->adam_m_gpu) {
        bcnn_cuda_free(param->adam_m_gpu);
        param->adam_m_gpu = NULL;
    }
    if (param->adam_v_gpu) {
        bcnn_cuda_free(param->adam_v_gpu);
        param->adam_v_gpu = NULL;
    }
#endif
    return;
}
GB_unop__identity_int8_int64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_int8_int64)
// op(A') function: GB (_unop_tran__identity_int8_int64)
// C type: int8_t
// A type: int64_t
// cast: int8_t cij = (int8_t) aij
// unaryop: cij = aij
// type of the A (input) matrix entries
#define GB_ATYPE \
int64_t
// type of the C (output) matrix entries
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
// access entry p of C's value array
#define GB_CX(p) Cx [p]
// unary operator (identity: pass the value through)
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int8_t z = (int8_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int8_t z = (int8_t) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT8 || GxB_NO_INT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__identity_int8_int64)
(
    int8_t *Cx,                 // Cx and Ax may be aliased
    const int64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab != NULL)
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b.
        // Only entries whose bitmap flag is set are typecast.
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (Ab [p])
            {
                Cx [p] = (int8_t) Ax [p] ;
            }
        }
    }
    else
    {
        // dense case: typecast every entry of Ax into Cx
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            Cx [p] = (int8_t) Ax [p] ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__identity_int8_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// The transpose kernel body comes from the shared template below, which
// expands the GB_* macros defined above for this int8/int64 type pair.
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unop__trunc_fp32_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__trunc_fp32_fp32)
// op(A') function: GB (_unop_tran__trunc_fp32_fp32)
// C type: float
// A type: float
// cast: float cij = aij
// unaryop: cij = truncf (aij)
// type of the A (input) matrix entries
#define GB_ATYPE \
float
// type of the C (output) matrix entries
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
// access entry p of C's value array
#define GB_CX(p) Cx [p]
// unary operator (single-precision truncation toward zero)
#define GB_OP(z, x) \
z = truncf (x) ;
// casting (no typecast needed: fp32 -> fp32)
#define GB_CAST(z, aij) \
float z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = aij ; \
Cx [pC] = truncf (z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_TRUNC || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__trunc_fp32_fp32)
(
    float *Cx,                  // Cx and Ax may be aliased
    const float *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time
    if (Ab != NULL)
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b.
        // Only entries whose bitmap flag is set are truncated.
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (Ab [p])
            {
                Cx [p] = truncf (Ax [p]) ;
            }
        }
    }
    else
    {
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        // identity op without typecast: a bulk copy suffices
        GB_memcpy (Cx, Ax, anz * sizeof (float), nthreads) ;
        #else
        // dense case: truncate every entry of Ax into Cx
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            Cx [p] = truncf (Ax [p]) ;
        }
        #endif
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__trunc_fp32_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// The transpose kernel body comes from the shared template below, which
// expands the GB_* macros defined above for this fp32 operator.
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
reduction_utilities.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Riccardo Rossi
// Denis Demidov
//
#if !defined(KRATOS_REDUCTION_UTILITIES_H_INCLUDED )
#define KRATOS_REDUCTION_UTILITIES_H_INCLUDED
// System includes
#include <tuple>
#include <limits>
#include <algorithm>
// External includes
// Project includes
#include "includes/define.h"
#include "utilities/atomic_utilities.h"
namespace Kratos
{
///@addtogroup KratosCore
/** @brief Reduction helper that accumulates the sum of the reduced values. */
template<class TDataType, class TReturnType = TDataType>
class SumReduction
{
public:
    using value_type = TDataType;
    using return_type = TReturnType;

    // Public on purpose: callers may seed or adjust the running total directly.
    TReturnType mValue = TReturnType();

    /// Returns the value reduced so far.
    TReturnType GetValue() const
    {
        return mValue;
    }

    /// Fast, NON-THREADSAFE accumulation; call only from the owning thread.
    void LocalReduce(const TDataType value)
    {
        mValue += value;
    }

    /// THREADSAFE merge of another thread's partial sum.
    void ThreadSafeReduce(const SumReduction<TDataType, TReturnType>& rOther)
    {
        AtomicAdd(mValue, rOther.mValue);
    }
};
//***********************************************************************************
//***********************************************************************************
//***********************************************************************************
/** @brief Reduction helper that accumulates the negated sum of the reduced values. */
template<class TDataType, class TReturnType = TDataType>
class SubReduction
{
public:
    using value_type = TDataType;
    using return_type = TReturnType;

    // Public on purpose: callers may seed or adjust the running value directly.
    TReturnType mValue = TReturnType();

    /// Returns the value reduced so far.
    TReturnType GetValue() const
    {
        return mValue;
    }

    /// Fast, NON-THREADSAFE accumulation; call only from the owning thread.
    void LocalReduce(const TDataType value)
    {
        mValue -= value;
    }

    /// THREADSAFE merge of another thread's partial result; partial
    /// differences combine by addition.
    void ThreadSafeReduce(const SubReduction<TDataType, TReturnType>& rOther)
    {
        AtomicAdd(mValue, rOther.mValue);
    }
};
//***********************************************************************************
//***********************************************************************************
//***********************************************************************************
/** @brief Reduction helper that tracks the maximum of the reduced values. */
template<class TDataType, class TReturnType = TDataType>
class MaxReduction
{
public:
    using value_type = TDataType;
    using return_type = TReturnType;

    // Public on purpose; seeded with the lowest representable value so any
    // reduced value replaces it.
    TReturnType mValue = std::numeric_limits<TReturnType>::lowest();

    /// Returns the value reduced so far.
    TReturnType GetValue() const
    {
        return mValue;
    }

    /// Fast, NON-THREADSAFE reduction; call only from the owning thread.
    void LocalReduce(const TDataType value)
    {
        mValue = std::max(mValue, value);
    }

    /// THREADSAFE merge of another thread's partial maximum.
    void ThreadSafeReduce(const MaxReduction<TDataType, TReturnType>& rOther)
    {
        #pragma omp critical
        mValue = std::max(mValue, rOther.mValue);
    }
};
//***********************************************************************************
//***********************************************************************************
//***********************************************************************************
/** @brief Reduction helper that tracks the minimum of the reduced values. */
template<class TDataType, class TReturnType = TDataType>
class MinReduction
{
public:
    using value_type = TDataType;
    using return_type = TReturnType;

    // Public on purpose; seeded with the largest representable value so any
    // reduced value replaces it.
    TReturnType mValue = std::numeric_limits<TReturnType>::max();

    /// Returns the value reduced so far.
    TReturnType GetValue() const
    {
        return mValue;
    }

    /// Fast, NON-THREADSAFE reduction; call only from the owning thread.
    void LocalReduce(const TDataType value)
    {
        mValue = std::min(mValue, value);
    }

    /// THREADSAFE merge of another thread's partial minimum.
    void ThreadSafeReduce(const MinReduction<TDataType, TReturnType>& rOther)
    {
        #pragma omp critical
        mValue = std::min(mValue, rOther.mValue);
    }
};
//***********************************************************************************
//***********************************************************************************
//***********************************************************************************
/** @brief Reduction helper that gathers every reduced value into a container. */
template<class TDataType, class TReturnType = std::vector<TDataType>>
class AccumReduction
{
public:
    using value_type = TDataType;
    using return_type = TReturnType;

    // Public on purpose: callers may inspect or pre-fill the container.
    TReturnType mValue = TReturnType();

    /// Returns the container of values gathered so far.
    TReturnType GetValue() const
    {
        return mValue;
    }

    /// Fast, NON-THREADSAFE append; call only from the owning thread.
    void LocalReduce(const TDataType value)
    {
        mValue.push_back(value);
    }

    /// THREADSAFE merge: appends the other thread's values to this container.
    void ThreadSafeReduce(const AccumReduction<TDataType, TReturnType>& rOther)
    {
        #pragma omp critical
        mValue.insert(mValue.end(), rOther.mValue.begin(), rOther.mValue.end());
    }
};
/** @brief Composes several reducers so one parallel loop can produce multiple
 *  reductions at once; values are supplied and returned as std::tuples with
 *  one slot per child reducer. Member-template recursion (guarded by
 *  enable_if on the index I) walks the tuple at compile time. */
template <class... Reducer>
struct CombinedReduction {
typedef std::tuple<typename Reducer::value_type...> value_type;
typedef std::tuple<typename Reducer::return_type...> return_type;
// One child reducer instance per template argument.
std::tuple<Reducer...> mChild;
CombinedReduction() {}
/// access to reduced value (gathers every child's result into a tuple)
return_type GetValue(){
return_type return_value;
fill_value<0>(return_value);
return return_value;
}
// Recursive case: copy child I's value into slot I, then recurse to I+1.
template <int I, class T>
typename std::enable_if<(I < sizeof...(Reducer)), void>::type
fill_value(T& v) {
std::get<I>(v) = std::get<I>(mChild).GetValue();
fill_value<I+1>(v);
};
// Base case: I == sizeof...(Reducer) ends the static recursion.
template <int I, class T>
typename std::enable_if<(I == sizeof...(Reducer)), void>::type
fill_value(T& v) {}
/// NON-THREADSAFE (fast) value of reduction, to be used within a single thread
template <class... T>
void LocalReduce(const std::tuple<T...> &&v) {
// Static recursive loop over tuple elements
reduce_local<0>(v);
}
/// THREADSAFE (needs some sort of lock guard) reduction, to be used to sync threads
void ThreadSafeReduce(const CombinedReduction &other) {
reduce_global<0>(other);
}
private:
// Recursive case: forward element I of the value tuple to child reducer I.
template <int I, class T>
typename std::enable_if<(I < sizeof...(Reducer)), void>::type
reduce_local(T &&v) {
std::get<I>(mChild).LocalReduce(std::get<I>(v));
reduce_local<I+1>(std::forward<T>(v));
};
template <int I, class T>
typename std::enable_if<(I == sizeof...(Reducer)), void>::type
reduce_local(T &&v) {
// Exit static recursion
}
// Recursive case: merge child I with the matching child of `other`.
template <int I>
typename std::enable_if<(I < sizeof...(Reducer)), void>::type
reduce_global(const CombinedReduction &other) {
std::get<I>(mChild).ThreadSafeReduce(std::get<I>(other.mChild));
reduce_global<I+1>(other);
}
template <int I>
typename std::enable_if<(I == sizeof...(Reducer)), void>::type
reduce_global(const CombinedReduction &other) {
// Exit static recursion
}
};
} // namespace Kratos.
#endif // KRATOS_REDUCTION_UTILITIES_H_INCLUDED defined
|
convolution_1x1_pack8to1_int8.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void conv1x1s1_sgemm_pack8to1_int8_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
const int size = w * h;
Mat bottom_im2col = bottom_blob;
bottom_im2col.w = size;
bottom_im2col.h = 1;
im2col_sgemm_pack8to1_int8_sse(bottom_im2col, top_blob, kernel, opt);
}
static void conv1x1s2_sgemm_pack8to1_int8_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt)
{
int w = bottom_blob.w;
int channels = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
int outw = top_blob.w;
int outh = top_blob.h;
const int tailstep = w - 2 * outw + w;
Mat bottom_blob_shrinked;
bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < channels; p++)
{
const int64_t* r0 = bottom_blob.channel(p);
int64_t* outptr = bottom_blob_shrinked.channel(p);
for (int i = 0; i < outh; i++)
{
int j = 0;
for (; j < outw; j++)
{
outptr[0] = r0[0];
r0 += 2;
outptr += 1;
}
r0 += tailstep;
}
}
conv1x1s1_sgemm_pack8to1_int8_sse(bottom_blob_shrinked, top_blob, kernel, opt);
}
|
omp_for_nowait.c | // RUN: %libomp-compile-and-run
#include <stdio.h>
#include "omp_testsuite.h"
#include "omp_my_sleep.h"
/* Verifies the nowait clause on a worksharing loop: thread 0 sleeps inside
 * the first loop before setting count, so if nowait works, other threads
 * reach the second loop while count is still 0 and set result.
 * Returns 1 (pass) when nowait let threads run ahead, 0 otherwise.
 * FIX: removed unused locals `j`, `myarray`, and `rank` (the
 * omp_get_thread_num() result was never read). */
int test_omp_for_nowait()
{
  int result;
  int count;

  result = 0;
  count = 0;

  #pragma omp parallel
  {
    int i;

    #pragma omp for nowait
    for (i = 0; i < LOOPCOUNT; i++) {
      if (i == 0) {
        my_sleep(SLEEPTIME);
        count = 1;
        #pragma omp flush(count)
      }
    }

    #pragma omp for
    for (i = 0; i < LOOPCOUNT; i++) {
      #pragma omp flush(count)
      if (count == 0)
        result = 1;
    }
  }

  return result;
}
int main()
{
  /* Repeat the check several times to catch intermittent scheduling
     failures; the exit code is the number of failed repetitions. */
  int num_failed = 0;
  int rep;

  for (rep = 0; rep < REPETITIONS; rep++) {
    if (!test_omp_for_nowait()) {
      num_failed++;
    }
  }

  return num_failed;
}
|
memory.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* Memory management utilities
*
*****************************************************************************/
#include "_hypre_utilities.h"
#include "_hypre_utilities.hpp"
#ifdef HYPRE_USE_UMALLOC
#undef HYPRE_USE_UMALLOC
#endif
/******************************************************************************
*
* Helper routines
*
*****************************************************************************/
/*--------------------------------------------------------------------------
* hypre_OutOfMemory
*--------------------------------------------------------------------------*/
/* Reports an allocation failure.
 * FIX: flush stdout *before* hypre_assert, which may abort in debug builds
 * and would otherwise drop any buffered output. */
static inline void
hypre_OutOfMemory(size_t size)
{
   /* NOTE(review): `size` is not included in the message; kept for API
      symmetry with the allocation helpers. */
   (void) size;
   hypre_error_w_msg(HYPRE_ERROR_MEMORY, "Out of memory trying to allocate too many bytes\n");
   fflush(stdout);
   hypre_assert(0);
}
/* Reports use of an unsupported hypre_MemoryLocation value.
 * FIX: flush stdout *before* hypre_assert, which may abort in debug builds
 * and would otherwise drop any buffered output. */
static inline void
hypre_WrongMemoryLocation()
{
   hypre_error_w_msg(HYPRE_ERROR_MEMORY,
                     "Wrong HYPRE MEMORY location: Only HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE and HYPRE_MEMORY_HOST_PINNED are supported!\n");
   fflush(stdout);
   hypre_assert(0);
}
/*==========================================================================
* Physical memory location (hypre_MemoryLocation) interface
*==========================================================================*/
/*--------------------------------------------------------------------------
* Memset
*--------------------------------------------------------------------------*/
/* Host-side fill: set the first nbytes bytes at dst to (unsigned char) byte_val.
 * Thin wrapper over memset so all memset-style paths share one entry point. */
static inline void
hypre_HostMemset(void *dst, HYPRE_Int byte_val, size_t nbytes)
{
   memset(dst, byte_val, nbytes);
}
/* Set `num` bytes at device pointer `ptr` to (unsigned char) `value`.
 * The active branch is selected at compile time by the GPU backend macros. */
static inline void
hypre_DeviceMemset(void *ptr, HYPRE_Int value, size_t num)
{
#if defined(HYPRE_USING_DEVICE_OPENMP)
#if defined(HYPRE_DEVICE_OPENMP_ALLOC)
/* ptr is a device pointer: fill it directly in an offloaded loop */
#pragma omp target teams distribute parallel for is_device_ptr(ptr)
for (size_t i = 0; i < num; i++)
{
((unsigned char *) ptr)[i] = (unsigned char) value;
}
#else
/* mapped-memory mode: set on the host, then push the range to the device */
memset(ptr, value, num);
HYPRE_OMPOffload(hypre__offload_device_num, ptr, num, "update", "to");
#endif
/* HYPRE_CUDA_CALL( cudaDeviceSynchronize() ); */
#endif
#if defined(HYPRE_USING_CUDA)
HYPRE_CUDA_CALL( cudaMemset(ptr, value, num) );
#endif
#if defined(HYPRE_USING_HIP)
HYPRE_HIP_CALL( hipMemset(ptr, value, num) );
#endif
#if defined(HYPRE_USING_SYCL)
/* synchronous: wait for the queued memset to complete */
HYPRE_SYCL_CALL( (hypre_HandleComputeStream(hypre_handle()))->memset(ptr, value, num).wait() );
#endif
}
/* Set `num` bytes of unified (managed) memory at `ptr` to (unsigned char) `value`.
 * Mirrors hypre_DeviceMemset: for all current backends a managed pointer can be
 * passed to the same device memset primitive. */
static inline void
hypre_UnifiedMemset(void *ptr, HYPRE_Int value, size_t num)
{
#if defined(HYPRE_USING_DEVICE_OPENMP)
#if defined(HYPRE_DEVICE_OPENMP_ALLOC)
#pragma omp target teams distribute parallel for is_device_ptr(ptr)
for (size_t i = 0; i < num; i++)
{
((unsigned char *) ptr)[i] = (unsigned char) value;
}
#else
/* mapped-memory mode: set on host, then push to device copy */
memset(ptr, value, num);
HYPRE_OMPOffload(hypre__offload_device_num, ptr, num, "update", "to");
#endif
/* HYPRE_CUDA_CALL( cudaDeviceSynchronize() ); */
#endif
#if defined(HYPRE_USING_CUDA)
HYPRE_CUDA_CALL( cudaMemset(ptr, value, num) );
#endif
#if defined(HYPRE_USING_HIP)
HYPRE_HIP_CALL( hipMemset(ptr, value, num) );
#endif
#if defined(HYPRE_USING_SYCL)
HYPRE_SYCL_CALL( (hypre_HandleComputeStream(hypre_handle()))->memset(ptr, value, num).wait() );
#endif
}
/*--------------------------------------------------------------------------
* Memprefetch
*--------------------------------------------------------------------------*/
/* Asynchronously prefetch `size` bytes of unified memory at `ptr` toward
 * `location` (device or host). No-op for backends without prefetch support.
 * Precondition (checked in debug GPU builds): ptr is managed/unified memory. */
static inline void
hypre_UnifiedMemPrefetch(void *ptr, size_t size, hypre_MemoryLocation location)
{
#if defined(HYPRE_USING_GPU)
#ifdef HYPRE_DEBUG
hypre_MemoryLocation tmp;
hypre_GetPointerLocation(ptr, &tmp);
/* do not use hypre_assert, which has alloc and free;
 * will create an endless loop otherwise */
assert(hypre_MEMORY_UNIFIED == tmp);
#endif
#endif
#if defined(HYPRE_USING_CUDA)
if (location == hypre_MEMORY_DEVICE)
{
HYPRE_CUDA_CALL( cudaMemPrefetchAsync(ptr, size, hypre_HandleDevice(hypre_handle()),
hypre_HandleComputeStream(hypre_handle())) );
}
else if (location == hypre_MEMORY_HOST)
{
/* cudaCpuDeviceId targets host memory */
HYPRE_CUDA_CALL( cudaMemPrefetchAsync(ptr, size, cudaCpuDeviceId,
hypre_HandleComputeStream(hypre_handle())) );
}
#endif
#if defined(HYPRE_USING_HIP)
// Not currently implemented for HIP, but leaving place holder
/*
 *if (location == hypre_MEMORY_DEVICE)
 *{
 *   HYPRE_HIP_CALL( hipMemPrefetchAsync(ptr, size, hypre_HandleDevice(hypre_handle()),
 *                   hypre_HandleComputeStream(hypre_handle())) );
 *}
 *else if (location == hypre_MEMORY_HOST)
 *{
 *   HYPRE_HIP_CALL( hipMemPrefetchAsync(ptr, size, hipCpuDeviceId,
 *                   hypre_HandleComputeStream(hypre_handle())) );
 *}
 */
#endif
#if defined(HYPRE_USING_SYCL)
if (location == hypre_MEMORY_DEVICE)
{
HYPRE_SYCL_CALL( hypre_HandleComputeStream(hypre_handle())->prefetch(ptr, size).wait() );
}
#endif
}
/*--------------------------------------------------------------------------
* Malloc
*--------------------------------------------------------------------------*/
/* Allocate `size` bytes of plain host memory, zero-initialized when
 * `zeroinit` is nonzero. Returns NULL on failure; the caller
 * (hypre_MAlloc_core) is responsible for reporting out-of-memory. */
static inline void *
hypre_HostMalloc(size_t size, HYPRE_Int zeroinit)
{
   void *ptr = NULL;
#if defined(HYPRE_USING_UMPIRE_HOST)
   hypre_umpire_host_pooled_allocate(&ptr, size);
   /* guard against a failed pooled allocation: memset(NULL, ...) is UB */
   if (ptr && zeroinit)
   {
      memset(ptr, 0, size);
   }
#else
   if (zeroinit)
   {
      /* calloc zero-initializes in one call */
      ptr = calloc(size, 1);
   }
   else
   {
      ptr = malloc(size);
   }
#endif
   return ptr;
}
/* Allocate `size` bytes of device memory, zero-initialized on request.
 * A user-registered device allocator takes precedence; otherwise the
 * backend selected at compile time (Umpire pool, OpenMP offload, CUDA
 * caching pool or cudaMalloc, HIP, SYCL) is used. Returns NULL on failure. */
static inline void *
hypre_DeviceMalloc(size_t size, HYPRE_Int zeroinit)
{
void *ptr = NULL;
if ( hypre_HandleUserDeviceMalloc(hypre_handle()) )
{
/* user-supplied allocator callback */
hypre_HandleUserDeviceMalloc(hypre_handle())(&ptr, size);
}
else
{
#if defined(HYPRE_USING_UMPIRE_DEVICE)
hypre_umpire_device_pooled_allocate(&ptr, size);
#else
#if defined(HYPRE_USING_DEVICE_OPENMP)
#if defined(HYPRE_DEVICE_OPENMP_ALLOC)
ptr = omp_target_alloc(size, hypre__offload_device_num);
#else
/* mapped mode: stash the size in a hidden header so the matching free
 * (hypre_DeviceFree) can read it back via ((size_t *) ptr)[-1] */
ptr = malloc(size + sizeof(size_t));
size_t *sp = (size_t*) ptr;
sp[0] = size;
ptr = (void *) (&sp[1]);
HYPRE_OMPOffload(hypre__offload_device_num, ptr, size, "enter", "alloc");
#endif
#endif
#if defined(HYPRE_USING_CUDA)
#if defined(HYPRE_USING_DEVICE_POOL)
HYPRE_CUDA_CALL( hypre_CachingMallocDevice(&ptr, size) );
#else
HYPRE_CUDA_CALL( cudaMalloc(&ptr, size) );
#endif
#endif
#if defined(HYPRE_USING_HIP)
HYPRE_HIP_CALL( hipMalloc(&ptr, size) );
#endif
#if defined(HYPRE_USING_SYCL)
ptr = (void *)sycl::malloc_device(size, *(hypre_HandleComputeStream(hypre_handle())));
#endif
#endif /* #if defined(HYPRE_USING_UMPIRE_DEVICE) */
}
if (ptr && zeroinit)
{
hypre_DeviceMemset(ptr, 0, size);
}
return ptr;
}
/* Allocate `size` bytes of unified (managed) memory, zero-initialized on
 * request, then prefetch the range to the device. Returns NULL on failure. */
static inline void *
hypre_UnifiedMalloc(size_t size, HYPRE_Int zeroinit)
{
void *ptr = NULL;
#if defined(HYPRE_USING_UMPIRE_UM)
hypre_umpire_um_pooled_allocate(&ptr, size);
#else
#if defined(HYPRE_USING_DEVICE_OPENMP)
#if defined(HYPRE_DEVICE_OPENMP_ALLOC)
ptr = omp_target_alloc(size, hypre__offload_device_num);
#else
/* mapped mode: hidden size header, see hypre_DeviceMalloc */
ptr = malloc(size + sizeof(size_t));
size_t *sp = (size_t*) ptr;
sp[0] = size;
ptr = (void *) (&sp[1]);
HYPRE_OMPOffload(hypre__offload_device_num, ptr, size, "enter", "alloc");
#endif
#endif
#if defined(HYPRE_USING_CUDA)
#if defined(HYPRE_USING_DEVICE_POOL)
HYPRE_CUDA_CALL( hypre_CachingMallocManaged(&ptr, size) );
#else
HYPRE_CUDA_CALL( cudaMallocManaged(&ptr, size, cudaMemAttachGlobal) );
#endif
#endif
#if defined(HYPRE_USING_HIP)
HYPRE_HIP_CALL( hipMallocManaged(&ptr, size, hipMemAttachGlobal) );
#endif
#if defined(HYPRE_USING_SYCL)
HYPRE_SYCL_CALL( ptr = (void *)sycl::malloc_shared(size,
*(hypre_HandleComputeStream(hypre_handle()))) );
#endif
#endif /* #if defined(HYPRE_USING_UMPIRE_UM) */
/* prefetch to device so first device access does not page-fault */
if (ptr)
{
hypre_UnifiedMemPrefetch(ptr, size, hypre_MEMORY_DEVICE);
}
if (ptr && zeroinit)
{
hypre_UnifiedMemset(ptr, 0, size);
}
return ptr;
}
/* Allocate `size` bytes of page-locked (pinned) host memory, zero-initialized
 * on request. Pinned memory enables faster host<->device transfers.
 * Returns NULL on failure. */
static inline void *
hypre_HostPinnedMalloc(size_t size, HYPRE_Int zeroinit)
{
void *ptr = NULL;
#if defined(HYPRE_USING_UMPIRE_PINNED)
hypre_umpire_pinned_pooled_allocate(&ptr, size);
#else
#if defined(HYPRE_USING_CUDA)
HYPRE_CUDA_CALL( cudaMallocHost(&ptr, size) );
#endif
#if defined(HYPRE_USING_HIP)
HYPRE_HIP_CALL( hipHostMalloc(&ptr, size) );
#endif
#if defined(HYPRE_USING_SYCL)
HYPRE_SYCL_CALL( ptr = (void *)sycl::malloc_host(size,
*(hypre_HandleComputeStream(hypre_handle()))) );
#endif
#endif /* #if defined(HYPRE_USING_UMPIRE_PINNED) */
if (ptr && zeroinit)
{
/* pinned memory is host-addressable, so a plain memset suffices */
hypre_HostMemset(ptr, 0, size);
}
return ptr;
}
/* Dispatch an allocation of `size` bytes (optionally zero-initialized) to
 * the allocator for the given physical memory location.
 * A zero-byte request yields NULL; allocation failure is fatal (MPI abort). */
static inline void *
hypre_MAlloc_core(size_t size, HYPRE_Int zeroinit, hypre_MemoryLocation location)
{
   void *ptr = NULL;
   if (size == 0)
   {
      return NULL;
   }
   if (location == hypre_MEMORY_HOST)
   {
      ptr = hypre_HostMalloc(size, zeroinit);
   }
   else if (location == hypre_MEMORY_DEVICE)
   {
      ptr = hypre_DeviceMalloc(size, zeroinit);
   }
   else if (location == hypre_MEMORY_UNIFIED)
   {
      ptr = hypre_UnifiedMalloc(size, zeroinit);
   }
   else if (location == hypre_MEMORY_HOST_PINNED)
   {
      ptr = hypre_HostPinnedMalloc(size, zeroinit);
   }
   else
   {
      hypre_WrongMemoryLocation();
   }
   if (!ptr)
   {
      /* out of memory is unrecoverable here: report and abort the job */
      hypre_OutOfMemory(size);
      hypre_MPI_Abort(hypre_MPI_COMM_WORLD, -1);
   }
   return ptr;
}
/* Internal entry point: allocate `size` bytes (uninitialized) at a
 * *physical* memory location (hypre_MemoryLocation, not the public enum). */
void *
_hypre_MAlloc(size_t size, hypre_MemoryLocation location)
{
return hypre_MAlloc_core(size, 0, location);
}
/*--------------------------------------------------------------------------
* Free
*--------------------------------------------------------------------------*/
/* Free plain host memory obtained from hypre_HostMalloc
 * (Umpire pool or libc, matching the build configuration). */
static inline void
hypre_HostFree(void *ptr)
{
#if defined(HYPRE_USING_UMPIRE_HOST)
hypre_umpire_host_pooled_free(ptr);
#else
free(ptr);
#endif
}
/* Free device memory obtained from hypre_DeviceMalloc, using the same
 * backend that allocated it (user callback, Umpire, OpenMP, CUDA, HIP, SYCL). */
static inline void
hypre_DeviceFree(void *ptr)
{
if ( hypre_HandleUserDeviceMfree(hypre_handle()) )
{
/* user-supplied free callback pairs with the user-supplied malloc */
hypre_HandleUserDeviceMfree(hypre_handle())(ptr);
}
else
{
#if defined(HYPRE_USING_UMPIRE_DEVICE)
hypre_umpire_device_pooled_free(ptr);
#else
#if defined(HYPRE_USING_DEVICE_OPENMP)
#if defined(HYPRE_DEVICE_OPENMP_ALLOC)
omp_target_free(ptr, hypre__offload_device_num);
#else
/* mapped mode: the allocation size was stashed just before ptr */
HYPRE_OMPOffload(hypre__offload_device_num, ptr, ((size_t *) ptr)[-1], "exit", "delete");
#endif
#endif
#if defined(HYPRE_USING_CUDA)
#if defined(HYPRE_USING_DEVICE_POOL)
HYPRE_CUDA_CALL( hypre_CachingFreeDevice(ptr) );
#else
HYPRE_CUDA_CALL( cudaFree(ptr) );
#endif
#endif
#if defined(HYPRE_USING_HIP)
HYPRE_HIP_CALL( hipFree(ptr) );
#endif
#if defined(HYPRE_USING_SYCL)
HYPRE_SYCL_CALL( sycl::free(ptr, *(hypre_HandleComputeStream(hypre_handle()))) );
#endif
#endif /* #if defined(HYPRE_USING_UMPIRE_DEVICE) */
}
}
/* Free unified (managed) memory obtained from hypre_UnifiedMalloc,
 * using the matching backend for this build. */
static inline void
hypre_UnifiedFree(void *ptr)
{
#if defined(HYPRE_USING_UMPIRE_UM)
hypre_umpire_um_pooled_free(ptr);
#else
#if defined(HYPRE_USING_DEVICE_OPENMP)
#if defined(HYPRE_DEVICE_OPENMP_ALLOC)
omp_target_free(ptr, hypre__offload_device_num);
#else
/* mapped mode: hidden size header precedes ptr (see hypre_UnifiedMalloc) */
HYPRE_OMPOffload(hypre__offload_device_num, ptr, ((size_t *) ptr)[-1], "exit", "delete");
#endif
#endif
#if defined(HYPRE_USING_CUDA)
#if defined(HYPRE_USING_DEVICE_POOL)
HYPRE_CUDA_CALL( hypre_CachingFreeManaged(ptr) );
#else
HYPRE_CUDA_CALL( cudaFree(ptr) );
#endif
#endif
#if defined(HYPRE_USING_HIP)
HYPRE_HIP_CALL( hipFree(ptr) );
#endif
#if defined(HYPRE_USING_SYCL)
HYPRE_SYCL_CALL( sycl::free(ptr, *(hypre_HandleComputeStream(hypre_handle()))) );
#endif
#endif /* #if defined(HYPRE_USING_UMPIRE_UM) */
}
/* Free page-locked host memory obtained from hypre_HostPinnedMalloc. */
static inline void
hypre_HostPinnedFree(void *ptr)
{
#if defined(HYPRE_USING_UMPIRE_PINNED)
hypre_umpire_pinned_pooled_free(ptr);
#else
#if defined(HYPRE_USING_CUDA)
HYPRE_CUDA_CALL( cudaFreeHost(ptr) );
#endif
#if defined(HYPRE_USING_HIP)
HYPRE_HIP_CALL( hipHostFree(ptr) );
#endif
#if defined(HYPRE_USING_SYCL)
HYPRE_SYCL_CALL( sycl::free(ptr, *(hypre_HandleComputeStream(hypre_handle()))) );
#endif
#endif /* #if defined(HYPRE_USING_UMPIRE_PINNED) */
}
/* Dispatch a free to the deallocator for the given physical memory location.
 * NULL is a no-op. In debug GPU builds, verifies that the pointer actually
 * resides where the caller claims. */
static inline void
hypre_Free_core(void *ptr, hypre_MemoryLocation location)
{
if (!ptr)
{
return;
}
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) || defined(HYPRE_USING_SYCL)
#ifdef HYPRE_DEBUG
hypre_MemoryLocation tmp;
hypre_GetPointerLocation(ptr, &tmp);
/* do not use hypre_assert, which has alloc and free;
 * will create an endless loop otherwise */
assert(location == tmp);
#endif
#endif
switch (location)
{
case hypre_MEMORY_HOST :
hypre_HostFree(ptr);
break;
case hypre_MEMORY_DEVICE :
hypre_DeviceFree(ptr);
break;
case hypre_MEMORY_UNIFIED :
hypre_UnifiedFree(ptr);
break;
case hypre_MEMORY_HOST_PINNED :
hypre_HostPinnedFree(ptr);
break;
default :
hypre_WrongMemoryLocation();
}
}
/* Internal entry point: free `ptr` at a *physical* memory location
 * (counterpart of _hypre_MAlloc). */
void
_hypre_Free(void *ptr, hypre_MemoryLocation location)
{
hypre_Free_core(ptr, location);
}
/*--------------------------------------------------------------------------
* Memcpy
*--------------------------------------------------------------------------*/
/* Copy `size` bytes from `src` (in loc_src) to `dst` (in loc_dst).
 * The 4x4 = 16 (loc_dst, loc_src) combinations are collapsed into the
 * groups below; each group issues the transfer primitive appropriate for
 * the compile-time backend. NULL endpoints with size > 0 are reported and
 * asserted; dst == src is a no-op. */
static inline void
hypre_Memcpy_core(void *dst, void *src, size_t size, hypre_MemoryLocation loc_dst,
hypre_MemoryLocation loc_src)
{
#if defined(HYPRE_USING_SYCL)
sycl::queue* q = hypre_HandleComputeStream(hypre_handle());
#endif
if (dst == NULL || src == NULL)
{
if (size)
{
/* NOTE(review): "%ld" is used to print a size_t; %zu would be the
 * portable specifier -- confirm hypre_printf's handling */
hypre_printf("hypre_Memcpy warning: copy %ld bytes from %p to %p !\n", size, src, dst);
hypre_assert(0);
}
return;
}
if (dst == src)
{
return;
}
/* Totally 4 x 4 = 16 cases */
/* 4: Host <-- Host, Host <-- Pinned,
 * Pinned <-- Host, Pinned <-- Pinned.
 */
if ( loc_dst != hypre_MEMORY_DEVICE && loc_dst != hypre_MEMORY_UNIFIED &&
loc_src != hypre_MEMORY_DEVICE && loc_src != hypre_MEMORY_UNIFIED )
{
/* both sides host-addressable: plain memcpy */
memcpy(dst, src, size);
return;
}
/* 3: UVM <-- Device, Device <-- UVM, UVM <-- UVM */
if ( (loc_dst == hypre_MEMORY_UNIFIED && loc_src == hypre_MEMORY_DEVICE) ||
(loc_dst == hypre_MEMORY_DEVICE && loc_src == hypre_MEMORY_UNIFIED) ||
(loc_dst == hypre_MEMORY_UNIFIED && loc_src == hypre_MEMORY_UNIFIED) )
{
#if defined(HYPRE_USING_DEVICE_OPENMP)
omp_target_memcpy(dst, src, size, 0, 0, hypre__offload_device_num, hypre__offload_device_num);
#endif
#if defined(HYPRE_USING_CUDA)
HYPRE_CUDA_CALL( cudaMemcpy(dst, src, size, cudaMemcpyDeviceToDevice) );
#endif
#if defined(HYPRE_USING_HIP)
HYPRE_HIP_CALL( hipMemcpy(dst, src, size, hipMemcpyDeviceToDevice) );
#endif
#if defined(HYPRE_USING_SYCL)
HYPRE_SYCL_CALL( q->memcpy(dst, src, size).wait() );
#endif
return;
}
/* 2: UVM <-- Host, UVM <-- Pinned */
if (loc_dst == hypre_MEMORY_UNIFIED)
{
#if defined(HYPRE_USING_DEVICE_OPENMP)
omp_target_memcpy(dst, src, size, 0, 0, hypre__offload_device_num, hypre__offload_host_num);
#endif
#if defined(HYPRE_USING_CUDA)
HYPRE_CUDA_CALL( cudaMemcpy(dst, src, size, cudaMemcpyHostToDevice) );
#endif
#if defined(HYPRE_USING_HIP)
HYPRE_HIP_CALL( hipMemcpy(dst, src, size, hipMemcpyHostToDevice) );
#endif
#if defined(HYPRE_USING_SYCL)
HYPRE_SYCL_CALL( q->memcpy(dst, src, size).wait() );
#endif
return;
}
/* 2: Host <-- UVM, Pinned <-- UVM */
if (loc_src == hypre_MEMORY_UNIFIED)
{
#if defined(HYPRE_USING_DEVICE_OPENMP)
omp_target_memcpy(dst, src, size, 0, 0, hypre__offload_host_num, hypre__offload_device_num);
#endif
#if defined(HYPRE_USING_CUDA)
HYPRE_CUDA_CALL( cudaMemcpy(dst, src, size, cudaMemcpyDeviceToHost) );
#endif
#if defined(HYPRE_USING_HIP)
HYPRE_HIP_CALL( hipMemcpy(dst, src, size, hipMemcpyDeviceToHost) );
#endif
#if defined(HYPRE_USING_SYCL)
HYPRE_SYCL_CALL( q->memcpy(dst, src, size).wait() );
#endif
return;
}
/* 2: Device <-- Host, Device <-- Pinned */
if ( loc_dst == hypre_MEMORY_DEVICE && (loc_src == hypre_MEMORY_HOST ||
loc_src == hypre_MEMORY_HOST_PINNED) )
{
#if defined(HYPRE_USING_DEVICE_OPENMP)
#if defined(HYPRE_DEVICE_OPENMP_ALLOC)
omp_target_memcpy(dst, src, size, 0, 0, hypre__offload_device_num, hypre__offload_host_num);
#else
/* mapped mode: write host copy, then push range to device */
memcpy(dst, src, size);
HYPRE_OMPOffload(hypre__offload_device_num, dst, size, "update", "to");
#endif
#endif
#if defined(HYPRE_USING_CUDA)
HYPRE_CUDA_CALL( cudaMemcpy(dst, src, size, cudaMemcpyHostToDevice) );
#endif
#if defined(HYPRE_USING_HIP)
HYPRE_HIP_CALL( hipMemcpy(dst, src, size, hipMemcpyHostToDevice) );
#endif
#if defined(HYPRE_USING_SYCL)
HYPRE_SYCL_CALL( q->memcpy(dst, src, size).wait() );
#endif
return;
}
/* 2: Host <-- Device, Pinned <-- Device */
if ( (loc_dst == hypre_MEMORY_HOST || loc_dst == hypre_MEMORY_HOST_PINNED) &&
loc_src == hypre_MEMORY_DEVICE )
{
#if defined(HYPRE_USING_DEVICE_OPENMP)
#if defined(HYPRE_DEVICE_OPENMP_ALLOC)
omp_target_memcpy(dst, src, size, 0, 0, hypre__offload_host_num, hypre__offload_device_num);
#else
/* mapped mode: pull range from device, then copy on host */
HYPRE_OMPOffload(hypre__offload_device_num, src, size, "update", "from");
memcpy(dst, src, size);
#endif
#endif
#if defined(HYPRE_USING_CUDA)
HYPRE_CUDA_CALL( cudaMemcpy( dst, src, size, cudaMemcpyDeviceToHost) );
#endif
#if defined(HYPRE_USING_HIP)
HYPRE_HIP_CALL( hipMemcpy(dst, src, size, hipMemcpyDeviceToHost) );
#endif
#if defined(HYPRE_USING_SYCL)
HYPRE_SYCL_CALL( q->memcpy(dst, src, size).wait() );
#endif
return;
}
/* 1: Device <-- Device */
if (loc_dst == hypre_MEMORY_DEVICE && loc_src == hypre_MEMORY_DEVICE)
{
#if defined(HYPRE_USING_DEVICE_OPENMP)
#if defined(HYPRE_DEVICE_OPENMP_ALLOC)
omp_target_memcpy(dst, src, size, 0, 0, hypre__offload_device_num, hypre__offload_device_num);
#else
/* mapped mode: device->host, host copy, host->device */
HYPRE_OMPOffload(hypre__offload_device_num, src, size, "update", "from");
memcpy(dst, src, size);
HYPRE_OMPOffload(hypre__offload_device_num, dst, size, "update", "to");
#endif
#endif
#if defined(HYPRE_USING_CUDA)
HYPRE_CUDA_CALL( cudaMemcpy(dst, src, size, cudaMemcpyDeviceToDevice) );
#endif
#if defined(HYPRE_USING_HIP)
HYPRE_HIP_CALL( hipMemcpy(dst, src, size, hipMemcpyDeviceToDevice) );
#endif
#if defined(HYPRE_USING_SYCL)
HYPRE_SYCL_CALL( q->memcpy(dst, src, size).wait() );
#endif
return;
}
/* unreachable for valid location pairs */
hypre_WrongMemoryLocation();
}
/*--------------------------------------------------------------------------*
* ExecPolicy
*--------------------------------------------------------------------------*/
/* Map a single physical memory location to an execution policy:
 * host/pinned -> host execution, device -> device execution,
 * unified -> the handle's default policy (GPU builds only). */
static inline HYPRE_ExecutionPolicy
hypre_GetExecPolicy1_core(hypre_MemoryLocation location)
{
HYPRE_ExecutionPolicy exec = HYPRE_EXEC_UNDEFINED;
switch (location)
{
case hypre_MEMORY_HOST :
case hypre_MEMORY_HOST_PINNED :
exec = HYPRE_EXEC_HOST;
break;
case hypre_MEMORY_DEVICE :
exec = HYPRE_EXEC_DEVICE;
break;
case hypre_MEMORY_UNIFIED :
#if defined(HYPRE_USING_GPU)
exec = hypre_HandleDefaultExecPolicy(hypre_handle());
#endif
break;
default :
hypre_WrongMemoryLocation();
}
hypre_assert(exec != HYPRE_EXEC_UNDEFINED);
return exec;
}
/* for binary operation */
/* Execution policy for a binary operation over two memory locations.
 * NOTE: the checks are ordered so that later assignments override earlier
 * ones -- a HOST or DEVICE operand wins over the unified-memory default. */
static inline HYPRE_ExecutionPolicy
hypre_GetExecPolicy2_core(hypre_MemoryLocation location1,
hypre_MemoryLocation location2)
{
HYPRE_ExecutionPolicy exec = HYPRE_EXEC_UNDEFINED;
/* HOST_PINNED has the same exec policy as HOST */
if (location1 == hypre_MEMORY_HOST_PINNED)
{
location1 = hypre_MEMORY_HOST;
}
if (location2 == hypre_MEMORY_HOST_PINNED)
{
location2 = hypre_MEMORY_HOST;
}
/* no policy for these combinations */
if ( (location1 == hypre_MEMORY_HOST && location2 == hypre_MEMORY_DEVICE) ||
(location2 == hypre_MEMORY_HOST && location1 == hypre_MEMORY_DEVICE) )
{
exec = HYPRE_EXEC_UNDEFINED;
}
/* this should never happen */
if ( (location1 == hypre_MEMORY_UNIFIED && location2 == hypre_MEMORY_DEVICE) ||
(location2 == hypre_MEMORY_UNIFIED && location1 == hypre_MEMORY_DEVICE) )
{
exec = HYPRE_EXEC_UNDEFINED;
}
if (location1 == hypre_MEMORY_UNIFIED && location2 == hypre_MEMORY_UNIFIED)
{
#if defined(HYPRE_USING_GPU)
exec = hypre_HandleDefaultExecPolicy(hypre_handle());
#endif
}
if (location1 == hypre_MEMORY_HOST || location2 == hypre_MEMORY_HOST)
{
exec = HYPRE_EXEC_HOST;
}
if (location1 == hypre_MEMORY_DEVICE || location2 == hypre_MEMORY_DEVICE)
{
exec = HYPRE_EXEC_DEVICE;
}
hypre_assert(exec != HYPRE_EXEC_UNDEFINED);
return exec;
}
/*==========================================================================
* Conceptual memory location (HYPRE_MemoryLocation) interface
*==========================================================================*/
/*--------------------------------------------------------------------------
* hypre_Memset
* "Sets the first num bytes of the block of memory pointed by ptr to the specified value
* (*** value is interpreted as an unsigned char ***)"
* http://www.cplusplus.com/reference/cstring/memset/
*--------------------------------------------------------------------------*/
/* Public memset over a conceptual (HYPRE_MemoryLocation) location.
 * Value is interpreted as an unsigned char, matching C memset semantics.
 * num == 0 or ptr == NULL are tolerated (NULL with num > 0 only warns). */
void *
hypre_Memset(void *ptr, HYPRE_Int value, size_t num, HYPRE_MemoryLocation location)
{
if (num == 0)
{
return ptr;
}
if (ptr == NULL)
{
/* num is always nonzero here (zero returned above); kept for safety */
if (num)
{
hypre_printf("hypre_Memset warning: set values for %ld bytes at %p !\n", num, ptr);
}
return ptr;
}
/* translate the conceptual location to a physical one, then dispatch */
switch (hypre_GetActualMemLocation(location))
{
case hypre_MEMORY_HOST :
case hypre_MEMORY_HOST_PINNED :
hypre_HostMemset(ptr, value, num);
break;
case hypre_MEMORY_DEVICE :
hypre_DeviceMemset(ptr, value, num);
break;
case hypre_MEMORY_UNIFIED :
hypre_UnifiedMemset(ptr, value, num);
break;
default :
hypre_WrongMemoryLocation();
}
return ptr;
}
/*--------------------------------------------------------------------------
* Memprefetch
*--------------------------------------------------------------------------*/
/* Public prefetch: move `size` bytes of unified memory toward the
 * (conceptual) target location. Precondition: ptr is unified memory. */
void
hypre_MemPrefetch(void *ptr, size_t size, HYPRE_MemoryLocation location)
{
hypre_UnifiedMemPrefetch( ptr, size, hypre_GetActualMemLocation(location) );
}
/*--------------------------------------------------------------------------*
* hypre_MAlloc, hypre_CAlloc
*--------------------------------------------------------------------------*/
/* Public malloc: allocate `size` uninitialized bytes at a conceptual
 * memory location. Aborts on allocation failure (see hypre_MAlloc_core). */
void *
hypre_MAlloc(size_t size, HYPRE_MemoryLocation location)
{
return hypre_MAlloc_core(size, 0, hypre_GetActualMemLocation(location));
}
/* Public calloc: allocate `count` elements of `elt_size` bytes each,
 * zero-initialized, at a conceptual memory location.
 * Aborts on allocation failure (see hypre_MAlloc_core). */
void *
hypre_CAlloc( size_t count, size_t elt_size, HYPRE_MemoryLocation location)
{
   size_t size = count * elt_size;
   /* Guard against multiplication overflow: saturate to SIZE_MAX so the
    * allocation fails and is reported by hypre_MAlloc_core, instead of
    * silently returning a wrapped-around (too small) buffer. */
   if (elt_size != 0 && count > ((size_t) -1) / elt_size)
   {
      size = (size_t) -1;
   }
   return hypre_MAlloc_core(size, 1, hypre_GetActualMemLocation(location));
}
/*--------------------------------------------------------------------------
* hypre_Free
*--------------------------------------------------------------------------*/
/* Public free: release `ptr` allocated at a conceptual memory location.
 * NULL is a no-op. */
void
hypre_Free(void *ptr, HYPRE_MemoryLocation location)
{
hypre_Free_core(ptr, hypre_GetActualMemLocation(location));
}
/*--------------------------------------------------------------------------
* hypre_Memcpy
*--------------------------------------------------------------------------*/
/* Public memcpy between conceptual memory locations; translates both
 * endpoints to physical locations and dispatches to hypre_Memcpy_core. */
void
hypre_Memcpy(void *dst, void *src, size_t size, HYPRE_MemoryLocation loc_dst,
HYPRE_MemoryLocation loc_src)
{
hypre_Memcpy_core( dst, src, size, hypre_GetActualMemLocation(loc_dst),
hypre_GetActualMemLocation(loc_src) );
}
/*--------------------------------------------------------------------------
* hypre_ReAlloc
*--------------------------------------------------------------------------*/
/* Resize a HOST allocation to `size` bytes (realloc semantics: size 0
 * frees, NULL ptr allocates). Non-host locations are a fatal error; use
 * hypre_ReAlloc_v2 for those.
 * NOTE(review): on realloc failure the original block is unreachable by
 * the caller (NULL is returned and ptr was overwritten) -- classic
 * realloc-overwrite pattern; confirm whether callers ever recover here. */
void *
hypre_ReAlloc(void *ptr, size_t size, HYPRE_MemoryLocation location)
{
if (size == 0)
{
hypre_Free(ptr, location);
return NULL;
}
if (ptr == NULL)
{
return hypre_MAlloc(size, location);
}
if (hypre_GetActualMemLocation(location) != hypre_MEMORY_HOST)
{
hypre_printf("hypre_TReAlloc only works with HYPRE_MEMORY_HOST; Use hypre_TReAlloc_v2 instead!\n");
hypre_assert(0);
hypre_MPI_Abort(hypre_MPI_COMM_WORLD, -1);
return NULL;
}
#if defined(HYPRE_USING_UMPIRE_HOST)
ptr = hypre_umpire_host_pooled_realloc(ptr, size);
#else
ptr = realloc(ptr, size);
#endif
if (!ptr)
{
hypre_OutOfMemory(size);
}
return ptr;
}
/* Resize an allocation at any memory location by allocate-copy-free
 * (needed because only host memory has native realloc). `old_size` bounds
 * the number of bytes preserved. Size 0 frees; NULL ptr allocates. */
void *
hypre_ReAlloc_v2(void *ptr, size_t old_size, size_t new_size, HYPRE_MemoryLocation location)
{
if (new_size == 0)
{
hypre_Free(ptr, location);
return NULL;
}
if (ptr == NULL)
{
return hypre_MAlloc(new_size, location);
}
void *new_ptr = hypre_MAlloc(new_size, location);
/* copy only the bytes that exist in both the old and new blocks */
size_t smaller_size = new_size > old_size ? old_size : new_size;
hypre_Memcpy(new_ptr, ptr, smaller_size, location, location);
hypre_Free(ptr, location);
ptr = new_ptr;
/* hypre_MAlloc aborts on failure, so this is a defensive check */
if (!ptr)
{
hypre_OutOfMemory(new_size);
}
return ptr;
}
/*--------------------------------------------------------------------------*
* hypre_GetExecPolicy: return execution policy based on memory locations
*--------------------------------------------------------------------------*/
/* for unary operation */
/* Execution policy for a unary operation on data at `location`. */
HYPRE_ExecutionPolicy
hypre_GetExecPolicy1(HYPRE_MemoryLocation location)
{
return hypre_GetExecPolicy1_core(hypre_GetActualMemLocation(location));
}
/* for binary operation */
/* Execution policy for a binary operation on data at two locations. */
HYPRE_ExecutionPolicy
hypre_GetExecPolicy2(HYPRE_MemoryLocation location1,
HYPRE_MemoryLocation location2)
{
return hypre_GetExecPolicy2_core(hypre_GetActualMemLocation(location1),
hypre_GetActualMemLocation(location2));
}
/*--------------------------------------------------------------------------
* Query the actual memory location pointed by ptr
*--------------------------------------------------------------------------*/
/* Query the physical memory location of `ptr` via the active GPU runtime.
 * Returns 0 on success, 1 when the runtime could not classify the pointer
 * (in which case *memory_location may be HOST or UNDEFINED).
 * On non-GPU builds every pointer is reported as HOST. */
HYPRE_Int
hypre_GetPointerLocation(const void *ptr, hypre_MemoryLocation *memory_location)
{
HYPRE_Int ierr = 0;
#if defined(HYPRE_USING_GPU)
*memory_location = hypre_MEMORY_UNDEFINED;
#if defined(HYPRE_USING_CUDA)
struct cudaPointerAttributes attr;
#if (CUDART_VERSION >= 10000)
#if (CUDART_VERSION >= 11000)
/* CUDA >= 11: unregistered host pointers no longer return an error */
HYPRE_CUDA_CALL( cudaPointerGetAttributes(&attr, ptr) );
#else
cudaError_t err = cudaPointerGetAttributes(&attr, ptr);
if (err != cudaSuccess)
{
ierr = 1;
/* clear the error */
cudaGetLastError();
}
#endif
if (attr.type == cudaMemoryTypeUnregistered)
{
*memory_location = hypre_MEMORY_HOST;
}
else if (attr.type == cudaMemoryTypeHost)
{
*memory_location = hypre_MEMORY_HOST_PINNED;
}
else if (attr.type == cudaMemoryTypeDevice)
{
*memory_location = hypre_MEMORY_DEVICE;
}
else if (attr.type == cudaMemoryTypeManaged)
{
*memory_location = hypre_MEMORY_UNIFIED;
}
#else
/* CUDA < 10: older attribute names (isManaged/memoryType), and plain
 * host pointers surface as cudaErrorInvalidValue */
cudaError_t err = cudaPointerGetAttributes(&attr, ptr);
if (err != cudaSuccess)
{
ierr = 1;
/* clear the error */
cudaGetLastError();
if (err == cudaErrorInvalidValue)
{
*memory_location = hypre_MEMORY_HOST;
}
}
else if (attr.isManaged)
{
*memory_location = hypre_MEMORY_UNIFIED;
}
else if (attr.memoryType == cudaMemoryTypeDevice)
{
*memory_location = hypre_MEMORY_DEVICE;
}
else if (attr.memoryType == cudaMemoryTypeHost)
{
*memory_location = hypre_MEMORY_HOST_PINNED;
}
#endif // CUDART_VERSION >= 10000
#endif // defined(HYPRE_USING_CUDA)
#if defined(HYPRE_USING_HIP)
struct hipPointerAttribute_t attr;
*memory_location = hypre_MEMORY_UNDEFINED;
hipError_t err = hipPointerGetAttributes(&attr, ptr);
if (err != hipSuccess)
{
ierr = 1;
/* clear the error */
hipGetLastError();
if (err == hipErrorInvalidValue)
{
*memory_location = hypre_MEMORY_HOST;
}
}
else if (attr.isManaged)
{
*memory_location = hypre_MEMORY_UNIFIED;
}
else if (attr.memoryType == hipMemoryTypeDevice)
{
*memory_location = hypre_MEMORY_DEVICE;
}
else if (attr.memoryType == hipMemoryTypeHost)
{
*memory_location = hypre_MEMORY_HOST_PINNED;
}
#endif // defined(HYPRE_USING_HIP)
#if defined(HYPRE_USING_SYCL)
/* If the device is not setup, then all allocations are assumed to be on the host */
*memory_location = hypre_MEMORY_HOST;
if (hypre_HandleDeviceData(hypre_handle()))
{
if (hypre_HandleDevice(hypre_handle()))
{
sycl::usm::alloc allocType;
allocType = sycl::get_pointer_type(ptr, (hypre_HandleComputeStream(hypre_handle()))->get_context());
if (allocType == sycl::usm::alloc::unknown)
{
*memory_location = hypre_MEMORY_HOST;
}
else if (allocType == sycl::usm::alloc::host)
{
*memory_location = hypre_MEMORY_HOST_PINNED;
}
else if (allocType == sycl::usm::alloc::device)
{
*memory_location = hypre_MEMORY_DEVICE;
}
else if (allocType == sycl::usm::alloc::shared)
{
*memory_location = hypre_MEMORY_UNIFIED;
}
}
}
#endif //HYPRE_USING_SYCL
#else /* #if defined(HYPRE_USING_GPU) */
*memory_location = hypre_MEMORY_HOST;
#endif
return ierr;
}
#ifdef HYPRE_USING_MEMORY_TRACKER
/*--------------------------------------------------------------------------
* Memory tracker
* do not use hypre_T* in the following since we don't want to track them *
*--------------------------------------------------------------------------*/
/* Allocate a zero-initialized memory tracker.
 * Uses raw calloc deliberately (see header note: hypre_T* would recurse).
 * Returns NULL on allocation failure. */
hypre_MemoryTracker *
hypre_MemoryTrackerCreate()
{
hypre_MemoryTracker *ptr = (hypre_MemoryTracker *) calloc(1, sizeof(hypre_MemoryTracker));
return ptr;
}
/* Release a tracker and its entry array. NULL is a no-op.
 * Uses raw free deliberately (tracked deallocation would recurse). */
void
hypre_MemoryTrackerDestroy(hypre_MemoryTracker *tracker)
{
   if (!tracker)
   {
      return;
   }
   free(tracker->data);
   free(tracker);
}
/* Record one alloc/free event in the global memory tracker.
 * NULL pointers are ignored. The entry array grows geometrically
 * (2*n + 1) with raw realloc (tracked allocation would recurse). */
void
hypre_MemoryTrackerInsert(const char *action,
                          void *ptr,
                          size_t nbytes,
                          hypre_MemoryLocation memory_location,
                          const char *filename,
                          const char *function,
                          HYPRE_Int line)
{
   if (ptr == NULL)
   {
      return;
   }
   hypre_MemoryTracker *tracker = hypre_memory_tracker();
   if (tracker->alloced_size <= tracker->actual_size)
   {
      size_t new_size = 2 * tracker->alloced_size + 1;
      /* use a temporary so a failed realloc does not clobber (and leak)
       * the existing entry array */
      hypre_MemoryTrackerEntry *new_data = (hypre_MemoryTrackerEntry *)
         realloc(tracker->data, new_size * sizeof(hypre_MemoryTrackerEntry));
      if (!new_data)
      {
         /* growth failed: keep existing records valid, drop this event */
         hypre_assert(0);
         return;
      }
      tracker->data = new_data;
      tracker->alloced_size = new_size;
   }
   hypre_assert(tracker->actual_size < tracker->alloced_size);
   hypre_MemoryTrackerEntry *entry = tracker->data + tracker->actual_size;
   /* snprintf: never overflow the fixed-size entry buffers */
   snprintf(entry->_action, sizeof(entry->_action), "%s", action);
   entry->_ptr = ptr;
   entry->_nbytes = nbytes;
   entry->_memory_location = memory_location;
   snprintf(entry->_filename, sizeof(entry->_filename), "%s", filename);
   snprintf(entry->_function, sizeof(entry->_function), "%s", function);
   entry->_line = line;
   /* -1 is the initial value: "not yet paired with a matching free" */
   entry->_pair = (size_t) -1;
   tracker->actual_size ++;
}
/* do not use hypre_printf, hypre_fprintf, which have TAlloc
* endless loop "for (i = 0; i < tracker->actual_size; i++)" otherwise */
/* Append a report of all tracked memory events to HypreMemoryTrack.log.<rank>:
 * pairs each alloc with its free (quadratic scan), prints per-event rows with
 * running byte counts per location, then totals, peaks, reachable bytes, and
 * warnings for unpaired events. Events before tracker->prev_end were already
 * printed by an earlier call and are re-scanned only for pairing/accounting. */
HYPRE_Int
hypre_PrintMemoryTracker()
{
HYPRE_Int myid, ierr = 0;
char filename[256];
FILE *file;
size_t i, j;
hypre_MemoryTracker *tracker = hypre_memory_tracker();
hypre_MPI_Comm_rank(hypre_MPI_COMM_WORLD, &myid);
hypre_sprintf(filename, "HypreMemoryTrack.log.%05d", myid);
if ((file = fopen(filename, "a")) == NULL)
{
fprintf(stderr, "Error: can't open output file %s\n", filename);
return hypre_error_flag;
}
fprintf(file, "==== Operations:\n");
fprintf(file,
"  ID           EVENT                 ADDRESS       BYTE         LOCATION                                             FILE(LINE)                                           FUNCTION  | Memory (   H            P            D            U )\n");
/* one counter slot per physical location (HOST..UNIFIED) */
size_t totl_bytes[hypre_MEMORY_UNIFIED + 1] = {0};
size_t peak_bytes[hypre_MEMORY_UNIFIED + 1] = {0};
size_t curr_bytes[hypre_MEMORY_UNIFIED + 1] = {0};
for (i = 0; i < tracker->actual_size; i++)
{
if (strstr(tracker->data[i]._action, "alloc") != NULL)
{
totl_bytes[tracker->data[i]._memory_location] += tracker->data[i]._nbytes;
curr_bytes[tracker->data[i]._memory_location] += tracker->data[i]._nbytes;
peak_bytes[tracker->data[i]._memory_location] =
hypre_max( curr_bytes[tracker->data[i]._memory_location],
peak_bytes[tracker->data[i]._memory_location] );
/* for each unpaired "alloc", find its "free" */
if (tracker->data[i]._pair != (size_t) -1)
{
/* already paired on a previous call: sanity-check the back link */
if ( tracker->data[i]._pair >= tracker->actual_size ||
tracker->data[tracker->data[i]._pair]._pair != i)
{
fprintf(stderr, "hypre memory tracker internal error!\n");
hypre_MPI_Abort(hypre_MPI_COMM_WORLD, 1);
}
continue;
}
/* linear search forward for the matching free (O(n^2) overall) */
for (j = i + 1; j < tracker->actual_size; j++)
{
if ( strstr(tracker->data[j]._action, "free") != NULL &&
tracker->data[j]._pair == (size_t) -1 &&
tracker->data[i]._ptr == tracker->data[j]._ptr &&
tracker->data[i]._memory_location == tracker->data[j]._memory_location )
{
tracker->data[i]._pair = j;
tracker->data[j]._pair = i;
/* free events carry no size; copy it from the alloc */
tracker->data[j]._nbytes = tracker->data[i]._nbytes;
break;
}
}
if (tracker->data[i]._pair == (size_t) -1)
{
fprintf(stderr, "%6zu: %16p may not freed\n", i, tracker->data[i]._ptr );
}
}
else if (strstr(tracker->data[i]._action, "free") != NULL)
{
size_t pair = tracker->data[i]._pair;
if (pair == (size_t) -1)
{
fprintf(stderr, "%6zu: unpaired free at %16p\n", i, tracker->data[i]._ptr );
}
else
{
curr_bytes[tracker->data[i]._memory_location] -= tracker->data[pair]._nbytes;
}
}
/* rows up to prev_end were printed by an earlier call; skip printing */
if (i < tracker->prev_end)
{
continue;
}
char memory_location[256];
char nbytes[32];
if (tracker->data[i]._memory_location == hypre_MEMORY_HOST)
{
sprintf(memory_location, "%s", "HOST");
}
else if (tracker->data[i]._memory_location == hypre_MEMORY_HOST_PINNED)
{
sprintf(memory_location, "%s", "HOST_PINNED");
}
else if (tracker->data[i]._memory_location == hypre_MEMORY_DEVICE)
{
sprintf(memory_location, "%s", "DEVICE");
}
else if (tracker->data[i]._memory_location == hypre_MEMORY_UNIFIED)
{
sprintf(memory_location, "%s", "UNIFIED");
}
else
{
sprintf(memory_location, "%s", "UNDEFINED");
}
if (tracker->data[i]._nbytes != (size_t) -1)
{
sprintf(nbytes, "%zu", tracker->data[i]._nbytes);
}
else
{
/* size unknown (e.g. still-unpaired free): print an empty field */
sprintf(nbytes, "%s", "");
}
fprintf(file, " %6zu %12s %16p %10s %16s %40s (%5d) %50s | %12zu %12zu %12zu %12zu\n",
i,
tracker->data[i]._action,
tracker->data[i]._ptr,
nbytes,
memory_location,
tracker->data[i]._filename,
tracker->data[i]._line,
tracker->data[i]._function,
curr_bytes[hypre_MEMORY_HOST],
curr_bytes[hypre_MEMORY_HOST_PINNED],
curr_bytes[hypre_MEMORY_DEVICE],
curr_bytes[hypre_MEMORY_UNIFIED]
);
}
fprintf(file, "\n==== Total allocated (byte):\n");
fprintf(file, "HOST: %16zu, HOST_PINNED %16zu, DEVICE %16zu, UNIFIED %16zu\n",
totl_bytes[hypre_MEMORY_HOST],
totl_bytes[hypre_MEMORY_HOST_PINNED],
totl_bytes[hypre_MEMORY_DEVICE],
totl_bytes[hypre_MEMORY_UNIFIED]);
fprintf(file, "\n==== Peak (byte):\n");
fprintf(file, "HOST: %16zu, HOST_PINNED %16zu, DEVICE %16zu, UNIFIED %16zu\n",
peak_bytes[hypre_MEMORY_HOST],
peak_bytes[hypre_MEMORY_HOST_PINNED],
peak_bytes[hypre_MEMORY_DEVICE],
peak_bytes[hypre_MEMORY_UNIFIED]);
fprintf(file, "\n==== Reachable (byte):\n");
fprintf(file, "HOST: %16zu, HOST_PINNED %16zu, DEVICE %16zu, UNIFIED %16zu\n",
curr_bytes[hypre_MEMORY_HOST],
curr_bytes[hypre_MEMORY_HOST_PINNED],
curr_bytes[hypre_MEMORY_DEVICE],
curr_bytes[hypre_MEMORY_UNIFIED]);
fprintf(file, "\n==== Warnings:\n");
for (i = 0; i < tracker->actual_size; i++)
{
if (tracker->data[i]._pair == (size_t) -1)
{
if (strstr(tracker->data[i]._action, "alloc") != NULL)
{
fprintf(file, "%6zu: %p may have not been freed\n", i, tracker->data[i]._ptr );
}
else if (strstr(tracker->data[i]._action, "free") != NULL)
{
fprintf(file, "%6zu: unpaired free at %16p\n", i, tracker->data[i]._ptr );
}
}
}
fclose(file);
/* remember how far we printed, so the next call appends only new events */
tracker->prev_end = tracker->actual_size;
return ierr;
}
#endif
/*--------------------------------------------------------------------------*
* Memory Pool
*--------------------------------------------------------------------------*/
/* Configure the CUB caching allocator pools (CUDA + device-pool builds only;
 * a no-op otherwise). Stores all four parameters in the handle, but only
 * max_cached_bytes is pushed to allocators that already exist. */
HYPRE_Int
hypre_SetCubMemPoolSize(hypre_uint cub_bin_growth,
hypre_uint cub_min_bin,
hypre_uint cub_max_bin,
size_t cub_max_cached_bytes)
{
#if defined(HYPRE_USING_CUDA)
#ifdef HYPRE_USING_DEVICE_POOL
hypre_HandleCubBinGrowth(hypre_handle()) = cub_bin_growth;
hypre_HandleCubMinBin(hypre_handle()) = cub_min_bin;
hypre_HandleCubMaxBin(hypre_handle()) = cub_max_bin;
hypre_HandleCubMaxCachedBytes(hypre_handle()) = cub_max_cached_bytes;
//TODO XXX RL: cub_min_bin, cub_max_bin are not (re)set
if (hypre_HandleCubDevAllocator(hypre_handle()))
{
hypre_HandleCubDevAllocator(hypre_handle()) -> SetMaxCachedBytes(cub_max_cached_bytes);
}
if (hypre_HandleCubUvmAllocator(hypre_handle()))
{
hypre_HandleCubUvmAllocator(hypre_handle()) -> SetMaxCachedBytes(cub_max_cached_bytes);
}
#endif
#endif
return hypre_error_flag;
}
/* Public API wrapper for configuring the GPU (CUB) memory pool.
 * Forwards directly to hypre_SetCubMemPoolSize(); see that function for the
 * meaning of the bin parameters and the cached-bytes cap. */
HYPRE_Int
HYPRE_SetGPUMemoryPoolSize(HYPRE_Int bin_growth,
                           HYPRE_Int min_bin,
                           HYPRE_Int max_bin,
                           size_t    max_cached_bytes)
{
   HYPRE_Int ierr;

   ierr = hypre_SetCubMemPoolSize(bin_growth, min_bin, max_bin, max_cached_bytes);

   return ierr;
}
#ifdef HYPRE_USING_DEVICE_POOL
/* cudaMalloc replacement that allocates through the CUB caching allocator.
 *
 * The device allocator is created lazily on first use with the bin/cache
 * parameters currently stored on the hypre handle; the trailing constructor
 * arguments are skip_cleanup=false, debug=false, use_managed_memory=false.
 * Stores the allocation through ptr and returns CUB's cudaError_t. */
cudaError_t
hypre_CachingMallocDevice(void **ptr, size_t nbytes)
{
   if (!hypre_HandleCubDevAllocator(hypre_handle()))
   {
      hypre_HandleCubDevAllocator(hypre_handle()) =
         hypre_DeviceDataCubCachingAllocatorCreate( hypre_HandleCubBinGrowth(hypre_handle()),
                                                    hypre_HandleCubMinBin(hypre_handle()),
                                                    hypre_HandleCubMaxBin(hypre_handle()),
                                                    hypre_HandleCubMaxCachedBytes(hypre_handle()),
                                                    false,
                                                    false,
                                                    false );
   }

   return hypre_HandleCubDevAllocator(hypre_handle()) -> DeviceAllocate(ptr, nbytes);
}
/* cudaFree replacement: return a block to the CUB device pool.
 * The pointer must have come from hypre_CachingMallocDevice, so the
 * allocator is assumed to exist. */
cudaError_t
hypre_CachingFreeDevice(void *ptr)
{
   hypre_cub_CachingDeviceAllocator *dev_allocator = hypre_HandleCubDevAllocator(hypre_handle());

   return dev_allocator -> DeviceFree(ptr);
}
/* Managed (UVM) counterpart of hypre_CachingMallocDevice.
 *
 * Lazily creates the UVM caching allocator on first use; identical to the
 * device variant except the final constructor argument
 * (use_managed_memory) is true.  Stores the allocation through ptr and
 * returns CUB's cudaError_t. */
cudaError_t
hypre_CachingMallocManaged(void **ptr, size_t nbytes)
{
   if (!hypre_HandleCubUvmAllocator(hypre_handle()))
   {
      hypre_HandleCubUvmAllocator(hypre_handle()) =
         hypre_DeviceDataCubCachingAllocatorCreate( hypre_HandleCubBinGrowth(hypre_handle()),
                                                    hypre_HandleCubMinBin(hypre_handle()),
                                                    hypre_HandleCubMaxBin(hypre_handle()),
                                                    hypre_HandleCubMaxCachedBytes(hypre_handle()),
                                                    false,
                                                    false,
                                                    true );
   }

   return hypre_HandleCubUvmAllocator(hypre_handle()) -> DeviceAllocate(ptr, nbytes);
}
/* Return a managed (UVM) block to the CUB caching pool.
 * The pointer must have come from hypre_CachingMallocManaged, so the
 * allocator is assumed to exist. */
cudaError_t
hypre_CachingFreeManaged(void *ptr)
{
   hypre_cub_CachingDeviceAllocator *uvm_allocator = hypre_HandleCubUvmAllocator(hypre_handle());

   return uvm_allocator -> DeviceFree(ptr);
}
/* Construct a CUB caching device allocator with the given bin geometry and
 * cache cap.  use_managed_memory selects managed (UVM) allocations instead
 * of plain device memory.  The caller owns the returned object and must
 * release it (see hypre_DeviceDataCubCachingAllocatorDestroy). */
hypre_cub_CachingDeviceAllocator *
hypre_DeviceDataCubCachingAllocatorCreate(hypre_uint bin_growth,
                                          hypre_uint min_bin,
                                          hypre_uint max_bin,
                                          size_t     max_cached_bytes,
                                          bool       skip_cleanup,
                                          bool       debug,
                                          bool       use_managed_memory)
{
   return new hypre_cub_CachingDeviceAllocator( bin_growth,
                                                min_bin,
                                                max_bin,
                                                max_cached_bytes,
                                                skip_cleanup,
                                                debug,
                                                use_managed_memory );
}
/* Tear down both CUB caching allocators owned by a hypre_DeviceData.
 * 'delete NULL' is a no-op, so this is safe when an allocator was never
 * lazily created.  The slots are reset to NULL afterwards so a repeated
 * destroy (or a later lazy re-creation check) cannot act on a dangling
 * pointer. */
void
hypre_DeviceDataCubCachingAllocatorDestroy(hypre_DeviceData *data)
{
   delete hypre_DeviceDataCubDevAllocator(data);
   hypre_DeviceDataCubDevAllocator(data) = NULL;

   delete hypre_DeviceDataCubUvmAllocator(data);
   hypre_DeviceDataCubUvmAllocator(data) = NULL;
}
#endif // #ifdef HYPRE_USING_DEVICE_POOL
#if defined(HYPRE_USING_UMPIRE_HOST)
/* Allocate nbytes from the Umpire HOST memory pool.
 *
 * The pool (named by hypre_HandleUmpireHostPoolName) is created on first use
 * on top of the "HOST" resource, sized by UmpireHostPoolSize with
 * UmpireBlockSize growth; OwnUmpireHostPool records that hypre created it.
 * The allocation is stored through ptr; always returns hypre_error_flag. */
HYPRE_Int
hypre_umpire_host_pooled_allocate(void **ptr, size_t nbytes)
{
   hypre_Handle *handle = hypre_handle();
   const char *resource_name = "HOST";
   const char *pool_name = hypre_HandleUmpireHostPoolName(handle);

   umpire_resourcemanager *rm_ptr = &hypre_HandleUmpireResourceMan(handle);
   umpire_allocator pooled_allocator;

   if ( umpire_resourcemanager_is_allocator_name(rm_ptr, pool_name) )
   {
      /* Pool already exists: just look it up. */
      umpire_resourcemanager_get_allocator_by_name(rm_ptr, pool_name, &pooled_allocator);
   }
   else
   {
      /* First allocation: build the pool on top of the HOST resource. */
      umpire_allocator allocator;
      umpire_resourcemanager_get_allocator_by_name(rm_ptr, resource_name, &allocator);
      umpire_resourcemanager_make_allocator_pool(rm_ptr, pool_name, allocator,
                                                 hypre_HandleUmpireHostPoolSize(handle),
                                                 hypre_HandleUmpireBlockSize(handle), &pooled_allocator);
      hypre_HandleOwnUmpireHostPool(handle) = 1;
   }

   *ptr = umpire_allocator_allocate(&pooled_allocator, nbytes);

   return hypre_error_flag;
}
/* Return ptr to the Umpire HOST pool.  The pool must already exist, i.e.
 * the pointer must have come from hypre_umpire_host_pooled_allocate. */
HYPRE_Int
hypre_umpire_host_pooled_free(void *ptr)
{
   hypre_Handle *handle = hypre_handle();
   umpire_resourcemanager *rm = &hypre_HandleUmpireResourceMan(handle);
   const char *pool_name = hypre_HandleUmpireHostPoolName(handle);
   umpire_allocator pooled_allocator;

   hypre_assert(umpire_resourcemanager_is_allocator_name(rm, pool_name));

   umpire_resourcemanager_get_allocator_by_name(rm, pool_name, &pooled_allocator);
   umpire_allocator_deallocate(&pooled_allocator, ptr);

   return hypre_error_flag;
}
/* Reallocate a block owned by the Umpire HOST pool.
 * Returns the (possibly moved) pointer from Umpire's reallocate; the pool
 * must already exist. */
void *
hypre_umpire_host_pooled_realloc(void *ptr, size_t size)
{
   hypre_Handle *handle = hypre_handle();
   umpire_resourcemanager *rm = &hypre_HandleUmpireResourceMan(handle);
   const char *pool_name = hypre_HandleUmpireHostPoolName(handle);
   umpire_allocator pooled_allocator;

   hypre_assert(umpire_resourcemanager_is_allocator_name(rm, pool_name));
   umpire_resourcemanager_get_allocator_by_name(rm, pool_name, &pooled_allocator);

   return umpire_resourcemanager_reallocate_with_allocator(rm, ptr, size, pooled_allocator);
}
#endif
#if defined(HYPRE_USING_UMPIRE_DEVICE)
/* Allocate nbytes from the Umpire DEVICE memory pool of the current device.
 *
 * The underlying resource is selected by name ("DEVICE::<id>"); the pool is
 * created on first use, sized by UmpireDevicePoolSize with UmpireBlockSize
 * growth, and OwnUmpireDevicePool records that hypre created it.
 * The allocation is stored through ptr; always returns hypre_error_flag. */
HYPRE_Int
hypre_umpire_device_pooled_allocate(void **ptr, size_t nbytes)
{
   hypre_Handle *handle = hypre_handle();
   const hypre_int device_id = hypre_HandleDevice(handle);
   /* 32 bytes: "DEVICE::" (8) plus up to 11 characters for any int value
      plus the terminator.  The previous 16-byte buffer could overflow for
      large device ids. */
   char resource_name[32];
   const char *pool_name = hypre_HandleUmpireDevicePoolName(handle);

   hypre_sprintf(resource_name, "%s::%d", "DEVICE", device_id);

   umpire_resourcemanager *rm_ptr = &hypre_HandleUmpireResourceMan(handle);
   umpire_allocator pooled_allocator;

   if ( umpire_resourcemanager_is_allocator_name(rm_ptr, pool_name) )
   {
      /* Pool already exists: just look it up. */
      umpire_resourcemanager_get_allocator_by_name(rm_ptr, pool_name, &pooled_allocator);
   }
   else
   {
      /* First allocation: build the pool on top of this device's resource. */
      umpire_allocator allocator;
      umpire_resourcemanager_get_allocator_by_name(rm_ptr, resource_name, &allocator);
      umpire_resourcemanager_make_allocator_pool(rm_ptr, pool_name, allocator,
                                                 hypre_HandleUmpireDevicePoolSize(handle),
                                                 hypre_HandleUmpireBlockSize(handle), &pooled_allocator);
      hypre_HandleOwnUmpireDevicePool(handle) = 1;
   }

   *ptr = umpire_allocator_allocate(&pooled_allocator, nbytes);

   return hypre_error_flag;
}
/* Return ptr to the Umpire DEVICE pool.  The pool must already exist, i.e.
 * the pointer must have come from hypre_umpire_device_pooled_allocate. */
HYPRE_Int
hypre_umpire_device_pooled_free(void *ptr)
{
   hypre_Handle *handle = hypre_handle();
   umpire_resourcemanager *rm = &hypre_HandleUmpireResourceMan(handle);
   const char *pool_name = hypre_HandleUmpireDevicePoolName(handle);
   umpire_allocator pooled_allocator;

   hypre_assert(umpire_resourcemanager_is_allocator_name(rm, pool_name));

   umpire_resourcemanager_get_allocator_by_name(rm, pool_name, &pooled_allocator);
   umpire_allocator_deallocate(&pooled_allocator, ptr);

   return hypre_error_flag;
}
#endif
#if defined(HYPRE_USING_UMPIRE_UM)
/* Allocate nbytes from the Umpire unified-memory ("UM") pool.
 *
 * The pool (named by hypre_HandleUmpireUMPoolName) is created on first use
 * on top of the "UM" resource, sized by UmpireUMPoolSize with
 * UmpireBlockSize growth; OwnUmpireUMPool records that hypre created it.
 * The allocation is stored through ptr; always returns hypre_error_flag. */
HYPRE_Int
hypre_umpire_um_pooled_allocate(void **ptr, size_t nbytes)
{
   hypre_Handle *handle = hypre_handle();
   const char *resource_name = "UM";
   const char *pool_name = hypre_HandleUmpireUMPoolName(handle);

   umpire_resourcemanager *rm_ptr = &hypre_HandleUmpireResourceMan(handle);
   umpire_allocator pooled_allocator;

   if ( umpire_resourcemanager_is_allocator_name(rm_ptr, pool_name) )
   {
      /* Pool already exists: just look it up. */
      umpire_resourcemanager_get_allocator_by_name(rm_ptr, pool_name, &pooled_allocator);
   }
   else
   {
      /* First allocation: build the pool on top of the UM resource. */
      umpire_allocator allocator;
      umpire_resourcemanager_get_allocator_by_name(rm_ptr, resource_name, &allocator);
      umpire_resourcemanager_make_allocator_pool(rm_ptr, pool_name, allocator,
                                                 hypre_HandleUmpireUMPoolSize(handle),
                                                 hypre_HandleUmpireBlockSize(handle), &pooled_allocator);
      hypre_HandleOwnUmpireUMPool(handle) = 1;
   }

   *ptr = umpire_allocator_allocate(&pooled_allocator, nbytes);

   return hypre_error_flag;
}
/* Return ptr to the Umpire UM pool.  The pool must already exist, i.e. the
 * pointer must have come from hypre_umpire_um_pooled_allocate. */
HYPRE_Int
hypre_umpire_um_pooled_free(void *ptr)
{
   hypre_Handle *handle = hypre_handle();
   umpire_resourcemanager *rm = &hypre_HandleUmpireResourceMan(handle);
   const char *pool_name = hypre_HandleUmpireUMPoolName(handle);
   umpire_allocator pooled_allocator;

   hypre_assert(umpire_resourcemanager_is_allocator_name(rm, pool_name));

   umpire_resourcemanager_get_allocator_by_name(rm, pool_name, &pooled_allocator);
   umpire_allocator_deallocate(&pooled_allocator, ptr);

   return hypre_error_flag;
}
#endif
#if defined(HYPRE_USING_UMPIRE_PINNED)
/* Allocate nbytes from the Umpire PINNED (page-locked host) pool.
 *
 * The pool (named by hypre_HandleUmpirePinnedPoolName) is created on first
 * use on top of the "PINNED" resource, sized by UmpirePinnedPoolSize with
 * UmpireBlockSize growth; OwnUmpirePinnedPool records that hypre created it.
 * The allocation is stored through ptr; always returns hypre_error_flag. */
HYPRE_Int
hypre_umpire_pinned_pooled_allocate(void **ptr, size_t nbytes)
{
   hypre_Handle *handle = hypre_handle();
   const char *resource_name = "PINNED";
   const char *pool_name = hypre_HandleUmpirePinnedPoolName(handle);

   umpire_resourcemanager *rm_ptr = &hypre_HandleUmpireResourceMan(handle);
   umpire_allocator pooled_allocator;

   if ( umpire_resourcemanager_is_allocator_name(rm_ptr, pool_name) )
   {
      /* Pool already exists: just look it up. */
      umpire_resourcemanager_get_allocator_by_name(rm_ptr, pool_name, &pooled_allocator);
   }
   else
   {
      /* First allocation: build the pool on top of the PINNED resource. */
      umpire_allocator allocator;
      umpire_resourcemanager_get_allocator_by_name(rm_ptr, resource_name, &allocator);
      umpire_resourcemanager_make_allocator_pool(rm_ptr, pool_name, allocator,
                                                 hypre_HandleUmpirePinnedPoolSize(handle),
                                                 hypre_HandleUmpireBlockSize(handle), &pooled_allocator);
      hypre_HandleOwnUmpirePinnedPool(handle) = 1;
   }

   *ptr = umpire_allocator_allocate(&pooled_allocator, nbytes);

   return hypre_error_flag;
}
/* Return ptr to the Umpire PINNED pool.  The pool must already exist, i.e.
 * the pointer must have come from hypre_umpire_pinned_pooled_allocate.
 *
 * Note: 'handle' was previously declared 'const hypre_Handle *', unlike the
 * HOST/DEVICE/UM variants; that discarded the const qualifier when taking
 * &hypre_HandleUmpireResourceMan(handle) into a non-const pointer, so the
 * const is dropped for correctness and consistency. */
HYPRE_Int
hypre_umpire_pinned_pooled_free(void *ptr)
{
   hypre_Handle *handle = hypre_handle();
   const char *pool_name = hypre_HandleUmpirePinnedPoolName(handle);
   umpire_allocator pooled_allocator;
   umpire_resourcemanager *rm_ptr = &hypre_HandleUmpireResourceMan(handle);

   hypre_assert(umpire_resourcemanager_is_allocator_name(rm_ptr, pool_name));

   umpire_resourcemanager_get_allocator_by_name(rm_ptr, pool_name, &pooled_allocator);
   umpire_allocator_deallocate(&pooled_allocator, ptr);

   return hypre_error_flag;
}
#endif
|
test.c | #include <stdlib.h>
#include <check.h>
#include <omp.h>
#include <stdint.h>
/* Naive doubly-recursive Fibonacci.  Intentionally exponential-time: the
 * barrier test below uses it as a CPU-burning workload, so do not memoize
 * or convert to iteration. */
static uint64_t fib_seq(int n)
{ /*{{{*/
    return (n < 2) ? (uint64_t) n : fib_seq(n - 1) + fib_seq(n - 2);
} /*}}}*/
/* Verify that '#pragma omp barrier' really synchronizes two threads: after
 * each barrier, the slower thread's array store must be visible to the
 * other thread.  fib_seq(42) is a deliberately slow serial workload that
 * staggers the two threads, so the assertions would fail without a working
 * barrier. */
START_TEST(omp_barrier_simple)
{/*{{{*/
    int num_threads_reqd = 2;
    int a[2];
    /* a[i] is "owned" by thread i; start at +/-42 so each update lands on
       a known value. */
    a[0] = 42;
    a[1] = -42;
#pragma omp parallel shared(a) num_threads(num_threads_reqd)
    {
        // Do large work with one thread.
        if(omp_get_thread_num() == 0)
            fib_seq(42);
        // Both threads update their own slot (each becomes 0).
        a[omp_get_thread_num()] += ((omp_get_thread_num() == 0 ? -1 : 1) * 42);
#pragma omp barrier
        // Check that the other thread's update is visible (must be 0).
        ck_assert_int_eq(a[omp_get_thread_num() == 0 ? 1 : 0], 0);
        // Do large work with one thread.
        if(omp_get_thread_num() == 1)
            fib_seq(42);
        // Both threads update their own slot again (now +42 and -42).
        a[omp_get_thread_num()] += ((omp_get_thread_num() == 0 ? 1 : -1) * 42);
#pragma omp barrier
        // After the barrier the two slots must be exact negatives of each other.
        ck_assert_int_eq(a[omp_get_thread_num() == 0 ? 1 : 0], -1 * a[omp_get_thread_num() == 0 ? 0 : 1]);
    }
}/*}}}*/
END_TEST
/* Assemble the Check test suite: one "omp_barrier" test case with a 10 s
 * timeout (the barrier test burns CPU on purpose). */
Suite* test_suite(void)
{/*{{{*/
    Suite* suite = suite_create("Test");
    TCase* barrier_case = tcase_create("omp_barrier");

    tcase_add_test(barrier_case, omp_barrier_simple);
    tcase_set_timeout(barrier_case, 10);
    suite_add_tcase(suite, barrier_case);

    return suite;
}/*}}}*/
/* Run the suite under Check's runner; the exit status reflects whether any
 * test failed. */
int main(void)
{/*{{{*/
    SRunner* runner = srunner_create(test_suite());
    int failed;

    srunner_run_all(runner, CK_VERBOSE);
    failed = srunner_ntests_failed(runner);
    srunner_free(runner);

    return (failed == 0) ? EXIT_SUCCESS : EXIT_FAILURE;
}/*}}}*/
|
psd.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% PPPP SSSSS DDDD %
% P P SS D D %
% PPPP SSS D D %
% P SS D D %
% P SSSSS DDDD %
% %
% %
% Read/Write Adobe Photoshop Image Format %
% %
% Software Design %
% Cristy %
% Leonard Rosenthol %
% July 1992 %
% Dirk Lemstra %
% December 2013 %
% %
% %
% Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Photoshop spec @ https://www.adobe.com/devnet-apps/photoshop/fileformatashtml
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/attribute.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/cache.h"
#include "magick/channel.h"
#include "magick/colormap.h"
#include "magick/colormap-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/constitute.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/log.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/module.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/pixel.h"
#include "magick/pixel-accessor.h"
#include "magick/policy.h"
#include "magick/profile.h"
#include "magick/property.h"
#include "magick/registry.h"
#include "magick/quantum-private.h"
#include "magick/static.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#ifdef MAGICKCORE_ZLIB_DELEGATE
#include <zlib.h>
#endif
#include "psd-private.h"
/*
Define declaractions.
*/
#define MaxPSDChannels 56
#define PSDQuantum(x) (((ssize_t) (x)+1) & -2)
/*
Enumerated declaractions.
*/
typedef enum
{
Raw = 0,
RLE = 1,
ZipWithoutPrediction = 2,
ZipWithPrediction = 3
} PSDCompressionType;
typedef enum
{
BitmapMode = 0,
GrayscaleMode = 1,
IndexedMode = 2,
RGBMode = 3,
CMYKMode = 4,
MultichannelMode = 7,
DuotoneMode = 8,
LabMode = 9
} PSDImageType;
/*
Typedef declaractions.
*/
typedef struct _ChannelInfo
{
short
type;
size_t
size;
} ChannelInfo;
typedef struct _MaskInfo
{
Image
*image;
RectangleInfo
page;
unsigned char
background,
flags;
} MaskInfo;
typedef struct _LayerInfo
{
ChannelInfo
channel_info[MaxPSDChannels];
char
blendkey[4];
Image
*image;
MaskInfo
mask;
Quantum
opacity;
RectangleInfo
page;
size_t
offset_x,
offset_y;
unsigned char
clipping,
flags,
name[257],
visible;
unsigned short
channels;
StringInfo
*info;
} LayerInfo;
/*
Forward declarations.
*/
static MagickBooleanType
WritePSDImage(const ImageInfo *,Image *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s P S D %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsPSD()() returns MagickTrue if the image format type, identified by the
% magick string, is PSD.
%
% The format of the IsPSD method is:
%
% MagickBooleanType IsPSD(const unsigned char *magick,const size_t length)
%
% A description of each parameter follows:
%
% o magick: compare image format pattern against these bytes.
%
% o length: Specifies the length of the magick string.
%
*/
/* Return MagickTrue when the magick bytes begin with the PSD signature
 * "8BPS"; at least 4 bytes are required. */
static MagickBooleanType IsPSD(const unsigned char *magick,const size_t length)
{
  if ((length >= 4) &&
      (LocaleNCompare((const char *) magick,"8BPS",4) == 0))
    return(MagickTrue);
  return(MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPSDImage() reads an Adobe Photoshop image file and returns it. It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the ReadPSDImage method is:
%
% Image *ReadPSDImage(image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Map the image's composite operator to the 4-byte PSD blend-mode key.
  When the image is little-endian the key is returned with its bytes
  reversed (e.g. "norm" -> "mron") so the writer emits the correct
  on-disk byte order.  Operators without a PSD equivalent fall back to
  the normal ("norm") mode.
*/
static const char *CompositeOperatorToPSDBlendMode(Image *image)
{
  switch (image->compose)
  {
    case ColorBurnCompositeOp:
      return(image->endian == LSBEndian ? "vidi" : "idiv");
    case ColorDodgeCompositeOp:
      return(image->endian == LSBEndian ? " vid" : "div ");
    case ColorizeCompositeOp:
      return(image->endian == LSBEndian ? "rloc" : "colr");
    case DarkenCompositeOp:
      return(image->endian == LSBEndian ? "krad" : "dark");
    case DifferenceCompositeOp:
      return(image->endian == LSBEndian ? "ffid" : "diff");
    case DissolveCompositeOp:
      return(image->endian == LSBEndian ? "ssid" : "diss");
    case ExclusionCompositeOp:
      return(image->endian == LSBEndian ? "dums" : "smud");
    case HardLightCompositeOp:
      return(image->endian == LSBEndian ? "tiLh" : "hLit");
    case HardMixCompositeOp:
      return(image->endian == LSBEndian ? "xiMh" : "hMix");
    case HueCompositeOp:
      return(image->endian == LSBEndian ? " euh" : "hue ");
    case LightenCompositeOp:
      return(image->endian == LSBEndian ? "etil" : "lite");
    case LinearBurnCompositeOp:
      return(image->endian == LSBEndian ? "nrbl" : "lbrn");
    case LinearDodgeCompositeOp:
      return(image->endian == LSBEndian ? "gddl" : "lddg");
    case LinearLightCompositeOp:
      return(image->endian == LSBEndian ? "tiLl" : "lLit");
    case LuminizeCompositeOp:
      return(image->endian == LSBEndian ? " mul" : "lum ");
    case MultiplyCompositeOp:
      return(image->endian == LSBEndian ? " lum" : "mul ");
    case OverlayCompositeOp:
      return(image->endian == LSBEndian ? "revo" : "over");
    case PinLightCompositeOp:
      return(image->endian == LSBEndian ? "tiLp" : "pLit");
    case SaturateCompositeOp:
      return(image->endian == LSBEndian ? " tas" : "sat ");
    case ScreenCompositeOp:
      return(image->endian == LSBEndian ? "nrcs" : "scrn");
    case SoftLightCompositeOp:
      return(image->endian == LSBEndian ? "tiLs" : "sLit");
    case VividLightCompositeOp:
      return(image->endian == LSBEndian ? "tiLv" : "vLit");
    case OverCompositeOp:
    default:
      /* No PSD equivalent: use normal blending. */
      return(image->endian == LSBEndian ? "mron" : "norm");
  }
}
/*
For some reason Photoshop seems to blend semi-transparent pixels with white.
This method reverts the blending. This can be disabled by setting the
option 'psd:alpha-unblend' to off.
*/
/* Undo Photoshop's blending of semi-transparent pixels with white (see the
 * note above).  Only applies to matte sRGB images, and can be disabled via
 * the 'psd:alpha-unblend' option.  Rows are processed in parallel; a failed
 * row sets 'status' and remaining iterations become no-ops. */
static MagickBooleanType CorrectPSDAlphaBlend(const ImageInfo *image_info,
  Image *image,ExceptionInfo* exception)
{
  const char
    *option;

  MagickBooleanType
    status;

  ssize_t
    y;

  if ((image->matte == MagickFalse) || (image->colorspace != sRGBColorspace))
    return(MagickTrue);
  option=GetImageOption(image_info,"psd:alpha-unblend");
  if (IsStringNotFalse(option) == MagickFalse)
    return(MagickTrue);
  status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
    {
      status=MagickFalse;
      continue;
    }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        gamma;

      /* gamma is the normalized alpha; fully transparent or fully opaque
         pixels need no correction (and gamma==0 would divide by zero). */
      gamma=QuantumScale*GetPixelAlpha(q);
      if (gamma != 0.0 && gamma != 1.0)
      {
        /* Invert "composite over white": c_blended = c*g + (1-g)*white. */
        SetPixelRed(q,(GetPixelRed(q)-((1.0-gamma)*QuantumRange))/gamma);
        SetPixelGreen(q,(GetPixelGreen(q)-((1.0-gamma)*QuantumRange))/gamma);
        SetPixelBlue(q,(GetPixelBlue(q)-((1.0-gamma)*QuantumRange))/gamma);
      }
      q++;
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      status=MagickFalse;
  }
  return(status);
}
/* Translate a PSD compression tag into ImageMagick's CompressionType.
 * Both Zip variants map to ZipCompression; Raw (and any unknown value)
 * maps to NoCompression. */
static inline CompressionType ConvertPSDCompression(
  PSDCompressionType compression)
{
  if (compression == RLE)
    return RLECompression;
  if ((compression == ZipWithPrediction) ||
      (compression == ZipWithoutPrediction))
    return ZipCompression;
  return NoCompression;
}
/* Scale every pixel's alpha by a layer-level opacity.
 *
 * With revert==MagickFalse each alpha is multiplied by opacity/QuantumRange;
 * with revert==MagickTrue the scaling is undone (division), which only
 * makes sense for opacity > 0.  A fully opaque layer is a no-op.  Rows are
 * processed in parallel; any row failure sets 'status'. */
static MagickBooleanType ApplyPSDLayerOpacity(Image *image,Quantum opacity,
  MagickBooleanType revert,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  ssize_t
    y;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      "  applying layer opacity %.20g", (double) opacity);
  if (opacity == QuantumRange)
    return(MagickTrue);
  /* Make sure an alpha channel exists before scaling it. */
  if (image->matte != MagickTrue)
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
  status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
    {
      status=MagickFalse;
      continue;
    }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (revert == MagickFalse)
        SetPixelAlpha(q,(Quantum) (QuantumScale*(GetPixelAlpha(q)*opacity)));
      else if (opacity > 0)
        SetPixelAlpha(q,(Quantum) (QuantumRange*(GetPixelAlpha(q)/
          (MagickRealType) opacity)));
      q++;
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      status=MagickFalse;
  }
  return(status);
}
/* Modulate the image's alpha channel by a layer mask.
 *
 * A full-size clone of the image is filled with 'background' and the mask
 * is composited onto it at the mask's page offset relative to the image,
 * so pixels outside the mask get the background value.  Each pixel's alpha
 * is then scaled by the mask intensity (revert==MagickFalse) or the scaling
 * is undone by division (revert==MagickTrue, intensity > 0).  No-op for
 * images without an alpha channel.  Rows are processed in parallel; any
 * row failure sets 'status'. */
static MagickBooleanType ApplyPSDOpacityMask(Image *image,const Image *mask,
  Quantum background,MagickBooleanType revert,ExceptionInfo *exception)
{
  Image
    *complete_mask;

  MagickBooleanType
    status;

  MagickPixelPacket
    color;

  ssize_t
    y;

  if (image->matte == MagickFalse)
    return(MagickTrue);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      "  applying opacity mask");
  complete_mask=CloneImage(image,0,0,MagickTrue,exception);
  if (complete_mask == (Image *) NULL)
    return(MagickFalse);
  complete_mask->matte=MagickTrue;
  GetMagickPixelPacket(complete_mask,&color);
  color.red=(MagickRealType) background;
  (void) SetImageColor(complete_mask,&color);
  /* Place the (possibly smaller) mask at its page offset on the
     background-filled canvas. */
  status=CompositeImage(complete_mask,OverCompositeOp,mask,
    mask->page.x-image->page.x,mask->page.y-image->page.y);
  if (status == MagickFalse)
  {
    complete_mask=DestroyImage(complete_mask);
    return(status);
  }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register PixelPacket
      *magick_restrict q;

    register PixelPacket
      *p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
    p=GetAuthenticPixels(complete_mask,0,y,complete_mask->columns,1,exception);
    if ((q == (PixelPacket *) NULL) || (p == (PixelPacket *) NULL))
    {
      status=MagickFalse;
      continue;
    }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        alpha,
        intensity;

      alpha=(MagickRealType) GetPixelAlpha(q);
      intensity=GetPixelIntensity(complete_mask,p);
      if (revert == MagickFalse)
        SetPixelAlpha(q,ClampToQuantum(intensity*(QuantumScale*alpha)));
      else if (intensity > 0)
        SetPixelAlpha(q,ClampToQuantum((alpha/intensity)*QuantumRange));
      q++;
      p++;
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      status=MagickFalse;
  }
  complete_mask=DestroyImage(complete_mask);
  return(status);
}
/* Stash a layer's opacity-mask image in the global image registry so the
 * writer can later re-emit it, and record the registry key on the layer
 * image as the 'psd:opacity-mask' artifact.
 *
 * The key is 8 random bytes, the mask background byte, and a terminator.
 * Previously only 2+1 random bytes were requested from GetRandomKey while
 * key[8] and key[9] were written, so the key contained bytes outside the
 * requested buffer; request 8+1 bytes so every index used is defined. */
static void PreservePSDOpacityMask(Image *image,LayerInfo* layer_info,
  ExceptionInfo *exception)
{
  char
    *key;

  RandomInfo
    *random_info;

  StringInfo
    *key_info;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      "  preserving opacity mask");
  random_info=AcquireRandomInfo();
  key_info=GetRandomKey(random_info,8+1);
  key=(char *) GetStringInfoDatum(key_info);
  key[8]=(char) layer_info->mask.background;
  key[9]='\0';
  /* The mask is stored with absolute page coordinates. */
  layer_info->mask.image->page.x+=layer_info->page.x;
  layer_info->mask.image->page.y+=layer_info->page.y;
  (void) SetImageRegistry(ImageRegistryType,(const char *) key,
    layer_info->mask.image,exception);
  (void) SetImageArtifact(layer_info->image,"psd:opacity-mask",
    (const char *) key);
  key_info=DestroyStringInfo(key_info);
  random_info=DestroyRandomInfo(random_info);
}
/* Decode a PackBits-style RLE stream into raw samples.
 *
 * compact_pixels holds number_compact_pixels encoded bytes; decoded bytes
 * are written to pixels (capacity number_pixels).  For depth < 8 each input
 * byte is expanded into 8/4/2 output samples; for depth 1 a set bit maps to
 * 0 and a clear bit to 255.  Returns the number of output samples written,
 * which may be short if either buffer is exhausted first (the two macros
 * below return early in that case).
 *
 * Control bytes: 128 is a no-op; >128 means "repeat the next byte
 * 256-length+1 times"; otherwise "copy the next length+1 bytes literally".
 */
static ssize_t DecodePSDPixels(const size_t number_compact_pixels,
  const unsigned char *compact_pixels,const ssize_t depth,
  const size_t number_pixels,unsigned char *pixels)
{
/* Consume one input byte, returning early when the input is exhausted. */
#define CheckNumberCompactPixels \
  if (packets == 0) \
    return(i); \
  packets--

/* Reserve 'count' output samples, returning early on overflow. */
#define CheckNumberPixels(count) \
  if (((ssize_t) i + count) > (ssize_t) number_pixels) \
    return(i); \
  i+=count

  int
    pixel;

  register ssize_t
    i,
    j;

  size_t
    length;

  ssize_t
    packets;

  packets=(ssize_t) number_compact_pixels;
  for (i=0; (packets > 1) && (i < (ssize_t) number_pixels); )
  {
    packets--;
    length=(size_t) (*compact_pixels++);
    if (length == 128)
      continue;  /* 128 is a no-op marker. */
    if (length > 128)
    {
      /* Replicate run: repeat the next input byte 256-length+1 times. */
      length=256-length+1;
      CheckNumberCompactPixels;
      pixel=(*compact_pixels++);
      for (j=0; j < (ssize_t) length; j++)
      {
        switch (depth)
        {
          case 1:
          {
            /* 8 bilevel samples per byte, set bit -> 0, clear bit -> 255. */
            CheckNumberPixels(8);
            *pixels++=(pixel >> 7) & 0x01 ? 0U : 255U;
            *pixels++=(pixel >> 6) & 0x01 ? 0U : 255U;
            *pixels++=(pixel >> 5) & 0x01 ? 0U : 255U;
            *pixels++=(pixel >> 4) & 0x01 ? 0U : 255U;
            *pixels++=(pixel >> 3) & 0x01 ? 0U : 255U;
            *pixels++=(pixel >> 2) & 0x01 ? 0U : 255U;
            *pixels++=(pixel >> 1) & 0x01 ? 0U : 255U;
            *pixels++=(pixel >> 0) & 0x01 ? 0U : 255U;
            break;
          }
          case 2:
          {
            /* 4 two-bit samples per byte. */
            CheckNumberPixels(4);
            *pixels++=(unsigned char) ((pixel >> 6) & 0x03);
            *pixels++=(unsigned char) ((pixel >> 4) & 0x03);
            *pixels++=(unsigned char) ((pixel >> 2) & 0x03);
            *pixels++=(unsigned char) ((pixel & 0x03) & 0x03);
            break;
          }
          case 4:
          {
            /* 2 nibble samples per byte. */
            CheckNumberPixels(2);
            *pixels++=(unsigned char) ((pixel >> 4) & 0xff);
            *pixels++=(unsigned char) ((pixel & 0x0f) & 0xff);
            break;
          }
          default:
          {
            /* depth >= 8: one byte per sample. */
            CheckNumberPixels(1);
            *pixels++=(unsigned char) pixel;
            break;
          }
        }
      }
      continue;
    }
    /* Literal run: copy the next length+1 input bytes. */
    length++;
    for (j=0; j < (ssize_t) length; j++)
    {
      CheckNumberCompactPixels;
      switch (depth)
      {
        case 1:
        {
          CheckNumberPixels(8);
          *pixels++=(*compact_pixels >> 7) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 6) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 5) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 4) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 3) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 2) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 1) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 0) & 0x01 ? 0U : 255U;
          break;
        }
        case 2:
        {
          CheckNumberPixels(4);
          *pixels++=(*compact_pixels >> 6) & 0x03;
          *pixels++=(*compact_pixels >> 4) & 0x03;
          *pixels++=(*compact_pixels >> 2) & 0x03;
          *pixels++=(*compact_pixels & 0x03) & 0x03;
          break;
        }
        case 4:
        {
          CheckNumberPixels(2);
          *pixels++=(*compact_pixels >> 4) & 0xff;
          *pixels++=(*compact_pixels & 0x0f) & 0xff;
          break;
        }
        default:
        {
          CheckNumberPixels(1);
          *pixels++=(*compact_pixels);
          break;
        }
      }
      compact_pixels++;
    }
  }
  return(i);
}
/* Release every image, mask image, and auxiliary StringInfo attached to the
 * first number_layers entries of layer_info, then free the array itself and
 * return the result of RelinquishMagickMemory so the caller can reassign. */
static inline LayerInfo *DestroyLayerInfo(LayerInfo *layer_info,
  const ssize_t number_layers)
{
  ssize_t
    i;

  for (i=0; i < number_layers; i++)
  {
    LayerInfo
      *layer = layer_info+i;

    if (layer->image != (Image *) NULL)
      layer->image=DestroyImage(layer->image);
    if (layer->mask.image != (Image *) NULL)
      layer->mask.image=DestroyImage(layer->mask.image);
    if (layer->info != (StringInfo *) NULL)
      layer->info=DestroyStringInfo(layer->info);
  }

  return (LayerInfo *) RelinquishMagickMemory(layer_info);
}
/* Bytes per stored sample: 2 for palettes with more than 256 entries,
 * otherwise 4 / 2 / 1 for depth > 16 / > 8 / <= 8. */
static inline size_t GetPSDPacketSize(const Image *image)
{
  if ((image->storage_class == PseudoClass) && (image->colors > 256))
    return(2);
  if (image->depth > 16)
    return(4);
  return(image->depth > 8 ? 2 : 1);
}
/* Read a PSD length field from the blob: 32 bits for version-1 files,
 * 64 bits otherwise. */
static inline MagickSizeType GetPSDSize(const PSDInfo *psd_info,Image *image)
{
  if (psd_info->version != 1)
    return((MagickSizeType) ReadBlobLongLong(image));
  return((MagickSizeType) ReadBlobLong(image));
}
/* Bytes in one decoded row: bilevel images pack 8 pixels per byte
 * (rounded up); all other depths use one packet per column. */
static inline size_t GetPSDRowSize(Image *image)
{
  size_t
    units;

  units=(image->depth == 1) ? (image->columns+7)/8 : image->columns;
  return(units*GetPSDPacketSize(image));
}
/* Human-readable name of a PSD color mode, for logging. */
static const char *ModeToString(PSDImageType type)
{
  static const struct
  {
    PSDImageType
      mode;

    const char
      *name;
  } modes[] =
  {
    { BitmapMode, "Bitmap" },
    { GrayscaleMode, "Grayscale" },
    { IndexedMode, "Indexed" },
    { RGBMode, "RGB" },
    { CMYKMode, "CMYK" },
    { MultichannelMode, "Multichannel" },
    { DuotoneMode, "Duotone" },
    { LabMode, "L*A*B" }
  };

  size_t
    i;

  for (i=0; i < sizeof(modes)/sizeof(modes[0]); i++)
    if (modes[i].mode == type)
      return modes[i].name;
  return "unknown";
}
/* Walk the "8BIM" image-resource blocks, harvesting a few of them.
 *
 * The whole blob is copied into the returned "8bim" StringInfo profile
 * (caller owns it; NULL when the blob is shorter than 16 bytes).  While
 * walking, resource 0x03ed fills in the image resolution and resource
 * 0x0421 clears *has_merged_image when its 5th byte is zero.  Every bounds
 * check 'break's out of the loop rather than reading past the blob; blocks
 * are padded to even offsets. */
static StringInfo *ParseImageResourceBlocks(Image *image,
  const unsigned char *blocks,size_t length,
  MagickBooleanType *has_merged_image)
{
  const unsigned char
    *p;

  ssize_t
    offset;

  StringInfo
    *profile;

  unsigned char
    name_length;

  unsigned int
    count;

  unsigned short
    id,
    short_sans;

  if (length < 16)
    return((StringInfo *) NULL);
  profile=BlobToStringInfo((const void *) NULL,length);
  SetStringInfoDatum(profile,blocks);
  SetStringInfoName(profile,"8bim");
  /* Each block: "8BIM", 2-byte id, Pascal name (padded to even length),
     4-byte data length, then the data. */
  for (p=blocks; (p >= blocks) && (p < (blocks+length-7)); )
  {
    if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
      break;
    p+=4;
    p=PushShortPixel(MSBEndian,p,&id);
    p=PushCharPixel(p,&name_length);
    if ((name_length % 2) == 0)
      name_length++;  /* name + length byte are padded to an even size */
    p+=name_length;
    if (p > (blocks+length-4))
      break;
    p=PushLongPixel(MSBEndian,p,&count);
    offset=(ssize_t) count;
    if (((p+offset) < blocks) || ((p+offset) > (blocks+length)))
      break;
    switch (id)
    {
      case 0x03ed:
      {
        char
          value[MaxTextExtent];

        unsigned short
          resolution;

        /*
          Resolution info.
        */
        if (offset < 16)
          break;
        p=PushShortPixel(MSBEndian,p,&resolution);
        image->x_resolution=(double) resolution;
        (void) FormatLocaleString(value,MaxTextExtent,"%g",
          image->x_resolution);
        (void) SetImageProperty(image,"tiff:XResolution",value);
        /* Skip unit/display fields between the two resolutions. */
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&resolution);
        image->y_resolution=(double) resolution;
        (void) FormatLocaleString(value,MaxTextExtent,"%g",
          image->y_resolution);
        (void) SetImageProperty(image,"tiff:YResolution",value);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        image->units=PixelsPerInchResolution;
        break;
      }
      case 0x0421:
      {
        /* Version info: byte 4 of the data is the "has merged image"
           flag. */
        if ((offset > 4) && (*(p+4) == 0))
          *has_merged_image=MagickFalse;
        p+=offset;
        break;
      }
      default:
      {
        p+=offset;
        break;
      }
    }
    if ((offset & 0x01) != 0)
      p++;  /* data is padded to an even length */
  }
  return(profile);
}
static CompositeOperator PSDBlendModeToCompositeOperator(const char *mode)
{
if (mode == (const char *) NULL)
return(OverCompositeOp);
if (LocaleNCompare(mode,"norm",4) == 0)
return(OverCompositeOp);
if (LocaleNCompare(mode,"mul ",4) == 0)
return(MultiplyCompositeOp);
if (LocaleNCompare(mode,"diss",4) == 0)
return(DissolveCompositeOp);
if (LocaleNCompare(mode,"diff",4) == 0)
return(DifferenceCompositeOp);
if (LocaleNCompare(mode,"dark",4) == 0)
return(DarkenCompositeOp);
if (LocaleNCompare(mode,"lite",4) == 0)
return(LightenCompositeOp);
if (LocaleNCompare(mode,"hue ",4) == 0)
return(HueCompositeOp);
if (LocaleNCompare(mode,"sat ",4) == 0)
return(SaturateCompositeOp);
if (LocaleNCompare(mode,"colr",4) == 0)
return(ColorizeCompositeOp);
if (LocaleNCompare(mode,"lum ",4) == 0)
return(LuminizeCompositeOp);
if (LocaleNCompare(mode,"scrn",4) == 0)
return(ScreenCompositeOp);
if (LocaleNCompare(mode,"over",4) == 0)
return(OverlayCompositeOp);
if (LocaleNCompare(mode,"hLit",4) == 0)
return(HardLightCompositeOp);
if (LocaleNCompare(mode,"sLit",4) == 0)
return(SoftLightCompositeOp);
if (LocaleNCompare(mode,"smud",4) == 0)
return(ExclusionCompositeOp);
if (LocaleNCompare(mode,"div ",4) == 0)
return(ColorDodgeCompositeOp);
if (LocaleNCompare(mode,"idiv",4) == 0)
return(ColorBurnCompositeOp);
if (LocaleNCompare(mode,"lbrn",4) == 0)
return(LinearBurnCompositeOp);
if (LocaleNCompare(mode,"lddg",4) == 0)
return(LinearDodgeCompositeOp);
if (LocaleNCompare(mode,"lLit",4) == 0)
return(LinearLightCompositeOp);
if (LocaleNCompare(mode,"vLit",4) == 0)
return(VividLightCompositeOp);
if (LocaleNCompare(mode,"pLit",4) == 0)
return(PinLightCompositeOp);
if (LocaleNCompare(mode,"hMix",4) == 0)
return(HardMixCompositeOp);
return(OverCompositeOp);
}
/* Reverse the first 'length' bytes of p in place, but only for images that
 * are not MSB-endian (MSB data is already in the expected byte order).
 *
 * The previous implementation used a comma-chained XOR swap; a plain
 * temporary is clearer and avoids the XOR-swap self-aliasing pitfall. */
static inline void ReversePSDString(Image *image,char *p,size_t length)
{
  char
    *q;

  if (image->endian == MSBEndian)
    return;

  q=p+length;
  for(--q; p < q; ++p, --q)
  {
    char
      swap;

    swap=(*p);
    *p=(*q);
    *q=swap;
  }
}
/* Store one decoded sample into the pixel/index arrays.
 *
 * 'type' is the PSD channel code: 0..4 select red/green/blue and then
 * either the CMYK black index or alpha, while negative codes (used for
 * special channels) map -1 to alpha and -2/-3/-4 to red/green/blue, with
 * -2 also replicating red into green and blue (grayscale).  For PseudoClass
 * images channel 0 sets the colormap index and the pixel is resolved
 * through the (clamped) colormap. */
static inline void SetPSDPixel(Image *image,const size_t channels,
  const ssize_t type,const size_t packet_size,const Quantum pixel,
  PixelPacket *q,IndexPacket *indexes,ssize_t x)
{
  if (image->storage_class == PseudoClass)
  {
    PixelPacket
      *color;

    if (type == 0)
    {
      /* Channel 0 carries the palette index (1 or 2 bytes wide). */
      if (packet_size == 1)
        SetPixelIndex(indexes+x,ScaleQuantumToChar(pixel));
      else
        SetPixelIndex(indexes+x,ScaleQuantumToShort(pixel));
    }
    color=image->colormap+(ssize_t) ConstrainColormapIndex(image,
      (ssize_t) GetPixelIndex(indexes+x));
    /* With more than one channel, the non-index channel is the alpha of
       the colormap entry. */
    if ((type == 0) && (channels > 1))
      return;
    else
      SetPixelAlpha(color,pixel);
    SetPixelRGBO(q,color);
    return;
  }
  switch (type)
  {
    case -1:
    {
      SetPixelAlpha(q,pixel);
      break;
    }
    case -2:
    case 0:
    {
      SetPixelRed(q,pixel);
      /* Grayscale (or type -2): replicate red into green and blue. */
      if ((channels < 3) || (type == -2))
      {
        SetPixelGreen(q,GetPixelRed(q));
        SetPixelBlue(q,GetPixelRed(q));
      }
      break;
    }
    case -3:
    case 1:
    {
      SetPixelGreen(q,pixel);
      break;
    }
    case -4:
    case 2:
    {
      SetPixelBlue(q,pixel);
      break;
    }
    case 3:
    {
      /* Fourth channel: black for CMYK, otherwise alpha (if any). */
      if (image->colorspace == CMYKColorspace)
        SetPixelIndex(indexes+x,pixel);
      else
        if (image->matte != MagickFalse)
          SetPixelAlpha(q,pixel);
      break;
    }
    case 4:
    {
      /* Fifth channel is alpha only for CMYK-like data; sRGB-compatible
         images with more than 3 channels already consumed alpha above. */
      if ((IssRGBCompatibleColorspace(image->colorspace) != MagickFalse) &&
          (channels > 3))
        break;
      if (image->matte != MagickFalse)
        SetPixelAlpha(q,pixel);
      break;
    }
  }
}
static MagickBooleanType ReadPSDChannelPixels(Image *image,
  const size_t channels,const ssize_t row,const ssize_t type,
  const unsigned char *pixels,ExceptionInfo *exception)
{
  Quantum
    pixel;
  register const unsigned char
    *p;
  register IndexPacket
    *indexes;
  register PixelPacket
    *q;
  register ssize_t
    x;
  size_t
    packet_size;
  unsigned short
    nibble;
  /*
    Unpack one decoded row ('pixels') into the authentic pixel cache at
    'row', routing each sample to the channel selected by 'type' via
    SetPSDPixel().  Returns the result of syncing the pixel cache.
  */
  p=pixels;
  q=GetAuthenticPixels(image,0,row,image->columns,1,exception);
  if (q == (PixelPacket *) NULL)
    return MagickFalse;
  indexes=GetAuthenticIndexQueue(image);
  packet_size=GetPSDPacketSize(image);
  for (x=0; x < (ssize_t) image->columns; x++)
  {
    /* Sample size: 1 byte, 2-byte MSB short, or 4-byte MSB float. */
    if (packet_size == 1)
      pixel=ScaleCharToQuantum(*p++);
    else
      if (packet_size == 2)
        {
          p=PushShortPixel(MSBEndian,p,&nibble);
          pixel=ScaleShortToQuantum(nibble);
        }
      else
        {
          MagickFloatType
            nibble;
          p=PushFloatPixel(MSBEndian,p,&nibble);
          /* 32-bit float samples are nominally 0..1; scale and clamp. */
          pixel=ClampToQuantum((MagickRealType)QuantumRange*nibble);
        }
    if (image->depth > 1)
      {
        SetPSDPixel(image,channels,type,packet_size,pixel,q,indexes,x);
        q++;
      }
    else
      {
        /*
          1-bit data: each byte expands to up to 8 pixels, MSB first; a set
          bit is black (0), a clear bit is white (QuantumRange).
        */
        ssize_t
          bit,
          number_bits;
        number_bits=(ssize_t) image->columns-x;
        if (number_bits > 8)
          number_bits=8;
        for (bit=0; bit < number_bits; bit++)
        {
          SetPSDPixel(image,channels,type,packet_size,(((unsigned char) pixel)
            & (0x01 << (7-bit))) != 0 ? 0 : QuantumRange,q++,indexes,x++);
        }
        /* x was advanced inside the inner loop and the outer for will
           advance it once more, so back up unless the row is complete. */
        if (x != (ssize_t) image->columns)
          x--;
        continue;
      }
  }
  return(SyncAuthenticPixels(image,exception));
}
static MagickBooleanType ReadPSDChannelRaw(Image *image,const size_t channels,
  const ssize_t type,ExceptionInfo *exception)
{
  MagickBooleanType
    status;
  size_t
    row_size;
  ssize_t
    count,
    y;
  unsigned char
    *pixels;

  /*
    Decode an uncompressed (RAW) channel: the blob holds image->rows rows of
    row_size packed bytes; each row is handed to ReadPSDChannelPixels() for
    distribution into the pixel cache.  Returns MagickFalse on a short read
    or pixel-cache failure.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is RAW");
  row_size=GetPSDRowSize(image);
  pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  (void) memset(pixels,0,row_size*sizeof(*pixels));
  status=MagickTrue;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    /*
      Note: the redundant dead store of status=MagickFalse that preceded
      the read on every iteration has been removed; status is set on each
      exit path below.
    */
    count=ReadBlob(image,row_size,pixels);
    if (count != (ssize_t) row_size)
      {
        /* Truncated file: report failure and stop. */
        status=MagickFalse;
        break;
      }
    status=ReadPSDChannelPixels(image,channels,y,type,pixels,exception);
    if (status == MagickFalse)
      break;
  }
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}
static inline MagickOffsetType *ReadPSDRLESizes(Image *image,
  const PSDInfo *psd_info,const size_t size)
{
  MagickOffsetType
    *sizes;
  ssize_t
    row;

  /*
    Read the table of per-row compressed byte counts that precedes RLE
    channel data.  Version 1 (PSD) files store 16-bit counts, version 2
    (PSB) files 32-bit counts.  Returns NULL when the table cannot be
    allocated; the caller owns and must relinquish the result.
  */
  sizes=(MagickOffsetType *) AcquireQuantumMemory(size,sizeof(*sizes));
  if (sizes == (MagickOffsetType *) NULL)
    return((MagickOffsetType *) NULL);
  for (row=0; row < (ssize_t) size; row++)
    sizes[row]=(MagickOffsetType) ((psd_info->version == 1) ?
      ReadBlobShort(image) : ReadBlobLong(image));
  return(sizes);
}
static MagickBooleanType ReadPSDChannelRLE(Image *image,const PSDInfo *psd_info,
  const ssize_t type,MagickOffsetType *sizes,ExceptionInfo *exception)
{
  MagickBooleanType
    status;
  size_t
    length,
    row_size;
  ssize_t
    count,
    y;
  unsigned char
    *compact_pixels,
    *pixels;
  /*
    Decode a PackBits (RLE) compressed channel.  'sizes' holds one
    compressed byte count per row, as produced by ReadPSDRLESizes().
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is RLE compressed");
  row_size=GetPSDRowSize(image);
  pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /*
    Size the scratch buffer to the largest compressed row; reject rows
    claiming to be grossly larger than any plausible PackBits expansion
    (defends against attacker-controlled row sizes).
  */
  length=0;
  for (y=0; y < (ssize_t) image->rows; y++)
    if ((MagickOffsetType) length < sizes[y])
      length=(size_t) sizes[y];
  if (length > (row_size+2048))
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      ThrowBinaryException(ResourceLimitError,"InvalidLength",image->filename);
    }
  compact_pixels=(unsigned char *) AcquireQuantumMemory(length,sizeof(*pixels));
  if (compact_pixels == (unsigned char *) NULL)
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  (void) memset(compact_pixels,0,length*sizeof(*compact_pixels));
  status=MagickTrue;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    status=MagickFalse;
    count=ReadBlob(image,(size_t) sizes[y],compact_pixels);
    if (count != (ssize_t) sizes[y])
      break;
    /* The depth value 123456 appears to be a sentinel telling
       DecodePSDPixels the data is 1-bit — confirm in DecodePSDPixels. */
    count=DecodePSDPixels((size_t) sizes[y],compact_pixels,
      (ssize_t) (image->depth == 1 ? 123456 : image->depth),row_size,pixels);
    if (count != (ssize_t) row_size)
      break;
    status=ReadPSDChannelPixels(image,psd_info->channels,y,type,pixels,
      exception);
    if (status == MagickFalse)
      break;
  }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}
#ifdef MAGICKCORE_ZLIB_DELEGATE
static MagickBooleanType ReadPSDChannelZip(Image *image,const size_t channels,
  const ssize_t type,const PSDCompressionType compression,
  const size_t compact_size,ExceptionInfo *exception)
{
  MagickBooleanType
    status;
  register unsigned char
    *p;
  size_t
    count,
    length,
    packet_size,
    row_size;
  ssize_t
    y;
  unsigned char
    *compact_pixels,
    *pixels;
  z_stream
    stream;
  /*
    Decode a ZIP (deflate) compressed channel of 'compact_size' bytes into
    rows*row_size raw sample bytes, optionally undoing the per-row delta
    prediction used by the ZipWithPrediction variant.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is ZIP compressed");
  /* A compressed payload larger than the file itself is corrupt. */
  if ((MagickSizeType) compact_size > GetBlobSize(image))
    ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile",
      image->filename);
  compact_pixels=(unsigned char *) AcquireQuantumMemory(compact_size,
    sizeof(*compact_pixels));
  if (compact_pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  packet_size=GetPSDPacketSize(image);
  row_size=image->columns*packet_size;
  count=image->rows*row_size;
  pixels=(unsigned char *) AcquireQuantumMemory(count,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    {
      compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  if (ReadBlob(image,compact_size,compact_pixels) != (ssize_t) compact_size)
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
      ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile",
        image->filename);
    }
  /* Inflate the whole channel through a single zlib stream. */
  memset(&stream,0,sizeof(stream));
  stream.data_type=Z_BINARY;
  stream.next_in=(Bytef *)compact_pixels;
  stream.avail_in=(uInt) compact_size;
  stream.next_out=(Bytef *)pixels;
  stream.avail_out=(uInt) count;
  if (inflateInit(&stream) == Z_OK)
    {
      int
        ret;
      while (stream.avail_out > 0)
      {
        ret=inflate(&stream,Z_SYNC_FLUSH);
        if ((ret != Z_OK) && (ret != Z_STREAM_END))
          {
            /* Corrupt deflate data: release everything and fail. */
            (void) inflateEnd(&stream);
            compact_pixels=(unsigned char *) RelinquishMagickMemory(
              compact_pixels);
            pixels=(unsigned char *) RelinquishMagickMemory(pixels);
            return(MagickFalse);
          }
        if (ret == Z_STREAM_END)
          break;
      }
      (void) inflateEnd(&stream);
    }
  if (compression == ZipWithPrediction)
    {
      /*
        Undo horizontal delta prediction: each sample is stored as the
        difference with its left neighbor.  2-byte samples are handled as
        big-endian byte pairs with a manual carry from the low byte.
      */
      p=pixels;
      while (count > 0)
      {
        length=image->columns;
        while (--length)
        {
          if (packet_size == 2)
            {
              p[2]+=p[0]+((p[1]+p[3]) >> 8);
              p[3]+=p[1];
            }
          else
            *(p+1)+=*p;
          p+=packet_size;
        }
        p+=packet_size;
        count-=row_size;
      }
    }
  /* Hand each decoded row to the pixel cache. */
  status=MagickTrue;
  p=pixels;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    status=ReadPSDChannelPixels(image,channels,y,type,p,exception);
    if (status == MagickFalse)
      break;
    p+=row_size;
  }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}
#endif
static MagickBooleanType ReadPSDChannel(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,LayerInfo* layer_info,
  const size_t channel,const PSDCompressionType compression,
  ExceptionInfo *exception)
{
  Image
    *channel_image,
    *mask;
  MagickOffsetType
    offset;
  MagickBooleanType
    status;

  /*
    Read one layer channel.  Channel types < -1 carry layer-mask data,
    which is decoded into a scratch 'mask' image instead of the layer image
    itself.  On return the blob is positioned just past the channel data.
  */
  channel_image=image;
  mask=(Image *) NULL;
  if ((layer_info->channel_info[channel].type < -1) &&
      (layer_info->mask.page.width > 0) && (layer_info->mask.page.height > 0))
    {
      const char
        *option;

      /*
        Ignore mask that is not a user supplied layer mask, if the mask is
        disabled or if the flags have unsupported values.
      */
      option=GetImageOption(image_info,"psd:preserve-opacity-mask");
      if ((layer_info->channel_info[channel].type != -2) ||
          (layer_info->mask.flags > 2) || ((layer_info->mask.flags & 0x02) &&
          (IsStringTrue(option) == MagickFalse)))
        {
          /* Skip the payload; its size includes the 2-byte compression
             word the caller already consumed. */
          (void) SeekBlob(image,(MagickOffsetType)
            layer_info->channel_info[channel].size-2,SEEK_CUR);
          return(MagickTrue);
        }
      mask=CloneImage(image,layer_info->mask.page.width,
        layer_info->mask.page.height,MagickFalse,exception);
      if (mask != (Image *) NULL)
        {
          (void) ResetImagePixels(mask,exception);
          mask->matte=MagickFalse;
          channel_image=mask;
        }
    }
  offset=TellBlob(image);
  status=MagickFalse;
  switch(compression)
  {
    case Raw:
      status=ReadPSDChannelRaw(channel_image,psd_info->channels,
        (ssize_t) layer_info->channel_info[channel].type,exception);
      break;
    case RLE:
    {
      MagickOffsetType
        *sizes;

      sizes=ReadPSDRLESizes(channel_image,psd_info,channel_image->rows);
      if (sizes == (MagickOffsetType *) NULL)
        {
          /*
            Bug fix: the cloned mask image leaked here when the RLE size
            table could not be allocated (ThrowBinaryException returns
            immediately); release it before throwing.
          */
          if (mask != (Image *) NULL)
            mask=DestroyImage(mask);
          ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
            image->filename);
        }
      status=ReadPSDChannelRLE(channel_image,psd_info,
        (ssize_t) layer_info->channel_info[channel].type,sizes,exception);
      sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes);
    }
    break;
    case ZipWithPrediction:
    case ZipWithoutPrediction:
#ifdef MAGICKCORE_ZLIB_DELEGATE
      status=ReadPSDChannelZip(channel_image,layer_info->channels,
        (ssize_t) layer_info->channel_info[channel].type,compression,
        layer_info->channel_info[channel].size-2,exception);
#else
      (void) ThrowMagickException(exception,GetMagickModule(),
        MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn",
        "'%s' (ZLIB)",image->filename);
#endif
      break;
    default:
      (void) ThrowMagickException(exception,GetMagickModule(),TypeWarning,
        "CompressionNotSupported","'%.20g'",(double) compression);
      break;
  }
  /* Always reposition to the end of this channel's data so a decode
     failure cannot desynchronize the parser. */
  (void) SeekBlob(image,offset+layer_info->channel_info[channel].size-2,
    SEEK_SET);
  if (status == MagickFalse)
    {
      if (mask != (Image *) NULL)
        (void) DestroyImage(mask);
      ThrowBinaryException(CoderError,"UnableToDecompressImage",
        image->filename);
    }
  if (mask != (Image *) NULL)
    {
      /* Transfer ownership of the decoded mask to the layer. */
      if (layer_info->mask.image != (Image *) NULL)
        layer_info->mask.image=DestroyImage(layer_info->mask.image);
      layer_info->mask.image=mask;
    }
  return(status);
}
static MagickBooleanType ReadPSDLayer(Image *image,const ImageInfo *image_info,
  const PSDInfo *psd_info,LayerInfo* layer_info,ExceptionInfo *exception)
{
  char
    message[MaxTextExtent];
  MagickBooleanType
    status;
  PSDCompressionType
    compression;
  ssize_t
    j;
  /*
    Populate layer_info->image: set blend mode, colorspace and the hidden
    psd:* artifacts, read every channel of the layer, then apply opacity
    and (optionally) the layer mask.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " setting up new layer image");
  if (psd_info->mode != IndexedMode)
    (void) SetImageBackgroundColor(layer_info->image);
  layer_info->image->compose=PSDBlendModeToCompositeOperator(
    layer_info->blendkey);
  if (layer_info->visible == MagickFalse)
    {
      layer_info->image->compose=NoCompositeOp;
      (void) SetImageArtifact(layer_info->image,"psd:layer.invisible","true");
    }
  if (psd_info->mode == CMYKMode)
    (void) SetImageColorspace(layer_info->image,CMYKColorspace);
  else if ((psd_info->mode == BitmapMode) || (psd_info->mode == DuotoneMode) ||
    (psd_info->mode == GrayscaleMode))
    (void) SetImageColorspace(layer_info->image,GRAYColorspace);
  /*
    Set up some hidden attributes for folks that need them.
  */
  (void) FormatLocaleString(message,MaxTextExtent,"%.20g",
    (double) layer_info->page.x);
  (void) SetImageArtifact(layer_info->image,"psd:layer.x",message);
  (void) FormatLocaleString(message,MaxTextExtent,"%.20g",
    (double) layer_info->page.y);
  (void) SetImageArtifact(layer_info->image,"psd:layer.y",message);
  (void) FormatLocaleString(message,MaxTextExtent,"%.20g",(double)
    layer_info->opacity);
  (void) SetImageArtifact(layer_info->image,"psd:layer.opacity",message);
  (void) SetImageProperty(layer_info->image,"label",(char *) layer_info->name);
  status=MagickTrue;
  for (j=0; j < (ssize_t) layer_info->channels; j++)
  {
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " reading data for channel %.20g",(double) j);
    /* Each channel is prefixed by its own 2-byte compression type. */
    compression=(PSDCompressionType) ReadBlobShort(layer_info->image);
    if ((compression == ZipWithPrediction) && (image->depth == 32))
      {
        (void) ThrowMagickException(exception,GetMagickModule(),
          TypeError,"CompressionNotSupported","ZipWithPrediction(32 bit)");
        return(MagickFalse);
      }
    layer_info->image->compression=ConvertPSDCompression(compression);
    /* Type -1 is the transparency channel: the layer has an alpha. */
    if (layer_info->channel_info[j].type == -1)
      layer_info->image->matte=MagickTrue;
    status=ReadPSDChannel(layer_info->image,image_info,psd_info,layer_info,
      (size_t) j,compression,exception);
    InheritException(exception,&layer_info->image->exception);
    if (status == MagickFalse)
      break;
  }
  if (status != MagickFalse)
    status=ApplyPSDLayerOpacity(layer_info->image,layer_info->opacity,
      MagickFalse,exception);
  /* PSD stores CMYK inverted relative to ImageMagick's representation. */
  if ((status != MagickFalse) &&
      (layer_info->image->colorspace == CMYKColorspace))
    status=NegateImage(layer_info->image,MagickFalse);
  if ((status != MagickFalse) && (layer_info->mask.image != (Image *) NULL))
    {
      const char
        *option;
      layer_info->mask.image->page.x=layer_info->mask.page.x;
      layer_info->mask.image->page.y=layer_info->mask.page.y;
      /* Do not composite the mask when it is disabled */
      if ((layer_info->mask.flags & 0x02) == 0x02)
        layer_info->mask.image->compose=NoCompositeOp;
      else
        status=ApplyPSDOpacityMask(layer_info->image,layer_info->mask.image,
          layer_info->mask.background == 0 ? 0 : QuantumRange,MagickFalse,
          exception);
      option=GetImageOption(image_info,"psd:preserve-opacity-mask");
      if (IsStringTrue(option) != MagickFalse)
        PreservePSDOpacityMask(image,layer_info,exception);
      layer_info->mask.image=DestroyImage(layer_info->mask.image);
    }
  return(status);
}
static MagickBooleanType CheckPSDChannels(const PSDInfo *psd_info,
  LayerInfo *layer_info)
{
  int
    required;
  register ssize_t
    j;

  /*
    Verify the layer declares every color channel its color mode requires.
    A bit is set for each required channel and cleared as the matching
    channel id is encountered; anything left set (other than a satisfied
    alpha) means a channel is missing.
  */
  if (layer_info->channels < psd_info->min_channels)
    return(MagickFalse);
  required=RedChannel;
  if (psd_info->min_channels >= 3)
    required|=(GreenChannel | BlueChannel);
  if (psd_info->min_channels >= 4)
    required|=BlackChannel;
  for (j=0; j < (ssize_t) layer_info->channels; j++)
  {
    short
      id;

    id=layer_info->channel_info[j].type;
    if (id == -1)
      {
        /* Transparency channel present. */
        required|=AlphaChannel;
        continue;
      }
    if (id < -1)
      continue;  /* mask channels do not count towards color data */
    switch (id)
    {
      case 0: required&=~RedChannel; break;
      case 1: required&=~GreenChannel; break;
      case 2: required&=~BlueChannel; break;
      case 3: required&=~BlackChannel; break;
      default: break;
    }
  }
  if (required == 0)
    return(MagickTrue);
  if ((required == AlphaChannel) &&
      (layer_info->channels >= psd_info->min_channels + 1))
    return(MagickTrue);
  return(MagickFalse);
}
static MagickBooleanType ReadPSDLayersInternal(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,
  const MagickBooleanType skip_layers,ExceptionInfo *exception)
{
  char
    type[4];
  LayerInfo
    *layer_info;
  MagickSizeType
    size;
  MagickBooleanType
    status;
  register ssize_t
    i;
  ssize_t
    count,
    j,
    number_layers;
  /*
    Parse the PSD "layer and mask information" section: per-layer records
    (geometry, channel metadata, mask, blending ranges, name, additional
    info), then decode each layer's pixel data and chain the layer images
    onto the base image list.
  */
  size=GetPSDSize(psd_info,image);
  if (size == 0)
    {
      /*
        Skip layers & masks.
      */
      (void) ReadBlobLong(image);
      count=ReadBlob(image,4,(unsigned char *) type);
      if (count == 4)
        ReversePSDString(image,type,(size_t) count);
      if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0))
        return(MagickTrue);
      else
        {
          /* Look for a 16/32-bit layer section ("Lr16"/"Lr32"). */
          count=ReadBlob(image,4,(unsigned char *) type);
          if (count == 4)
            ReversePSDString(image,type,4);
          if ((count == 4) && ((LocaleNCompare(type,"Lr16",4) == 0) ||
              (LocaleNCompare(type,"Lr32",4) == 0)))
            size=GetPSDSize(psd_info,image);
          else
            return(MagickTrue);
        }
    }
  status=MagickTrue;
  if (size != 0)
    {
      layer_info=(LayerInfo *) NULL;
      number_layers=(ssize_t) ReadBlobSignedShort(image);
      if (number_layers < 0)
        {
          /*
            The first alpha channel in the merged result contains the
            transparency data for the merged result.
          */
          number_layers=MagickAbsoluteValue(number_layers);
          if (image->debug != MagickFalse)
            (void) LogMagickEvent(CoderEvent,GetMagickModule(),
              " negative layer count corrected for");
          image->matte=MagickTrue;
        }
      /*
        We only need to know if the image has an alpha channel
      */
      if (skip_layers != MagickFalse)
        return(MagickTrue);
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " image contains %.20g layers",(double) number_layers);
      if (number_layers == 0)
        ThrowBinaryException(CorruptImageError,"InvalidNumberOfLayers",
          image->filename);
      layer_info=(LayerInfo *) AcquireQuantumMemory((size_t) number_layers,
        sizeof(*layer_info));
      if (layer_info == (LayerInfo *) NULL)
        {
          if (image->debug != MagickFalse)
            (void) LogMagickEvent(CoderEvent,GetMagickModule(),
              " allocation of LayerInfo failed");
          ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
            image->filename);
        }
      (void) memset(layer_info,0,(size_t) number_layers*sizeof(*layer_info));
      /*
        Pass 1: read every layer record without touching pixel data.
      */
      for (i=0; i < number_layers; i++)
      {
        ssize_t
          top,
          left,
          bottom,
          right;
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " reading layer #%.20g",(double) i+1);
        top=(ssize_t) ReadBlobSignedLong(image);
        left=(ssize_t) ReadBlobSignedLong(image);
        bottom=(ssize_t) ReadBlobSignedLong(image);
        right=(ssize_t) ReadBlobSignedLong(image);
        if ((right < left) || (bottom < top))
          {
            layer_info=DestroyLayerInfo(layer_info,number_layers);
            ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
              image->filename);
          }
        layer_info[i].page.y=top;
        layer_info[i].page.x=left;
        layer_info[i].page.width=(size_t) (right-left);
        layer_info[i].page.height=(size_t) (bottom-top);
        layer_info[i].channels=ReadBlobShort(image);
        if (layer_info[i].channels > MaxPSDChannels)
          {
            layer_info=DestroyLayerInfo(layer_info,number_layers);
            ThrowBinaryException(CorruptImageError,"MaximumChannelsExceeded",
              image->filename);
          }
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " offset(%.20g,%.20g), size(%.20g,%.20g), channels=%.20g",
            (double) layer_info[i].page.x,(double) layer_info[i].page.y,
            (double) layer_info[i].page.height,(double)
            layer_info[i].page.width,(double) layer_info[i].channels);
        /* Per-channel metadata: 2-byte type id plus payload byte count. */
        for (j=0; j < (ssize_t) layer_info[i].channels; j++)
        {
          layer_info[i].channel_info[j].type=(short) ReadBlobShort(image);
          if ((layer_info[i].channel_info[j].type < -4) ||
              (layer_info[i].channel_info[j].type > 4))
            {
              layer_info=DestroyLayerInfo(layer_info,number_layers);
              ThrowBinaryException(CorruptImageError,"NoSuchImageChannel",
                image->filename);
            }
          layer_info[i].channel_info[j].size=(size_t) GetPSDSize(psd_info,
            image);
          if (image->debug != MagickFalse)
            (void) LogMagickEvent(CoderEvent,GetMagickModule(),
              " channel[%.20g]: type=%.20g, size=%.20g",(double) j,
              (double) layer_info[i].channel_info[j].type,
              (double) layer_info[i].channel_info[j].size);
        }
        if (CheckPSDChannels(psd_info,&layer_info[i]) == MagickFalse)
          {
            layer_info=DestroyLayerInfo(layer_info,number_layers);
            ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
              image->filename);
          }
        count=ReadBlob(image,4,(unsigned char *) type);
        if (count == 4)
          ReversePSDString(image,type,4);
        if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0))
          {
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " layer type was %.4s instead of 8BIM", type);
            layer_info=DestroyLayerInfo(layer_info,number_layers);
            ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
              image->filename);
          }
        count=ReadBlob(image,4,(unsigned char *) layer_info[i].blendkey);
        if (count != 4)
          {
            layer_info=DestroyLayerInfo(layer_info,number_layers);
            ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
              image->filename);
          }
        ReversePSDString(image,layer_info[i].blendkey,4);
        layer_info[i].opacity=(Quantum) ScaleCharToQuantum((unsigned char)
          ReadBlobByte(image));
        layer_info[i].clipping=(unsigned char) ReadBlobByte(image);
        layer_info[i].flags=(unsigned char) ReadBlobByte(image);
        /* Flags bit 1 set means the layer is hidden. */
        layer_info[i].visible=!(layer_info[i].flags & 0x02);
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " blend=%.4s, opacity=%.20g, clipping=%s, flags=%d, visible=%s",
            layer_info[i].blendkey,(double) layer_info[i].opacity,
            layer_info[i].clipping ? "true" : "false",layer_info[i].flags,
            layer_info[i].visible ? "true" : "false");
        (void) ReadBlobByte(image); /* filler */
        size=ReadBlobLong(image);
        if (size != 0)
          {
            MagickSizeType
              combined_length,
              length;
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " layer contains additional info");
            length=ReadBlobLong(image);
            /* combined_length tracks bytes consumed of the 'size'-byte
               extra-data block, including each sub-block's length word. */
            combined_length=length+4;
            if (length != 0)
              {
                /*
                  Layer mask info.
                */
                layer_info[i].mask.page.y=(ssize_t) ReadBlobSignedLong(image);
                layer_info[i].mask.page.x=(ssize_t) ReadBlobSignedLong(image);
                layer_info[i].mask.page.height=(size_t) (
                  ReadBlobSignedLong(image)-layer_info[i].mask.page.y);
                layer_info[i].mask.page.width=(size_t) (
                  ReadBlobSignedLong(image)-layer_info[i].mask.page.x);
                layer_info[i].mask.background=(unsigned char) ReadBlobByte(
                  image);
                layer_info[i].mask.flags=(unsigned char) ReadBlobByte(image);
                /* Flag bit 0 clear: mask offsets are absolute; convert to
                   layer-relative coordinates. */
                if (!(layer_info[i].mask.flags & 0x01))
                  {
                    layer_info[i].mask.page.y=layer_info[i].mask.page.y-
                      layer_info[i].page.y;
                    layer_info[i].mask.page.x=layer_info[i].mask.page.x-
                      layer_info[i].page.x;
                  }
                if (image->debug != MagickFalse)
                  (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                    " layer mask: offset(%.20g,%.20g), size(%.20g,%.20g), length=%.20g",
                    (double) layer_info[i].mask.page.x,(double)
                    layer_info[i].mask.page.y,(double) layer_info[i].mask.page.width,
                    (double) layer_info[i].mask.page.height,(double)
                    ((MagickOffsetType) length)-18);
                /*
                  Skip over the rest of the layer mask information.
                */
                /* NOTE(review): 'length' comes straight from the file; if
                   it is < 18 the unsigned subtraction below wraps to a huge
                   byte count.  DiscardBlobBytes will then fail at EOF, but
                   an explicit bounds check would be cleaner — confirm
                   against upstream. */
                if (DiscardBlobBytes(image,(MagickSizeType) (length-18)) == MagickFalse)
                  {
                    layer_info=DestroyLayerInfo(layer_info,number_layers);
                    ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile",
                      image->filename);
                  }
              }
            length=ReadBlobLong(image);
            combined_length+=length+4;
            if (length != 0)
              {
                /*
                  Layer blending ranges info.
                */
                if (image->debug != MagickFalse)
                  (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                    " layer blending ranges: length=%.20g",(double)
                    ((MagickOffsetType) length));
                if (DiscardBlobBytes(image,length) == MagickFalse)
                  {
                    layer_info=DestroyLayerInfo(layer_info,number_layers);
                    ThrowBinaryException(CorruptImageError,
                      "UnexpectedEndOfFile",image->filename);
                  }
              }
            /*
              Layer name.
            */
            length=(MagickSizeType) (unsigned char) ReadBlobByte(image);
            combined_length+=length+1;
            if (length > 0)
              (void) ReadBlob(image,(size_t) length++,layer_info[i].name);
            layer_info[i].name[length]='\0';
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " layer name: %s",layer_info[i].name);
            /* The Pascal-style name is padded to a multiple of 4 bytes. */
            if ((length % 4) != 0)
              {
                length=4-(length % 4);
                combined_length+=length;
                /* Skip over the padding of the layer name */
                if (DiscardBlobBytes(image,length) == MagickFalse)
                  {
                    layer_info=DestroyLayerInfo(layer_info,number_layers);
                    ThrowBinaryException(CorruptImageError,
                      "UnexpectedEndOfFile",image->filename);
                  }
              }
            /* Whatever remains of the extra-data block is the raw
               "additional layer information", preserved verbatim. */
            length=(MagickSizeType) size-combined_length;
            if (length > 0)
              {
                unsigned char
                  *info;
                if (length > GetBlobSize(image))
                  {
                    layer_info=DestroyLayerInfo(layer_info,number_layers);
                    ThrowBinaryException(CorruptImageError,
                      "InsufficientImageDataInFile",image->filename);
                  }
                layer_info[i].info=AcquireStringInfo((const size_t) length);
                info=GetStringInfoDatum(layer_info[i].info);
                (void) ReadBlob(image,(const size_t) length,info);
              }
          }
      }
      /*
        Pass 2: allocate an image per non-empty layer and attach the
        additional-info profile.
      */
      for (i=0; i < number_layers; i++)
      {
        if ((layer_info[i].page.width == 0) ||
            (layer_info[i].page.height == 0))
          {
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " layer data is empty");
            if (layer_info[i].info != (StringInfo *) NULL)
              layer_info[i].info=DestroyStringInfo(layer_info[i].info);
            continue;
          }
        /*
          Allocate layered image.
        */
        layer_info[i].image=CloneImage(image,layer_info[i].page.width,
          layer_info[i].page.height,MagickFalse,exception);
        if (layer_info[i].image == (Image *) NULL)
          {
            layer_info=DestroyLayerInfo(layer_info,number_layers);
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " allocation of image for layer %.20g failed",(double) i);
            ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
              image->filename);
          }
        if (layer_info[i].info != (StringInfo *) NULL)
          {
            (void) SetImageProfile(layer_info[i].image,"psd:additional-info",
              layer_info[i].info);
            layer_info[i].info=DestroyStringInfo(layer_info[i].info);
          }
      }
      if (image_info->ping == MagickFalse)
        {
          /*
            Pass 3: decode pixel data (skipped entirely when pinging).
          */
          for (i=0; i < number_layers; i++)
          {
            if (layer_info[i].image == (Image *) NULL)
              {
                /* Empty layer: skip its channel payloads in the blob. */
                for (j=0; j < (ssize_t) layer_info[i].channels; j++)
                {
                  if (DiscardBlobBytes(image,(MagickSizeType)
                      layer_info[i].channel_info[j].size) == MagickFalse)
                    {
                      layer_info=DestroyLayerInfo(layer_info,number_layers);
                      ThrowBinaryException(CorruptImageError,
                        "UnexpectedEndOfFile",image->filename);
                    }
                }
                continue;
              }
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " reading data for layer %.20g",(double) i);
            status=ReadPSDLayer(image,image_info,psd_info,&layer_info[i],
              exception);
            if (status == MagickFalse)
              break;
            status=SetImageProgress(image,LoadImagesTag,(MagickOffsetType) i,
              (MagickSizeType) number_layers);
            if (status == MagickFalse)
              break;
          }
        }
      if (status != MagickFalse)
        {
          /*
            Compact away empty layers, then chain the remaining layer
            images onto the base image's image list.
          */
          for (i=0; i < number_layers; i++)
          {
            if (layer_info[i].image == (Image *) NULL)
              {
                for (j=i; j < number_layers - 1; j++)
                  layer_info[j] = layer_info[j+1];
                number_layers--;
                i--;
              }
          }
          if (number_layers > 0)
            {
              for (i=0; i < number_layers; i++)
              {
                if (i > 0)
                  layer_info[i].image->previous=layer_info[i-1].image;
                if (i < (number_layers-1))
                  layer_info[i].image->next=layer_info[i+1].image;
                layer_info[i].image->page=layer_info[i].page;
              }
              image->next=layer_info[0].image;
              layer_info[0].image->previous=image;
            }
          layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info);
        }
      else
        layer_info=DestroyLayerInfo(layer_info,number_layers);
    }
  return(status);
}
ModuleExport MagickBooleanType ReadPSDLayers(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,
  const MagickBooleanType skip_layers,ExceptionInfo *exception)
{
  /*
    Public entry point for reading the layer section: enforce the coder
    security policy, then delegate to the internal layer reader.
  */
  if (IsRightsAuthorized(CoderPolicyDomain,ReadPolicyRights,"PSD") ==
      MagickFalse)
    return(MagickFalse);
  return(ReadPSDLayersInternal(image,image_info,psd_info,skip_layers,
    exception));
}
static MagickBooleanType ReadPSDMergedImage(const ImageInfo *image_info,
  Image* image,const PSDInfo* psd_info,ExceptionInfo *exception)
{
  MagickBooleanType
    status;
  MagickOffsetType
    *sizes;
  PSDCompressionType
    compression;
  register ssize_t
    c;

  /*
    Decode the flattened (merged) composite that follows the layer section.
    Only RAW and RLE compression are supported here; anything else yields a
    type warning and MagickFalse.
  */
  compression=(PSDCompressionType) ReadBlobMSBShort(image);
  image->compression=ConvertPSDCompression(compression);
  if ((compression != Raw) && (compression != RLE))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        TypeWarning,"CompressionNotSupported","'%.20g'",(double) compression);
      return(MagickFalse);
    }
  sizes=(MagickOffsetType *) NULL;
  if (compression == RLE)
    {
      /* One RLE byte count per row per channel precedes the pixel data. */
      sizes=ReadPSDRLESizes(image,psd_info,image->rows*psd_info->channels);
      if (sizes == (MagickOffsetType *) NULL)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
    }
  status=MagickTrue;
  for (c=0; c < (ssize_t) psd_info->channels; c++)
  {
    ssize_t
      type;

    /* A two-channel grayscale image stores alpha as the second channel;
       remap it to the alpha pseudo-type. */
    type=((c == 1) && (psd_info->channels == 2)) ? -1 : c;
    if (compression == RLE)
      status=ReadPSDChannelRLE(image,psd_info,type,sizes+(c*image->rows),
        exception);
    else
      status=ReadPSDChannelRaw(image,psd_info->channels,type,exception);
    if (status != MagickFalse)
      status=SetImageProgress(image,LoadImagesTag,(MagickOffsetType) c,
        psd_info->channels);
    if (status == MagickFalse)
      break;
  }
  if ((status != MagickFalse) && (image->colorspace == CMYKColorspace))
    status=NegateImage(image,MagickFalse);
  if (status != MagickFalse)
    status=CorrectPSDAlphaBlend(image_info,image,exception);
  sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes);
  return(status);
}
static Image *ReadPSDImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
Image
*image;
MagickBooleanType
has_merged_image,
skip_layers;
MagickOffsetType
offset;
MagickSizeType
length;
MagickBooleanType
status;
PSDInfo
psd_info;
register ssize_t
i;
size_t
imageListLength;
ssize_t
count;
StringInfo
*profile;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=AcquireImage(image_info);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Read image header.
*/
image->endian=MSBEndian;
count=ReadBlob(image,4,(unsigned char *) psd_info.signature);
psd_info.version=ReadBlobMSBShort(image);
if ((count != 4) || (LocaleNCompare(psd_info.signature,"8BPS",4) != 0) ||
((psd_info.version != 1) && (psd_info.version != 2)))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
(void) ReadBlob(image,6,psd_info.reserved);
psd_info.channels=ReadBlobMSBShort(image);
if (psd_info.channels < 1)
ThrowReaderException(CorruptImageError,"MissingImageChannel");
if (psd_info.channels > MaxPSDChannels)
ThrowReaderException(CorruptImageError,"MaximumChannelsExceeded");
psd_info.rows=ReadBlobMSBLong(image);
psd_info.columns=ReadBlobMSBLong(image);
if ((psd_info.version == 1) && ((psd_info.rows > 30000) ||
(psd_info.columns > 30000)))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
psd_info.depth=ReadBlobMSBShort(image);
if ((psd_info.depth != 1) && (psd_info.depth != 8) &&
(psd_info.depth != 16) && (psd_info.depth != 32))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
psd_info.mode=ReadBlobMSBShort(image);
if ((psd_info.mode == IndexedMode) && (psd_info.channels > 3))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Image is %.20g x %.20g with channels=%.20g, depth=%.20g, mode=%s",
(double) psd_info.columns,(double) psd_info.rows,(double)
psd_info.channels,(double) psd_info.depth,ModeToString((PSDImageType)
psd_info.mode));
if (EOFBlob(image) != MagickFalse)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
/*
Initialize image.
*/
image->depth=psd_info.depth;
image->columns=psd_info.columns;
image->rows=psd_info.rows;
status=SetImageExtent(image,image->columns,image->rows);
if (status == MagickFalse)
{
InheritException(exception,&image->exception);
return(DestroyImageList(image));
}
status=ResetImagePixels(image,exception);
if (status == MagickFalse)
{
InheritException(exception,&image->exception);
return(DestroyImageList(image));
}
psd_info.min_channels=3;
if (psd_info.mode == LabMode)
(void) SetImageColorspace(image,LabColorspace);
if (psd_info.mode == CMYKMode)
{
psd_info.min_channels=4;
(void) SetImageColorspace(image,CMYKColorspace);
}
else if ((psd_info.mode == BitmapMode) || (psd_info.mode == GrayscaleMode) ||
(psd_info.mode == DuotoneMode))
{
if (psd_info.depth != 32)
{
status=AcquireImageColormap(image,(size_t) (psd_info.depth != 16 ?
256 : 65536));
if (status == MagickFalse)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Image colormap allocated");
}
psd_info.min_channels=1;
(void) SetImageColorspace(image,GRAYColorspace);
}
if (psd_info.channels < psd_info.min_channels)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
/*
Read PSD raster colormap only present for indexed and duotone images.
*/
length=ReadBlobMSBLong(image);
if ((psd_info.mode == IndexedMode) && (length < 3))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if (length != 0)
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" reading colormap");
if ((psd_info.mode == DuotoneMode) || (psd_info.depth == 32))
{
/*
Duotone image data; the format of this data is undocumented.
*/
(void) SeekBlob(image,(const MagickOffsetType) length,SEEK_CUR);
}
else
{
size_t
number_colors;
/*
Read PSD raster colormap.
*/
number_colors=(size_t) length/3;
if (number_colors > 65536)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if (AcquireImageColormap(image,number_colors) == MagickFalse)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
for (i=0; i < (ssize_t) image->colors; i++)
image->colormap[i].red=ScaleCharToQuantum((unsigned char)
ReadBlobByte(image));
for (i=0; i < (ssize_t) image->colors; i++)
image->colormap[i].green=ScaleCharToQuantum((unsigned char)
ReadBlobByte(image));
for (i=0; i < (ssize_t) image->colors; i++)
image->colormap[i].blue=ScaleCharToQuantum((unsigned char)
ReadBlobByte(image));
image->matte=MagickFalse;
}
}
if ((image->depth == 1) && (image->storage_class != PseudoClass))
ThrowReaderException(CorruptImageError, "ImproperImageHeader");
has_merged_image=MagickTrue;
profile=(StringInfo *) NULL;
length=ReadBlobMSBLong(image);
if (length != 0)
{
unsigned char
*blocks;
/*
Image resources block.
*/
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" reading image resource blocks - %.20g bytes",(double)
((MagickOffsetType) length));
if (length > GetBlobSize(image))
ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
blocks=(unsigned char *) AcquireQuantumMemory((size_t) length,
sizeof(*blocks));
if (blocks == (unsigned char *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
count=ReadBlob(image,(size_t) length,blocks);
if ((count != (ssize_t) length) || (length < 4) ||
(LocaleNCompare((char *) blocks,"8BIM",4) != 0))
{
blocks=(unsigned char *) RelinquishMagickMemory(blocks);
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
}
profile=ParseImageResourceBlocks(image,blocks,(size_t) length,
&has_merged_image);
blocks=(unsigned char *) RelinquishMagickMemory(blocks);
}
/*
Layer and mask block.
*/
length=GetPSDSize(&psd_info,image);
if (length == 8)
{
length=ReadBlobMSBLong(image);
length=ReadBlobMSBLong(image);
}
offset=TellBlob(image);
skip_layers=MagickFalse;
if ((image_info->number_scenes == 1) && (image_info->scene == 0) &&
(has_merged_image != MagickFalse))
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" read composite only");
skip_layers=MagickTrue;
}
if (length == 0)
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" image has no layers");
}
else
{
if (ReadPSDLayersInternal(image,image_info,&psd_info,skip_layers,
exception) != MagickTrue)
{
if (profile != (StringInfo *) NULL)
profile=DestroyStringInfo(profile);
(void) CloseBlob(image);
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Skip the rest of the layer and mask information.
*/
(void) SeekBlob(image,offset+length,SEEK_SET);
}
/*
If we are only "pinging" the image, then we're done - so return.
*/
if (EOFBlob(image) != MagickFalse)
{
if (profile != (StringInfo *) NULL)
profile=DestroyStringInfo(profile);
ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile");
}
if (image_info->ping != MagickFalse)
{
if (profile != (StringInfo *) NULL)
profile=DestroyStringInfo(profile);
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
/*
Read the precombined layer, present for PSD < 4 compatibility.
*/
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" reading the precombined layer");
imageListLength=GetImageListLength(image);
if (has_merged_image != MagickFalse || imageListLength == 1)
has_merged_image=(MagickBooleanType) ReadPSDMergedImage(image_info,image,
&psd_info,exception);
if ((has_merged_image == MagickFalse) && (imageListLength == 1) &&
(length != 0))
{
(void) SeekBlob(image,offset,SEEK_SET);
status=ReadPSDLayersInternal(image,image_info,&psd_info,MagickFalse,
exception);
if (status != MagickTrue)
{
if (profile != (StringInfo *) NULL)
profile=DestroyStringInfo(profile);
(void) CloseBlob(image);
image=DestroyImageList(image);
return((Image *) NULL);
}
}
if (has_merged_image == MagickFalse)
{
Image
*merged;
if (imageListLength == 1)
{
if (profile != (StringInfo *) NULL)
profile=DestroyStringInfo(profile);
ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
}
image->background_color.opacity=TransparentOpacity;
(void) SetImageBackgroundColor(image);
merged=MergeImageLayers(image,FlattenLayer,exception);
ReplaceImageInList(&image,merged);
}
if (profile != (StringInfo *) NULL)
{
Image
*next;
next=image;
while (next != (Image *) NULL)
{
(void) SetImageProfile(next,GetStringInfoName(profile),profile);
next=next->next;
}
profile=DestroyStringInfo(profile);
}
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e g i s t e r P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RegisterPSDImage() adds properties for the PSD image format to
% the list of supported formats. The properties include the image format
% tag, a method to read and/or write the format, whether the format
% supports the saving of more than one frame to the same file or blob,
% whether the format supports native in-memory I/O, and a brief
% description of the format.
%
% The format of the RegisterPSDImage method is:
%
% size_t RegisterPSDImage(void)
%
*/
ModuleExport size_t RegisterPSDImage(void)
{
MagickInfo
*entry;
entry=SetMagickInfo("PSB");
entry->decoder=(DecodeImageHandler *) ReadPSDImage;
entry->encoder=(EncodeImageHandler *) WritePSDImage;
entry->magick=(IsImageFormatHandler *) IsPSD;
entry->seekable_stream=MagickTrue;
entry->description=ConstantString("Adobe Large Document Format");
entry->module=ConstantString("PSD");
(void) RegisterMagickInfo(entry);
entry=SetMagickInfo("PSD");
entry->decoder=(DecodeImageHandler *) ReadPSDImage;
entry->encoder=(EncodeImageHandler *) WritePSDImage;
entry->magick=(IsImageFormatHandler *) IsPSD;
entry->seekable_stream=MagickTrue;
entry->description=ConstantString("Adobe Photoshop bitmap");
entry->module=ConstantString("PSD");
(void) RegisterMagickInfo(entry);
return(MagickImageCoderSignature);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n r e g i s t e r P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnregisterPSDImage() removes format registrations made by the
% PSD module from the list of supported formats.
%
% The format of the UnregisterPSDImage method is:
%
% UnregisterPSDImage(void)
%
*/
ModuleExport void UnregisterPSDImage(void)
{
(void) UnregisterMagickInfo("PSB");
(void) UnregisterMagickInfo("PSD");
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePSDImage() writes an image in the Adobe Photoshop encoded image format.
%
% The format of the WritePSDImage method is:
%
% MagickBooleanType WritePSDImage(const ImageInfo *image_info,Image *image)
%
% A description of each parameter follows.
%
% o image_info: the image info.
%
% o image: The image.
%
*/
static inline ssize_t SetPSDOffset(const PSDInfo *psd_info,Image *image,
  const size_t offset)
{
  /*
    Write a PSD row-count/offset entry in the version-appropriate width:
    16-bit for version 1 (PSD), 32-bit for version 2 (PSB).
  */
  if (psd_info->version != 1)
    return(WriteBlobMSBLong(image,(unsigned int) offset));
  return(WriteBlobMSBShort(image,(unsigned short) offset));
}
static inline ssize_t WritePSDOffset(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size,const MagickOffsetType offset)
{
  MagickOffsetType
    restore_offset;

  ssize_t
    count;

  /*
    Seek back to a previously reserved offset slot, store `size` there in the
    version-appropriate width (16-bit for PSD, 32-bit for PSB), then restore
    the current blob position so sequential writing can continue.
  */
  restore_offset=TellBlob(image);
  (void) SeekBlob(image,offset,SEEK_SET);
  count=(psd_info->version == 1) ?
    WriteBlobMSBShort(image,(unsigned short) size) :
    WriteBlobMSBLong(image,(unsigned int) size);
  (void) SeekBlob(image,restore_offset,SEEK_SET);
  return(count);
}
static inline ssize_t SetPSDSize(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size)
{
  /*
    Write a PSD length field in the version-appropriate width: 32-bit for
    version 1 (PSD), 64-bit for version 2 (PSB).
  */
  if (psd_info->version != 1)
    return(WriteBlobMSBLongLong(image,size));
  return(WriteBlobMSBLong(image,(unsigned int) size));
}
static inline ssize_t WritePSDSize(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size,const MagickOffsetType offset)
{
  MagickOffsetType
    restore_offset;

  ssize_t
    count;

  /*
    Patch a previously reserved size slot at `offset` with `size` (32-bit for
    PSD version 1, 64-bit for PSB version 2), then restore the current blob
    position so sequential writing can continue.
  */
  restore_offset=TellBlob(image);
  (void) SeekBlob(image,offset,SEEK_SET);
  count=(psd_info->version == 1) ?
    WriteBlobMSBLong(image,(unsigned int) size) :
    WriteBlobMSBLongLong(image,size);
  (void) SeekBlob(image,restore_offset,SEEK_SET);
  return(count);
}
/*
  Compress `length` bytes from `pixels` into `compact_pixels` using the
  Packbits RLE scheme used by PSD: a run of N identical bytes is emitted as
  the control byte (257-N) followed by the byte; a literal run of N bytes is
  emitted as the control byte (N-1) followed by the bytes.  Returns the
  number of bytes written to `compact_pixels` (including the trailing 0x80
  end-of-data marker), or 0 on allocation failure.  The caller must supply a
  `compact_pixels` buffer large enough for worst-case expansion (see
  AcquireCompactPixels).
*/
static size_t PSDPackbitsEncodeImage(Image *image,const size_t length,
  const unsigned char *pixels,unsigned char *compact_pixels)
{
  int
    count;

  register ssize_t
    i,
    j;

  register unsigned char
    *q;

  unsigned char
    *packbits;

  /*
    Compress pixels with Packbits encoding.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(pixels != (unsigned char *) NULL);
  assert(compact_pixels != (unsigned char *) NULL);
  /* Scratch buffer for one literal run: 1 control byte + up to 127
     literals fits in 128 bytes (max index written below is 127). */
  packbits=(unsigned char *) AcquireQuantumMemory(128UL,sizeof(*packbits));
  if (packbits == (unsigned char *) NULL)
    ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  q=compact_pixels;
  /* i counts the remaining input bytes; the short tail cases (1-3 bytes)
     are handled explicitly so the default case can safely look 2 bytes
     ahead. */
  for (i=(ssize_t) length; i != 0; )
  {
    switch (i)
    {
      case 1:
      {
        i--;
        /* Single trailing byte: literal run of length 1. */
        *q++=(unsigned char) 0;
        *q++=(*pixels);
        break;
      }
      case 2:
      {
        i-=2;
        /* Two trailing bytes: literal run of length 2. */
        *q++=(unsigned char) 1;
        *q++=(*pixels);
        *q++=pixels[1];
        break;
      }
      case 3:
      {
        i-=3;
        if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
          {
            /* Three identical trailing bytes: packed run, control byte
               (257-3). */
            *q++=(unsigned char) ((256-3)+1);
            *q++=(*pixels);
            break;
          }
        /* Otherwise a literal run of length 3. */
        *q++=(unsigned char) 2;
        *q++=(*pixels);
        *q++=pixels[1];
        *q++=pixels[2];
        break;
      }
      default:
      {
        if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
          {
            /*
              Packed run.
            */
            count=3;
            /* Extend the run while bytes match, capped at 127 (the largest
               run a single control byte can describe). */
            while (((ssize_t) count < i) && (*pixels == *(pixels+count)))
            {
              count++;
              if (count >= 127)
                break;
            }
            i-=count;
            *q++=(unsigned char) ((256-count)+1);
            *q++=(*pixels);
            pixels+=count;
            break;
          }
        /*
          Literal run.
        */
        count=0;
        /* Collect literals until a run of 3 identical bytes begins, the
           input is nearly exhausted, or the 127-byte cap is reached. */
        while ((*(pixels+count) != *(pixels+count+1)) ||
               (*(pixels+count+1) != *(pixels+count+2)))
        {
          packbits[count+1]=pixels[count];
          count++;
          if (((ssize_t) count >= (i-3)) || (count >= 127))
            break;
        }
        i-=count;
        *packbits=(unsigned char) (count-1);
        /* Emit the control byte plus `count` literal bytes. */
        for (j=0; j <= (ssize_t) count; j++)
          *q++=packbits[j];
        pixels+=count;
        break;
      }
    }
  }
  *q++=(unsigned char) 128;  /* EOD marker */
  packbits=(unsigned char *) RelinquishMagickMemory(packbits);
  return((size_t) (q-compact_pixels));
}
static size_t WriteCompressionStart(const PSDInfo *psd_info,Image *image,
  const Image *next_image,const ssize_t channels)
{
  ssize_t
    channel,
    count,
    row;

  /*
    Emit the 2-byte compression tag for the upcoming channel data.  For RLE,
    additionally reserve one zeroed per-row byte-count slot for every row of
    every channel; these placeholders are patched later via WritePSDOffset.
  */
  if (next_image->compression == RLECompression)
    {
      count=WriteBlobMSBShort(image,RLE);
      for (channel=0; channel < channels; channel++)
        for (row=0; row < (ssize_t) next_image->rows; row++)
          count+=SetPSDOffset(psd_info,image,0);
      return((size_t) count);
    }
#ifdef MAGICKCORE_ZLIB_DELEGATE
  if (next_image->compression == ZipCompression)
    return((size_t) WriteBlobMSBShort(image,ZipWithoutPrediction));
#endif
  return((size_t) WriteBlobMSBShort(image,Raw));
}
/*
  Write one channel of `next_image` (selected by `quantum_type`) to the
  output blob `image`, honoring next_image->compression (Raw, RLE, or Zip
  when zlib is available).  When `separate` is set, the channel carries its
  own compression tag and `size_offset` is recomputed from the current blob
  position; otherwise `size_offset` is the caller-supplied position of the
  reserved per-row RLE byte-count slots.  Returns the number of compressed
  bytes written (0 on allocation failure).
*/
static size_t WritePSDChannel(const PSDInfo *psd_info,
  const ImageInfo *image_info,Image *image,Image *next_image,
  const QuantumType quantum_type, unsigned char *compact_pixels,
  MagickOffsetType size_offset,const MagickBooleanType separate)
{
  MagickBooleanType
    monochrome;

  QuantumInfo
    *quantum_info;

  register const PixelPacket
    *p;

  register ssize_t
    i;

  size_t
    count,
    length;

  ssize_t
    y;

  unsigned char
    *pixels;

#ifdef MAGICKCORE_ZLIB_DELEGATE

/* Zip output buffer size for each deflate() drain iteration. */
#define CHUNK 16384

  int
    flush,
    level;

  unsigned char
    *compressed_pixels;

  z_stream
    stream;

  compressed_pixels=(unsigned char *) NULL;
  flush=Z_NO_FLUSH;
#endif
  count=0;
  if (separate != MagickFalse)
    {
      /* Stand-alone channel: the compression tag just written occupies 2
         bytes, so the row-count slots start 2 bytes past here. */
      size_offset=TellBlob(image)+2;
      count+=WriteCompressionStart(psd_info,image,next_image,1);
    }
  if (next_image->depth > 8)
    next_image->depth=16;
  /* NOTE(review): monochrome is tested on `image` (the output list head),
     not `next_image` — confirm this is intentional. */
  monochrome=IsMonochromeImage(image,&image->exception) && (image->depth == 1)
    ? MagickTrue : MagickFalse;
  quantum_info=AcquireQuantumInfo(image_info,next_image);
  if (quantum_info == (QuantumInfo *) NULL)
    return(0);
  pixels=GetQuantumPixels(quantum_info);
#ifdef MAGICKCORE_ZLIB_DELEGATE
  if (next_image->compression == ZipCompression)
    {
      compressed_pixels=(unsigned char *) AcquireQuantumMemory(CHUNK,
        sizeof(*compressed_pixels));
      if (compressed_pixels == (unsigned char *) NULL)
        {
          quantum_info=DestroyQuantumInfo(quantum_info);
          return(0);
        }
      memset(&stream,0,sizeof(stream));
      stream.data_type=Z_BINARY;
      level=Z_DEFAULT_COMPRESSION;
      /* -quality 1..9 maps directly onto the zlib compression level. */
      if ((image_info->quality > 0 && image_info->quality < 10))
        level=(int) image_info->quality;
      if (deflateInit(&stream,level) != Z_OK)
        {
          quantum_info=DestroyQuantumInfo(quantum_info);
          compressed_pixels=(unsigned char *) RelinquishMagickMemory(
            compressed_pixels);
          return(0);
        }
    }
#endif
  for (y=0; y < (ssize_t) next_image->rows; y++)
  {
    p=GetVirtualPixels(next_image,0,y,next_image->columns,1,&image->exception);
    if (p == (const PixelPacket *) NULL)
      break;
    length=ExportQuantumPixels(next_image,(CacheView *) NULL,quantum_info,
      quantum_type,pixels,&image->exception);
    /* Bitmap-mode PSD stores 1-bit data inverted relative to ImageMagick. */
    if (monochrome != MagickFalse)
      for (i=0; i < (ssize_t) length; i++)
        pixels[i]=(~pixels[i]);
    if (next_image->compression == RLECompression)
      {
        length=PSDPackbitsEncodeImage(image,length,pixels,compact_pixels);
        count+=WriteBlob(image,length,compact_pixels);
        /* Back-patch this row's compressed byte count, then advance to the
           next row's slot. */
        size_offset+=WritePSDOffset(psd_info,image,length,size_offset);
      }
#ifdef MAGICKCORE_ZLIB_DELEGATE
    else if (next_image->compression == ZipCompression)
      {
        stream.avail_in=(uInt) length;
        stream.next_in=(Bytef *) pixels;
        if (y == (ssize_t) next_image->rows-1)
          flush=Z_FINISH;
        /* Drain deflate output in CHUNK-sized pieces until it fits. */
        do {
            stream.avail_out=(uInt) CHUNK;
            stream.next_out=(Bytef *) compressed_pixels;
            if (deflate(&stream,flush) == Z_STREAM_ERROR)
              break;
            length=(size_t) CHUNK-stream.avail_out;
            if (length > 0)
              count+=WriteBlob(image,length,compressed_pixels);
        } while (stream.avail_out == 0);
      }
#endif
    else
      count+=WriteBlob(image,length,pixels);
  }
#ifdef MAGICKCORE_ZLIB_DELEGATE
  if (next_image->compression == ZipCompression)
    {
      (void) deflateEnd(&stream);
      compressed_pixels=(unsigned char *) RelinquishMagickMemory(
        compressed_pixels);
    }
#endif
  quantum_info=DestroyQuantumInfo(quantum_info);
  return(count);
}
static unsigned char *AcquireCompactPixels(Image *image)
{
  size_t
    packet_size;

  unsigned char
    *compact_pixels;

  /*
    Allocate a worst-case Packbits output buffer for one image row: two
    bytes per sample when depth > 8, one otherwise, with generous slack
    ((9*columns)+1 packets) for pathological literal runs.  On failure an
    exception is recorded on the image and NULL is returned.
  */
  packet_size=(size_t) (image->depth > 8UL ? 2UL : 1UL);
  compact_pixels=(unsigned char *) AcquireQuantumMemory((9*image->columns)+1,
    packet_size*sizeof(*compact_pixels));
  if (compact_pixels == (unsigned char *) NULL)
    (void) ThrowMagickException(&image->exception,GetMagickModule(),
      ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
  return(compact_pixels);
}
/*
  Write every channel of `next_image` to the output blob `image` in PSD
  channel order (index OR gray OR red/green/blue[/black], then alpha, then
  an optional opacity-mask channel).  When `separate` is set each channel is
  written as a stand-alone layer channel and its byte count is back-patched
  into the reserved slot at `size_offset`; otherwise a single compression
  header covers all channels and `rows_offset` walks the shared per-row RLE
  count table.  Returns the total number of bytes written (0 on allocation
  failure).
*/
static ssize_t WritePSDChannels(const PSDInfo *psd_info,
  const ImageInfo *image_info,Image *image,Image *next_image,
  MagickOffsetType size_offset,const MagickBooleanType separate)
{
  Image
    *mask;

  MagickOffsetType
    rows_offset;

  size_t
    channels,
    length,
    offset_length;

  ssize_t
    count;

  unsigned char
    *compact_pixels;

  count=0;
  offset_length=0;
  rows_offset=0;
  compact_pixels=(unsigned char *) NULL;
  if (next_image->compression == RLECompression)
    {
      /* Shared scratch buffer for Packbits encoding, reused per row. */
      compact_pixels=AcquireCompactPixels(next_image);
      if (compact_pixels == (unsigned char *) NULL)
        return(0);
    }
  channels=1;
  if (separate == MagickFalse)
    {
      if (next_image->storage_class != PseudoClass)
        {
          if (IsGrayImage(next_image,&next_image->exception) == MagickFalse)
            channels=(size_t) (next_image->colorspace == CMYKColorspace ?
              4 : 3);
          if (next_image->matte != MagickFalse)
            channels++;
        }
      /* One compression header covers all channels; its per-row count
         table starts 2 bytes past here (after the compression tag). */
      rows_offset=TellBlob(image)+2;
      count+=WriteCompressionStart(psd_info,image,next_image,(ssize_t)
        channels);
      /* Size of one channel's slice of the per-row count table. */
      offset_length=(next_image->rows*(psd_info->version == 1 ? 2 : 4));
    }
  /* Skip past the 2-byte compression tag inside each separate channel. */
  size_offset+=2;
  if (next_image->storage_class == PseudoClass)
    {
      /* Colormapped image: a single index channel. */
      length=WritePSDChannel(psd_info,image_info,image,next_image,
        IndexQuantum,compact_pixels,rows_offset,separate);
      if (separate != MagickFalse)
        size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
      else
        rows_offset+=offset_length;
      count+=length;
    }
  else
    {
      if (IsGrayImage(next_image,&next_image->exception) != MagickFalse)
        {
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            GrayQuantum,compact_pixels,rows_offset,separate);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
        }
      else
        {
          /* PSD stores CMYK inverted; negate before writing and restore
             (second NegateImage) after all channels are out. */
          if (next_image->colorspace == CMYKColorspace)
            (void) NegateImage(next_image,MagickFalse);
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            RedQuantum,compact_pixels,rows_offset,separate);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            GreenQuantum,compact_pixels,rows_offset,separate);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            BlueQuantum,compact_pixels,rows_offset,separate);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          if (next_image->colorspace == CMYKColorspace)
            {
              length=WritePSDChannel(psd_info,image_info,image,next_image,
                BlackQuantum,compact_pixels,rows_offset,separate);
              if (separate != MagickFalse)
                size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
              else
                rows_offset+=offset_length;
              count+=length;
            }
        }
      if (next_image->matte != MagickFalse)
        {
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            AlphaQuantum,compact_pixels,rows_offset,separate);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
        }
    }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  /* Undo the CMYK negation applied above. */
  if (next_image->colorspace == CMYKColorspace)
    (void) NegateImage(next_image,MagickFalse);
  if (separate != MagickFalse)
    {
      const char
        *property;

      /* Optional per-layer opacity mask, stashed in the image registry by
         the reader under the "psd:opacity-mask" artifact key. */
      property=GetImageArtifact(next_image,"psd:opacity-mask");
      if (property != (const char *) NULL)
        {
          mask=(Image *) GetImageRegistry(ImageRegistryType,property,
            &image->exception);
          if (mask != (Image *) NULL)
            {
              if (mask->compression == RLECompression)
                {
                  compact_pixels=AcquireCompactPixels(mask);
                  if (compact_pixels == (unsigned char *) NULL)
                    return(0);
                }
              /* The mask is grayscale; RedQuantum exports its intensity. */
              length=WritePSDChannel(psd_info,image_info,image,mask,
                RedQuantum,compact_pixels,rows_offset,MagickTrue);
              (void) WritePSDSize(psd_info,image,length,size_offset);
              count+=length;
              compact_pixels=(unsigned char *) RelinquishMagickMemory(
                compact_pixels);
            }
        }
    }
  return(count);
}
static size_t WritePascalString(Image *image,const char *value,size_t padding)
{
  register ssize_t
    i;

  size_t
    count,
    length;

  /*
    Write `value` as a Pascal string (a length byte followed by at most 255
    characters), then pad with zero bytes so the total written — including
    the length byte — is a multiple of `padding`.  Returns the number of
    bytes written.
  */
  length=strlen(value);
  if (length > 255UL)
    length=255UL;
  count=0;
  if (length == 0)
    count+=WriteBlobByte(image,0);
  else
    {
      count+=WriteBlobByte(image,(unsigned char) length);
      count+=WriteBlob(image,length,(const unsigned char *) value);
    }
  length++;  /* include the length byte in the padding calculation */
  if ((length % padding) != 0)
    for (i=0; i < (ssize_t) (padding-(length % padding)); i++)
      count+=WriteBlobByte(image,0);
  return(count);
}
static void WriteResolutionResourceBlock(Image *image)
{
  double
    x_resolution,
    y_resolution;

  unsigned short
    units;

  /*
    Emit the 8BIM resolution resource (id 0x03ED): horizontal and vertical
    resolutions as 16.16 fixed-point values in pixels-per-inch, plus their
    display units (1=inch, 2=cm).  NOTE(review): 0.5 is added both here and
    again in the cast below — behavior preserved as-is; confirm the double
    rounding is intentional.
  */
  if (image->units != PixelsPerCentimeterResolution)
    {
      units=1;
      x_resolution=65536.0*image->x_resolution+0.5;
      y_resolution=65536.0*image->y_resolution+0.5;
    }
  else
    {
      units=2;
      x_resolution=2.54*65536.0*image->x_resolution+0.5;
      y_resolution=2.54*65536.0*image->y_resolution+0.5;
    }
  (void) WriteBlob(image,4,(const unsigned char *) "8BIM");
  (void) WriteBlobMSBShort(image,0x03ED);  /* resolution resource id */
  (void) WriteBlobMSBShort(image,0);       /* empty, padded Pascal name */
  (void) WriteBlobMSBLong(image,16);       /* resource size */
  (void) WriteBlobMSBLong(image,(unsigned int) (x_resolution+0.5));
  (void) WriteBlobMSBShort(image,units);   /* horizontal resolution unit */
  (void) WriteBlobMSBShort(image,units);   /* width unit */
  (void) WriteBlobMSBLong(image,(unsigned int) (y_resolution+0.5));
  (void) WriteBlobMSBShort(image,units);   /* vertical resolution unit */
  (void) WriteBlobMSBShort(image,units);   /* height unit */
}
static inline size_t WriteChannelSize(const PSDInfo *psd_info,Image *image,
  const signed short channel)
{
  ssize_t
    count;

  /*
    Write a layer channel-info record: the signed channel id (e.g. -1 for
    alpha, -2 for a user mask) followed by a zero placeholder for the
    channel's data length, which is patched later via WritePSDSize.
  */
  count=WriteBlobMSBSignedShort(image,channel);
  return((size_t) (count+SetPSDSize(psd_info,image,0)));
}
/*
  Remove the ICC profile resource (8BIM id 0x040F) from an in-memory copy of
  the image's 8BIM resource block, so the writer can emit the ICC profile as
  its own resource without duplication.  The block is compacted in place and
  its StringInfo length shortened; malformed blocks are left untouched.
*/
static void RemoveICCProfileFromResourceBlock(StringInfo *bim_profile)
{
  register const unsigned char
    *p;

  size_t
    length;

  unsigned char
    *datum;

  unsigned int
    count,
    long_sans;

  unsigned short
    id,
    short_sans;

  length=GetStringInfoLength(bim_profile);
  /* 16 bytes is the minimum for one resource header (signature + id +
     name + size); anything shorter cannot contain a full entry. */
  if (length < 16)
    return;
  datum=GetStringInfoDatum(bim_profile);
  for (p=datum; (p >= datum) && (p < (datum+length-16)); )
  {
    register unsigned char
      *q;

    /* q marks the start of this resource entry for the memmove below. */
    q=(unsigned char *) p;
    if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
      break;
    /* Parse the header: 4-byte signature (discarded), 2-byte id, 2-byte
       empty Pascal name, 4-byte data size. */
    p=PushLongPixel(MSBEndian,p,&long_sans);
    p=PushShortPixel(MSBEndian,p,&id);
    p=PushShortPixel(MSBEndian,p,&short_sans);
    p=PushLongPixel(MSBEndian,p,&count);
    if (id == 0x0000040f)
      {
        ssize_t
          quantum;

        /* Total entry size: header (12) + data rounded to even length. */
        quantum=PSDQuantum(count)+12;
        if ((quantum >= 12) && (quantum < (ssize_t) length))
          {
            /* Shift the remainder of the block over the ICC entry, then
               shrink the profile accordingly. */
            if ((q+quantum < (datum+length-16)))
              (void) memmove(q,q+quantum,length-quantum-(q-datum));
            SetStringInfoLength(bim_profile,length-quantum);
          }
        break;
      }
    /* Skip this entry's data; resource data is padded to even length. */
    p+=count;
    if ((count & 0x01) != 0)
      p++;
  }
}
/*
  Remove the resolution resource (8BIM id 0x03ED) from an in-memory copy of
  the image's 8BIM resource block; the writer emits a fresh resolution
  resource itself (see WriteResolutionResourceBlock), so a stale one must
  not be carried over.  The block is compacted in place and its StringInfo
  length shortened; malformed blocks are left untouched.
*/
static void RemoveResolutionFromResourceBlock(StringInfo *bim_profile)
{
  register const unsigned char
    *p;

  size_t
    length;

  unsigned char
    *datum;

  unsigned int
    count,
    long_sans;

  unsigned short
    id,
    short_sans;

  length=GetStringInfoLength(bim_profile);
  /* 16 bytes is the minimum for one resource header; shorter blocks
     cannot contain a full entry. */
  if (length < 16)
    return;
  datum=GetStringInfoDatum(bim_profile);
  for (p=datum; (p >= datum) && (p < (datum+length-16)); )
  {
    register unsigned char
      *q;

    ssize_t
      cnt;

    /* q marks the start of this resource entry for the memmove below. */
    q=(unsigned char *) p;
    if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
      return;
    /* Parse the header: 4-byte signature (discarded), 2-byte id, 2-byte
       empty Pascal name, 4-byte data size. */
    p=PushLongPixel(MSBEndian,p,&long_sans);
    p=PushShortPixel(MSBEndian,p,&id);
    p=PushShortPixel(MSBEndian,p,&short_sans);
    p=PushLongPixel(MSBEndian,p,&count);
    /* Data size rounded up to even length, as stored in the block. */
    cnt=PSDQuantum(count);
    if (cnt < 0)
      return;
    if ((id == 0x000003ed) && (cnt < (ssize_t) (length-12)) &&
        ((ssize_t) length-(cnt+12)-(q-datum)) > 0)
      {
        /* Shift the remainder of the block over this entry (header of 12
           bytes plus padded data), then shrink the profile. */
        (void) memmove(q,q+cnt+12,length-(cnt+12)-(q-datum));
        SetStringInfoLength(bim_profile,length-(cnt+12));
        break;
      }
    /* Skip this entry's data; resource data is padded to even length. */
    p+=count;
    if ((count & 0x01) != 0)
      p++;
  }
}
/*
  Decide what, if anything, from the layer's "psd:additional-info" profile
  should be written back out, controlled by the "psd:additional-info"
  image option:
    "all"       — keep the stored profile verbatim;
    "selective" — keep only whitelisted 4-character keys (compacting the
                  profile in place);
    otherwise   — drop the profile entirely.
  Returns the StringInfo to write, or NULL when nothing should be written.
*/
static const StringInfo *GetAdditionalInformation(const ImageInfo *image_info,
  Image *image)
{
#define PSDKeySize 5
#define PSDAllowedLength 36

  char
    key[PSDKeySize];

  /* Whitelist of keys from: https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/ */
  const char
    allowed[PSDAllowedLength][PSDKeySize] = {
      "blnc", "blwh", "brit", "brst", "clbl", "clrL", "curv", "expA", "FMsk",
      "GdFl", "grdm", "hue ", "hue2", "infx", "knko", "lclr", "levl", "lnsr",
      "lfx2", "luni", "lrFX", "lspf", "lyid", "lyvr", "mixr", "nvrt", "phfl",
      "post", "PtFl", "selc", "shpa", "sn2P", "SoCo", "thrs", "tsly", "vibA"
    },
    *option;

  const StringInfo
    *info;

  MagickBooleanType
    found;

  register size_t
    i;

  size_t
    remaining_length,
    length;

  StringInfo
    *profile;

  unsigned char
    *p;

  unsigned int
    size;

  info=GetImageProfile(image,"psd:additional-info");
  if (info == (const StringInfo *) NULL)
    return((const StringInfo *) NULL);
  option=GetImageOption(image_info,"psd:additional-info");
  if (LocaleCompare(option,"all") == 0)
    return(info);
  if (LocaleCompare(option,"selective") != 0)
    {
      /* Any other (or missing) option value: discard the profile. */
      profile=RemoveImageProfile(image,"psd:additional-info");
      return(DestroyStringInfo(profile));
    }
  length=GetStringInfoLength(info);
  p=GetStringInfoDatum(info);
  remaining_length=length;
  /* `length` is reused below as the running size of the kept entries. */
  length=0;
  /* Each entry is at least 12 bytes: 4-byte signature, 4-byte key,
     4-byte big-endian size, then `size` bytes of data. */
  while (remaining_length >= 12)
  {
    /* skip over signature */
    p+=4;
    key[0]=(char) (*p++);
    key[1]=(char) (*p++);
    key[2]=(char) (*p++);
    key[3]=(char) (*p++);
    key[4]='\0';
    /* Assemble the big-endian 32-bit entry size byte by byte. */
    size=(unsigned int) (*p++) << 24;
    size|=(unsigned int) (*p++) << 16;
    size|=(unsigned int) (*p++) << 8;
    size|=(unsigned int) (*p++);
    size=size & 0xffffffff;
    remaining_length-=12;
    /* A size larger than what remains means the profile is corrupt. */
    if ((size_t) size > remaining_length)
      return((const StringInfo *) NULL);
    found=MagickFalse;
    for (i=0; i < PSDAllowedLength; i++)
    {
      if (LocaleNCompare(key,allowed[i],PSDKeySize) != 0)
        continue;

      found=MagickTrue;
      break;
    }
    remaining_length-=(size_t) size;
    if (found == MagickFalse)
      {
        /* Drop this entry: shift the rest of the data over it.  p ends up
           at the start of the shifted data, i.e. the next entry. */
        if (remaining_length > 0)
          p=(unsigned char *) memmove(p-12,p+size,remaining_length);
        continue;
      }
    /* Keep this entry: account for its header and data, move past it. */
    length+=(size_t) size+12;
    p+=size;
  }
  /* NOTE(review): `info` and `profile` refer to the same (now detached and
     compacted) StringInfo; SetImageProfile clones it back onto the image
     while the trimmed original is returned — confirm against
     RemoveImageProfile/SetImageProfile ownership semantics. */
  profile=RemoveImageProfile(image,"psd:additional-info");
  if (length == 0)
    return(DestroyStringInfo(profile));
  SetStringInfoLength(profile,(const size_t) length);
  (void) SetImageProfile(image,"psd:additional-info",info);
  return(profile);
}
static MagickBooleanType WritePSDImage(const ImageInfo *image_info,
Image *image)
{
char
layer_name[MaxTextExtent];
const char
*property;
const StringInfo
*icc_profile,
*info;
Image
*base_image,
*next_image;
MagickBooleanType
status;
MagickOffsetType
*layer_size_offsets,
size_offset;
PSDInfo
psd_info;
register ssize_t
i;
size_t
layer_count,
layer_index,
length,
name_length,
num_channels,
packet_size,
rounded_size,
size;
StringInfo
*bim_profile;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
status=OpenBlob(image_info,image,WriteBinaryBlobMode,&image->exception);
if (status == MagickFalse)
return(status);
packet_size=(size_t) (image->depth > 8 ? 6 : 3);
if (image->matte != MagickFalse)
packet_size+=image->depth > 8 ? 2 : 1;
psd_info.version=1;
if ((LocaleCompare(image_info->magick,"PSB") == 0) ||
(image->columns > 30000) || (image->rows > 30000))
psd_info.version=2;
(void) WriteBlob(image,4,(const unsigned char *) "8BPS");
(void) WriteBlobMSBShort(image,psd_info.version); /* version */
for (i=1; i <= 6; i++)
(void) WriteBlobByte(image, 0); /* 6 bytes of reserved */
/* When the image has a color profile it won't be converted to gray scale */
if ((GetImageProfile(image,"icc") == (StringInfo *) NULL) &&
(SetImageGray(image,&image->exception) != MagickFalse))
num_channels=(image->matte != MagickFalse ? 2UL : 1UL);
else
if ((image_info->type != TrueColorType) && (image_info->type !=
TrueColorMatteType) && (image->storage_class == PseudoClass))
num_channels=(image->matte != MagickFalse ? 2UL : 1UL);
else
{
if (image->storage_class == PseudoClass)
(void) SetImageStorageClass(image,DirectClass);
if (image->colorspace != CMYKColorspace)
num_channels=(image->matte != MagickFalse ? 4UL : 3UL);
else
num_channels=(image->matte != MagickFalse ? 5UL : 4UL);
}
(void) WriteBlobMSBShort(image,(unsigned short) num_channels);
(void) WriteBlobMSBLong(image,(unsigned int) image->rows);
(void) WriteBlobMSBLong(image,(unsigned int) image->columns);
if (IsGrayImage(image,&image->exception) != MagickFalse)
{
MagickBooleanType
monochrome;
/*
Write depth & mode.
*/
monochrome=IsMonochromeImage(image,&image->exception) &&
(image->depth == 1) ? MagickTrue : MagickFalse;
(void) WriteBlobMSBShort(image,(unsigned short)
(monochrome != MagickFalse ? 1 : image->depth > 8 ? 16 : 8));
(void) WriteBlobMSBShort(image,(unsigned short)
(monochrome != MagickFalse ? BitmapMode : GrayscaleMode));
}
else
{
(void) WriteBlobMSBShort(image,(unsigned short) (image->storage_class ==
PseudoClass ? 8 : image->depth > 8 ? 16 : 8));
if (((image_info->colorspace != UndefinedColorspace) ||
(image->colorspace != CMYKColorspace)) &&
(image_info->colorspace != CMYKColorspace))
{
(void) TransformImageColorspace(image,sRGBColorspace);
(void) WriteBlobMSBShort(image,(unsigned short)
(image->storage_class == PseudoClass ? IndexedMode : RGBMode));
}
else
{
if (image->colorspace != CMYKColorspace)
(void) TransformImageColorspace(image,CMYKColorspace);
(void) WriteBlobMSBShort(image,CMYKMode);
}
}
if ((IsGrayImage(image,&image->exception) != MagickFalse) ||
(image->storage_class == DirectClass) || (image->colors > 256))
(void) WriteBlobMSBLong(image,0);
else
{
/*
Write PSD raster colormap.
*/
(void) WriteBlobMSBLong(image,768);
for (i=0; i < (ssize_t) image->colors; i++)
(void) WriteBlobByte(image,ScaleQuantumToChar(image->colormap[i].red));
for ( ; i < 256; i++)
(void) WriteBlobByte(image,0);
for (i=0; i < (ssize_t) image->colors; i++)
(void) WriteBlobByte(image,ScaleQuantumToChar(
image->colormap[i].green));
for ( ; i < 256; i++)
(void) WriteBlobByte(image,0);
for (i=0; i < (ssize_t) image->colors; i++)
(void) WriteBlobByte(image,ScaleQuantumToChar(image->colormap[i].blue));
for ( ; i < 256; i++)
(void) WriteBlobByte(image,0);
}
/*
Image resource block.
*/
length=28; /* 0x03EB */
bim_profile=(StringInfo *) GetImageProfile(image,"8bim");
icc_profile=GetImageProfile(image,"icc");
if (bim_profile != (StringInfo *) NULL)
{
bim_profile=CloneStringInfo(bim_profile);
if (icc_profile != (StringInfo *) NULL)
RemoveICCProfileFromResourceBlock(bim_profile);
RemoveResolutionFromResourceBlock(bim_profile);
length+=PSDQuantum(GetStringInfoLength(bim_profile));
}
if (icc_profile != (const StringInfo *) NULL)
length+=PSDQuantum(GetStringInfoLength(icc_profile))+12;
(void) WriteBlobMSBLong(image,(unsigned int) length);
WriteResolutionResourceBlock(image);
if (bim_profile != (StringInfo *) NULL)
{
(void) WriteBlob(image,GetStringInfoLength(bim_profile),
GetStringInfoDatum(bim_profile));
bim_profile=DestroyStringInfo(bim_profile);
}
if (icc_profile != (StringInfo *) NULL)
{
(void) WriteBlob(image,4,(const unsigned char *) "8BIM");
(void) WriteBlobMSBShort(image,0x0000040F);
(void) WriteBlobMSBShort(image,0);
(void) WriteBlobMSBLong(image,(unsigned int) GetStringInfoLength(
icc_profile));
(void) WriteBlob(image,GetStringInfoLength(icc_profile),
GetStringInfoDatum(icc_profile));
if ((ssize_t) GetStringInfoLength(icc_profile) !=
PSDQuantum(GetStringInfoLength(icc_profile)))
(void) WriteBlobByte(image,0);
}
base_image=GetNextImageInList(image);
if (base_image == (Image *)NULL)
base_image=image;
size=0;
size_offset=TellBlob(image);
(void) SetPSDSize(&psd_info,image,0);
(void) SetPSDSize(&psd_info,image,0);
layer_count=0;
for (next_image=base_image; next_image != NULL; )
{
layer_count++;
next_image=GetNextImageInList(next_image);
}
if (image->matte != MagickFalse)
size+=WriteBlobMSBShort(image,-(unsigned short) layer_count);
else
size+=WriteBlobMSBShort(image,(unsigned short) layer_count);
layer_size_offsets=(MagickOffsetType *) AcquireQuantumMemory(
(size_t) layer_count,sizeof(MagickOffsetType));
if (layer_size_offsets == (MagickOffsetType *) NULL)
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
layer_index=0;
for (next_image=base_image; next_image != NULL; )
{
Image
*mask;
unsigned char
default_color;
unsigned short
channels,
total_channels;
mask=(Image *) NULL;
property=GetImageArtifact(next_image,"psd:opacity-mask");
default_color=0;
if (property != (const char *) NULL)
{
mask=(Image *) GetImageRegistry(ImageRegistryType,property,
&image->exception);
default_color=(unsigned char) (strlen(property) == 9 ? 255 : 0);
}
size+=WriteBlobMSBLong(image,(unsigned int) next_image->page.y);
size+=WriteBlobMSBLong(image,(unsigned int) next_image->page.x);
size+=WriteBlobMSBLong(image,(unsigned int) (next_image->page.y+
next_image->rows));
size+=WriteBlobMSBLong(image,(unsigned int) (next_image->page.x+
next_image->columns));
channels=1;
if ((next_image->storage_class != PseudoClass) &&
(IsGrayImage(next_image,&next_image->exception) == MagickFalse))
channels=(unsigned short) (next_image->colorspace == CMYKColorspace ?
4 : 3);
total_channels=channels;
if (next_image->matte != MagickFalse)
total_channels++;
if (mask != (Image *) NULL)
total_channels++;
size+=WriteBlobMSBShort(image,total_channels);
layer_size_offsets[layer_index++]=TellBlob(image);
for (i=0; i < (ssize_t) channels; i++)
size+=WriteChannelSize(&psd_info,image,(signed short) i);
if (next_image->matte != MagickFalse)
size+=WriteChannelSize(&psd_info,image,-1);
if (mask != (Image *) NULL)
size+=WriteChannelSize(&psd_info,image,-2);
size+=WriteBlob(image,4,(const unsigned char *) "8BIM");
size+=WriteBlob(image,4,(const unsigned char *)
CompositeOperatorToPSDBlendMode(next_image));
property=GetImageArtifact(next_image,"psd:layer.opacity");
if (property != (const char *) NULL)
{
Quantum
opacity;
opacity=(Quantum) StringToInteger(property);
size+=WriteBlobByte(image,ScaleQuantumToChar(opacity));
(void) ApplyPSDLayerOpacity(next_image,opacity,MagickTrue,
&image->exception);
}
else
size+=WriteBlobByte(image,255);
size+=WriteBlobByte(image,0);
size+=WriteBlobByte(image,(unsigned char)
(next_image->compose == NoCompositeOp ? 1 << 0x02 : 1)); /* layer properties - visible, etc. */
size+=WriteBlobByte(image,0);
info=GetAdditionalInformation(image_info,next_image);
property=(const char *) GetImageProperty(next_image,"label");
if (property == (const char *) NULL)
{
(void) FormatLocaleString(layer_name,MaxTextExtent,"L%.20g",
(double) layer_index);
property=layer_name;
}
name_length=strlen(property)+1;
if ((name_length % 4) != 0)
name_length+=(4-(name_length % 4));
if (info != (const StringInfo *) NULL)
name_length+=GetStringInfoLength(info);
name_length+=8;
if (mask != (Image *) NULL)
name_length+=20;
size+=WriteBlobMSBLong(image,(unsigned int) name_length);
if (mask == (Image *) NULL)
size+=WriteBlobMSBLong(image,0);
else
{
if (mask->compose != NoCompositeOp)
(void) ApplyPSDOpacityMask(next_image,mask,ScaleCharToQuantum(
default_color),MagickTrue,&image->exception);
mask->page.y+=image->page.y;
mask->page.x+=image->page.x;
size+=WriteBlobMSBLong(image,20);
size+=WriteBlobMSBSignedLong(image,(const signed int) mask->page.y);
size+=WriteBlobMSBSignedLong(image,(const signed int) mask->page.x);
size+=WriteBlobMSBSignedLong(image,(const signed int) (mask->rows+
mask->page.y));
size+=WriteBlobMSBSignedLong(image,(const signed int) (mask->columns+
mask->page.x));
size+=WriteBlobByte(image,default_color);
size+=WriteBlobByte(image,(unsigned char) (
mask->compose == NoCompositeOp ? 2 : 0));
size+=WriteBlobMSBShort(image,0);
}
size+=WriteBlobMSBLong(image,0);
size+=WritePascalString(image,property,4);
if (info != (const StringInfo *) NULL)
size+=WriteBlob(image,GetStringInfoLength(info),GetStringInfoDatum(info));
next_image=GetNextImageInList(next_image);
}
/*
Now the image data!
*/
next_image=base_image;
layer_index=0;
while (next_image != NULL)
{
length=(size_t) WritePSDChannels(&psd_info,image_info,image,next_image,
layer_size_offsets[layer_index++],MagickTrue);
if (length == 0)
{
status=MagickFalse;
break;
}
size+=length;
next_image=GetNextImageInList(next_image);
}
(void) WriteBlobMSBLong(image,0); /* user mask data */
/*
Remove the opacity mask from the registry
*/
next_image=base_image;
while (next_image != (Image *) NULL)
{
property=GetImageArtifact(next_image,"psd:opacity-mask");
if (property != (const char *) NULL)
(void) DeleteImageRegistry(property);
next_image=GetNextImageInList(next_image);
}
/*
Write the total size
*/
size_offset+=WritePSDSize(&psd_info,image,size+
(psd_info.version == 1 ? 8 : 12),size_offset);
if ((size/2) != ((size+1)/2))
rounded_size=size+1;
else
rounded_size=size;
(void) WritePSDSize(&psd_info,image,rounded_size,size_offset);
layer_size_offsets=(MagickOffsetType *) RelinquishMagickMemory(
layer_size_offsets);
/*
Write composite image.
*/
if (status != MagickFalse)
{
CompressionType
compression;
compression=image->compression;
if (image->compression == ZipCompression)
image->compression=RLECompression;
if (WritePSDChannels(&psd_info,image_info,image,image,0,
MagickFalse) == 0)
status=MagickFalse;
image->compression=compression;
}
(void) CloseBlob(image);
return(status);
}
|
GB_binop__land_fp64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__land_fp64
// A.*B function (eWiseMult): GB_AemultB__land_fp64
// A*D function (colscale): GB_AxD__land_fp64
// D*A function (rowscale): GB_DxB__land_fp64
// C+=B function (dense accum): GB_Cdense_accumB__land_fp64
// C+=b function (dense accum): GB_Cdense_accumb__land_fp64
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__land_fp64
// C=scalar+B GB_bind1st__land_fp64
// C=scalar+B' GB_bind1st_tran__land_fp64
// C=A+scalar GB_bind2nd__land_fp64
// C=A'+scalar GB_bind2nd_tran__land_fp64
// C type: double
// A type: double
// B,b type: double
// BinaryOp: cij = ((aij != 0) && (bij != 0))
#define GB_ATYPE \
double
#define GB_BTYPE \
double
#define GB_CTYPE \
double
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
double bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
double t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = ((x != 0) && (y != 0)) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LAND || GxB_NO_FP64 || GxB_NO_LAND_FP64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// LAND is none of these, so this dense C += A+B kernel is compiled out;
// "(none)" is the placeholder name emitted by the code generator.
void (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; no accumulator, no mask.
// The loop lives in the shared template; the macros defined at the top of
// this file (GB_BINOP, GB_GETA, ...) specialize it to LAND on fp64.
GrB_Info GB_Cdense_ewise3_noaccum__land_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;   // operator disabled at compile time
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B where C is dense and B is sparse.  The template iterates over the
// entries of B using the ek_slice partition (kfirst/klast/pstart arrays)
// computed by the caller and split across ntasks/nthreads.
GrB_Info GB_Cdense_accumB__land_fp64
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;   // operator disabled at compile time
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b where C is dense and b is a scalar passed as an untyped GB_void*.
GrB_Info GB_Cdense_accumb__land_fp64
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;   // operator disabled at compile time
    #else
    {
        // get the scalar b for C += b, of type double
        double bwork = (*((double *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable — the inner block above always returns.
    // Kept as emitted by the code generator (do not edit generated files).
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D, column scale: D is diagonal, so each column j of A is scaled by
// D(j,j).  *_is_pattern flags tell the template to ignore the values and use
// only the sparsity pattern of that input.
GrB_Info GB_AxD__land_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;   // operator disabled at compile time
    #else
    double *GB_RESTRICT Cx = (double *) C->x ;   // Cx is read by the template
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B, row scale: D is diagonal, so each row i of B is scaled by D(i,i).
GrB_Info GB_DxB__land_fp64
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;   // operator disabled at compile time
    #else
    double *GB_RESTRICT Cx = (double *) C->x ;   // Cx is read by the template
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// Release any ek_slice workspace; safe to call when the pointers are NULL.
#undef  GB_FREE_ALL
#define GB_FREE_ALL \
{ \
    GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
    GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
    GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
// eWiseAdd: C = A+B or C<M> = A+B, specialized for LAND on fp64.
// The slice pointers start NULL; presumably the template allocates them as
// needed — in all cases GB_FREE_ALL above releases them before returning.
GrB_Info GB_AaddB__land_fp64
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;   // operator disabled at compile time
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_add_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B, specialized for LAND on fp64.
// Same workspace convention as GB_AaddB above: slice pointers start NULL
// and GB_FREE_ALL releases whatever the template allocated.
GrB_Info GB_AemultB__land_fp64
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;   // operator disabled at compile time
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_emult_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x && Bx [p]) for all entries p present in B's bitmap.
// The scalar x is bound as the first operand of the LAND operator.
GrB_Info GB_bind1st__land_fp64
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *Cz = (double *) Cx_output ;
    const double xval = (*((double *) x_input)) ;
    const double *Bz = (double *) Bx_input ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // process only entries present per the bitmap test GBB
        if (GBB (Bb, k))
        {
            const double bkj = Bz [k] ;
            Cz [k] = ((xval != 0) && (bkj != 0)) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] && y) for all entries p present in A's bitmap.
// The scalar y is bound as the second operand of the LAND operator.
GrB_Info GB_bind2nd__land_fp64
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *Cz = (double *) Cx_output ;
    const double *Az = (double *) Ax_input ;
    const double yval = (*((double *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // process only entries present per the bitmap test GBB
        if (GBB (Ab, k))
        {
            const double akj = Az [k] ;
            Cz [k] = ((akj != 0) && (yval != 0)) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// Per-entry kernel used by GB_unop_transpose.c: cij = (x && aij).
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    double aij = Ax [pA] ; \
    Cx [pC] = ((x != 0) && (aij != 0)) ; \
}
// C = op (x, A'): bind the scalar x as the first operand, then transpose.
GrB_Info GB_bind1st_tran__land_fp64
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        double
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;   // operator disabled at compile time
    #else
    double x = (*((const double *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for code below (a no-op here: both types are double)
    #undef  GB_ATYPE
    #define GB_ATYPE \
        double
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// Per-entry kernel used by GB_unop_transpose.c: cij = (aij && y).
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    double aij = Ax [pA] ; \
    Cx [pC] = ((aij != 0) && (y != 0)) ; \
}
// C = op (A', y): bind the scalar y as the second operand, then transpose.
GrB_Info GB_bind2nd_tran__land_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;   // operator disabled at compile time
    #else
    double y = (*((const double *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
laplace3d.c | // This file is a part of Julia. License is MIT: https://julialang.org/license
// GCC command line: gcc -fopenmp -mavx2 laplace3d.c -o laplace3d
/* Laplace 3D
orig: simple serial version
naive: simple parallelized version
auto: some ninja knowledge, using icc directives
sse/avx: ninja-optimized
Requires Sandy Bridge and up.
Note that the SSE/AVX versions do not handle boundary conditions
and thus each dimension must be 4n+2/8n+2. Try 258x258x258.
2014.08.06 anand.deshpande Initial code.
2014.08.06 dhiraj.kalamkar Padding and streaming stores.
*/
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>
#include <math.h>
#include <unistd.h>
#include <immintrin.h>
#include <omp.h>
// Read the CPU time-stamp counter (raw cycle count); one variant per target.
#if defined(__i386__)
static inline uint64_t rdtsc(void)
{
    uint64_t x;
    // 0x0f 0x31 is the RDTSC opcode; "=A" returns EDX:EAX as one 64-bit value
    __asm__ volatile (".byte 0x0f, 0x31" : "=A" (x));
    return x;
}
#elif defined(__x86_64__)
static inline uint64_t rdtsc(void)
{
    unsigned hi, lo;
    // on x86-64, "=A" does not span rdx:rax, so read the halves explicitly
    __asm__ __volatile__ ("rdtsc" : "=a"(lo), "=d"(hi));
    return ((uint64_t)lo) | (((uint64_t)hi) << 32);
}
#elif defined(_COMPILER_MICROSOFT_)
#include <intrin.h>
static inline uint64_t rdtsc(void)
{
    return __rdtsc();
}
#endif
void l3d_naive(int nx, int padded_nx, int ny, int nz, float *u1, float *u2);
void l3d_auto(int nx, int padded_nx, int ny, int nz, float *u1, float *u2);
void l3d_sse(int nx, int padded_nx, int ny, int nz, float *u1, float *u2);
void l3d_avx(int nx, int padded_nx, int ny, int nz, float *u1, float *u2);
void l3d_orig(int nx, int ny, int nz, float *u1, float *u2);
// Estimate the CPU clock in GHz by counting TSC ticks across a one-second
// sleep.  Blocks the caller for ~1 second.
double cpughz()
{
    const uint64_t start = rdtsc();
    sleep(1);
    const uint64_t ticks_per_sec = rdtsc() - start;
    return (double) ticks_per_sec / 1e9;
}
// Driver: parse the grid size, iteration count, kernel variant, and verify
// flag from argv; run the chosen kernel, report GFLOP/s and bandwidth, and
// optionally check the result against the serial reference l3d_orig.
int main(int argc, char **argv)
{
    int nx, padded_nx, ny, nz, iters, i, j, k, ind, p_ind, verify,
    nthreads, pad_size;
    float *u1, *u1_p, *u1_orig, *u2, *u2_p, *u2_orig, *foo,
    error_tol = 0.00001;
    double ghz;
    // function pointer selecting the kernel variant to benchmark
    void (*l3d)(int nx, int padded_nx, int ny, int nz,
    float *u1, float *u2);
    if (argc != 7) {
        fprintf(stderr, "Usage:\n"
        " laplace3d <nx> <ny> <nz> <#iters> <naive|auto|sse|avx> "
        "<verify?>\n");
        exit(-1);
    }
    nx = strtol(argv[1], NULL, 10);
    ny = strtol(argv[2], NULL, 10);
    nz = strtol(argv[3], NULL, 10);
    // round nx up to the next multiple of 8 (AVX lane count)
    padded_nx = ((nx + 0x7) & (~0x7));
    iters = strtol(argv[4], NULL, 10);
    if (strncasecmp(argv[5], "naive", 5) == 0)
        l3d = l3d_naive;
    else if (strncasecmp(argv[5], "auto", 4) == 0)
        l3d = l3d_auto;
    else if (strncasecmp(argv[5], "sse", 3) == 0)
        l3d = l3d_sse;
    else if (strncasecmp(argv[5], "avx", 3) == 0)
        l3d = l3d_avx;
    else {
        fprintf(stderr, "don't recognize %s. naive, auto, sse, or avx?\n",
        argv[5]);
        exit(-1);
    }
    verify = strtol(argv[6], NULL, 10);
    ghz = cpughz();                       // TSC calibration; sleeps ~1 second
    nthreads = omp_get_max_threads();
    printf("machine speed is %g GHz, using %d threads\n", ghz, nthreads);
    printf("laplace3d: %d iterations on %dx%dx%d grid, "
    "verification is %s\n",
    iters, nx, ny, nz, verify ? "on" : "off");
    /* pad for aligned access; non-naive only */
    if (strncasecmp(argv[5], "naive", 5) != 0) {
        // shift the base pointer so that the first interior point
        // (1 + padded_nx + padded_nx*ny) lands on a 16-float boundary,
        // as the aligned SSE/AVX loads in l3d_sse/l3d_avx require
        pad_size = (((1 + padded_nx + padded_nx * ny) + 0xF) & (~0xF)) -
        (1 + padded_nx + padded_nx * ny);
        printf("using padded_nx = %d, pad_size = %d\n",
        padded_nx, pad_size);
        u1_p = (float *)_mm_malloc(sizeof (float) *
        (padded_nx * ny * nz + pad_size), 64);
        u2_p = (float *)_mm_malloc(sizeof (float) *
        (padded_nx * ny * nz + pad_size), 64);
        u1 = u1_p + pad_size;
        u2 = u2_p + pad_size;
    }
    else {
        u1_p = (float *)_mm_malloc(sizeof (float) * (nx * ny * nz), 64);
        u2_p = (float *)_mm_malloc(sizeof (float) * (nx * ny * nz), 64);
        u1 = u1_p;
        u2 = u2_p;
        padded_nx = nx;                   // naive layout has no padding
    }
    u1_orig = (float *)_mm_malloc(sizeof (float) * nx * ny * nz, 64);
    u2_orig = (float *)_mm_malloc(sizeof (float) * nx * ny * nz, 64);
    // initialize
    #pragma omp parallel for private(k,j,i,ind,p_ind)
    for (k = 0; k < nz; ++k) {
        for (j = 0; j < ny; ++j) {
            for (i = 0; i < nx; ++i) {
                ind = i + j*nx + k*nx*ny;                  // unpadded index
                p_ind = i + j*padded_nx + k*padded_nx*ny;  // padded index
                if (i == 0 || i == nx - 1
                    || j == 0 || j == ny - 1
                    || k == 0 || k == nz - 1) {
                    // Dirichlet b.c.'s
                    u1[p_ind] = u1_orig[ind] = u2[p_ind] = 1.0f;
                }
                else {
                    u1[p_ind] = u1_orig[ind] = u2[p_ind] = 0.0f;
                }
            }
        }
    }
    // run optimized version
    uint64_t t0 = rdtsc();
    for (i = 0; i < iters; ++i) {
        l3d(nx, padded_nx, ny, nz, u1, u2);
        foo = u1; u1 = u2; u2 = foo;      // ping-pong the two grids
    }
    uint64_t gold = rdtsc() - t0;
    double elapsed = gold / (ghz * 1e9);
    double grid_size = nx * ny * nz;
    double gflops = grid_size * iters * 6.0 / 1e9;       // 5 adds + 1 mul per point
    double gflops_sec = gflops / elapsed;
    double traffic = grid_size * iters * 4 * 2.0 / 1e9;  // ~2 accesses of 4 B per point
    double bw_realized = traffic / elapsed;
    printf("laplace3d completed in %.4lf seconds\n", elapsed);
    printf("GFLOPs/sec: %.1f\n", gflops_sec);
    printf("BW realized: %.1f\n", bw_realized);
    if (verify) {
        // run serial version for verification
        uint64_t st0 = rdtsc();
        for (i = 0; i < iters; ++i) {
            l3d_orig(nx, ny, nz, u1_orig, u2_orig);
            foo = u1_orig; u1_orig = u2_orig; u2_orig = foo;
        }
        uint64_t ser = rdtsc() - st0;
        elapsed = ser / (ghz * 1e9);
        gflops_sec = gflops / elapsed;
        bw_realized = traffic / elapsed;
        printf("laplace3d_orig completed in %.2lf seconds\n", elapsed);
        printf("GFLOPs/sec: %.1f\n", gflops_sec);
        printf("BW realized: %.1f\n", bw_realized);
        // verify
        for (k = 0; k < nz; ++k) {
            for (j = 0; j < ny; ++j) {
                for (i = 0; i < nx; ++i) {
                    ind = i + j*nx + k*nx*ny;
                    p_ind = i + j*padded_nx + k*padded_nx*ny;
                    if (fabs(u1[p_ind] - u1_orig[ind]) > error_tol) {
                        printf("ERROR %f - %f [%d, %d, %d]\n",
                        u1[p_ind], u1_orig[ind], i, j, k);
                        goto done;        // bail out on first mismatch
                    }
                }
            }
        }
        printf("verified, no error\n");
    }
done:   // also reached by normal fall-through when verify == 0
    _mm_free(u1_p);
    _mm_free(u2_p);
    _mm_free(u1_orig);
    _mm_free(u2_orig);
    return 0;
}
// One Jacobi sweep of the 7-point Laplace stencil: each interior point of u2
// becomes the average of its six neighbors in u1.  Boundary planes are left
// untouched.  padded_nx is the row stride of both arrays (== nx when unpadded).
void l3d_naive(int nx, int padded_nx, int ny, int nz, float *u1, float *u2)
{
    const float one_sixth = 1.0f/6.0f;
    const int ystride = padded_nx;        // step between adjacent j rows
    const int zstride = padded_nx*ny;     // step between adjacent k planes
    int i, j, k, idx;
    #pragma omp parallel for private(i,j,k,idx)
    for (k = 1; k < nz-1; ++k) {
        for (j = 1; j < ny-1; ++j) {
            #pragma ivdep
            for (i = 1; i < nx-1; ++i) {
                idx = i + j*ystride + k*zstride;
                // sum in the same left-to-right order as the reference kernel
                float sum = u1[idx-1] + u1[idx+1]
                          + u1[idx-ystride] + u1[idx+ystride]
                          + u1[idx-zstride] + u1[idx+zstride];
                u2[idx] = sum * one_sixth;
            }
        }
    }
}
// Same stencil as l3d_naive, but with compiler hints so the auto-vectorizer
// can emit aligned, non-temporal code.  Requires padded_nx % 8 == 0 and the
// pointer adjustment done in main (&u1[1] 32-byte aligned) — violating either
// is undefined behavior under these assume/unreachable hints.
void l3d_auto(int nx, int padded_nx, int ny, int nz, float *u1, float *u2)
{
    int i, j, k, ind;
    float sixth = 1.0f/6.0f;
#if defined(__INTEL_COMPILER)
    __assume(padded_nx%8==0);
    __assume_aligned(&u1[1],32);
    __assume_aligned(&u2[1],32);
#elif defined(__GNUC__)
    if (!(padded_nx%8==0))
        __builtin_unreachable();
    // third argument is the misalignment
    u1 = __builtin_assume_aligned(u1, 32, sizeof(float));
    u2 = __builtin_assume_aligned(u2, 32, sizeof(float));
#endif
    /* compute on the grid */
    #pragma omp parallel for private(i,j,k,ind)
    for (k = 1; k < nz-1; ++k) {
        for (j = 1; j < ny-1; ++j) {
            // icc-only hint: use streaming (non-temporal) stores for u2
            #pragma vector nontemporal(u2)
            for (i = 1; i < nx-1; ++i) {
                ind = i + j*padded_nx + k*padded_nx*ny;
                u2[ind] =
                ( u1[ind-1 ] + u1[ind+1 ]
                + u1[ind-padded_nx ] + u1[ind+padded_nx ]
                + u1[ind-padded_nx*ny] + u1[ind+padded_nx*ny] ) * sixth;
            }
        }
    }
}
// Hand-vectorized SSE version: 4 points per iteration.  Does NOT handle the
// case where nx-2 is not a multiple of 4 (see file header: dimensions must be
// 4n+2).  The j/k-neighbor loads and the store use aligned/streaming forms,
// relying on the padding set up in main; the i+/-1 loads are unaligned.
void l3d_sse(int nx, int padded_nx, int ny, int nz, float *u1, float *u2)
{
    int i, j, k, ind;
    float fsixth = 1.0f/6.0f;
    __m128 sixth = _mm_set_ps1(fsixth);
    /* compute on the grid */
    #pragma omp parallel for private(i,j,k,ind)
    for (k = 1; k < nz-1; ++k) {
        for (j = 1; j < ny-1; ++j) {
            for (i = 1; i < nx-1; i += 4) {
                ind = i + j*padded_nx + k*padded_nx*ny;
                __m128 pSrc1 = _mm_loadu_ps(&u1[ind-1]);           // x-1 (unaligned)
                __m128 pSrc2 = _mm_loadu_ps(&u1[ind+1]);           // x+1 (unaligned)
                __m128 pSrc3 = _mm_load_ps(&u1[ind-padded_nx]);    // y-1
                __m128 pSrc4 = _mm_load_ps(&u1[ind+padded_nx]);    // y+1
                __m128 pSrc5 = _mm_load_ps(&u1[ind-padded_nx*ny]); // z-1
                __m128 pSrc6 = _mm_load_ps(&u1[ind+padded_nx*ny]); // z+1
                __m128 sum1 = _mm_add_ps(pSrc1, pSrc2);
                __m128 sum2 = _mm_add_ps(pSrc3, pSrc4);
                __m128 sum3 = _mm_add_ps(pSrc5, pSrc6);
                __m128 sum4 = _mm_add_ps(sum1, sum2);
                __m128 vsum = _mm_add_ps(sum3, sum4);
                vsum = _mm_mul_ps(vsum, sixth);
                // streaming store: bypasses the cache for write-only u2
                _mm_stream_ps(&u2[ind], vsum);
            }
        }
    }
}
// Hand-vectorized AVX version: 8 points per iteration.  Dimensions must be
// 8n+2 (see file header); alignment requirements mirror l3d_sse but for
// 32-byte vectors.
void l3d_avx(int nx, int padded_nx, int ny, int nz, float *u1, float *u2)
{
    int i, j, k, ind;
    float fsixth = 1.0f/6.0f;
    __m256 sixth = _mm256_set1_ps(fsixth);
    /* compute on the grid */
    #pragma omp parallel for private(i,j,k,ind)
    for (k = 1; k < nz-1; ++k) {
        for (j = 1; j < ny-1; ++j) {
            for (i = 1; i < nx-1; i += 8) {
                ind = i + j*padded_nx + k*padded_nx*ny;
                __m256 pSrc1 = _mm256_loadu_ps(&u1[ind-1]);           // x-1 (unaligned)
                __m256 pSrc2 = _mm256_loadu_ps(&u1[ind+1]);           // x+1 (unaligned)
                __m256 pSrc3 = _mm256_load_ps(&u1[ind-padded_nx]);    // y-1
                __m256 pSrc4 = _mm256_load_ps(&u1[ind+padded_nx]);    // y+1
                __m256 pSrc5 = _mm256_load_ps(&u1[ind-padded_nx*ny]); // z-1
                __m256 pSrc6 = _mm256_load_ps(&u1[ind+padded_nx*ny]); // z+1
                __m256 sum1 = _mm256_add_ps(pSrc1, pSrc2);
                __m256 sum2 = _mm256_add_ps(pSrc3, pSrc4);
                __m256 sum3 = _mm256_add_ps(pSrc5, pSrc6);
                __m256 sum4 = _mm256_add_ps(sum1, sum2);
                __m256 vsum = _mm256_add_ps(sum3, sum4);
                vsum = _mm256_mul_ps(vsum, sixth);
                // streaming store: bypasses the cache for write-only u2
                _mm256_stream_ps(&u2[ind], vsum);
            }
        }
    }
}
// Serial reference sweep used for verification: boundary points are copied
// from u1 (Dirichlet), interior points get the six-neighbor average.
// Unpadded layout: row stride is nx, plane stride is nx*ny.
void l3d_orig(int nx, int ny, int nz, float *u1, float *u2)
{
    const float one_sixth = 1.0f/6.0f;
    const int ystride = nx;
    const int zstride = nx*ny;
    int i, j, k;
    for (k = 0; k < nz; ++k) {
        for (j = 0; j < ny; ++j) {
            for (i = 0; i < nx; ++i) {
                const int idx = i + j*ystride + k*zstride;
                const int on_boundary =
                    (i == 0) || (i == nx - 1) ||
                    (j == 0) || (j == ny - 1) ||
                    (k == 0) || (k == nz - 1);
                if (on_boundary) {
                    u2[idx] = u1[idx];    // Dirichlet b.c.'s
                } else {
                    u2[idx] = ( u1[idx-1] + u1[idx+1]
                              + u1[idx-ystride] + u1[idx+ystride]
                              + u1[idx-zstride] + u1[idx+zstride] ) * one_sixth;
                }
            }
        }
    }
}
|
3d25pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for two struct timeval values.
 *
 * Returns 1 if the difference is negative, otherwise 0.
 * NOTE: *y is normalized in place as part of the borrow/carry handling,
 * matching the classic glibc-manual algorithm — callers must not rely on
 * y being unchanged.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow whole seconds into y so that x->tv_usec >= y->tv_usec. */
    if (x->tv_usec < y->tv_usec)
    {
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow;
        y->tv_sec += borrow;
    }
    /* Carry excess microseconds (> 1s) from the difference into y. */
    if (x->tv_usec - y->tv_usec > 1000000)
    {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }
    /* tv_usec is now guaranteed non-negative. */
    result->tv_sec  = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+8;
Ny = atoi(argv[2])+8;
Nz = atoi(argv[3])+8;
}
if (argc > 4)
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
double ***roc2 = (double ***) malloc(sizeof(double**));
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
roc2 = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
roc2[i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 4;
tile_size[1] = 4;
tile_size[2] = 4;
tile_size[3] = 64;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
roc2[i][j][k] = 2.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
const double coef0 = -0.28472;
const double coef1 = 0.16000;
const double coef2 = -0.02000;
const double coef3 = 0.00254;
const double coef4 = -0.00018;
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
/* Copyright (C) 1991-2014 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
/* This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it. */
/* glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default. */
/* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
Unicode 6.0. */
/* We do not support C11 <threads.h>. */
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
for (t1=-1;t1<=2*Nt-2;t1++) {
lbp=ceild(t1+2,2);
ubp=min(floord(4*Nt+Nz-9,4),floord(2*t1+Nz-4,4));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(ceild(t1+2,2),ceild(4*t2-Nz+9,4));t3<=min(min(floord(4*Nt+Ny-9,4),floord(2*t1+Ny-3,4)),floord(4*t2+Ny-9,4));t3++) {
for (t4=max(max(ceild(t1-28,32),ceild(4*t2-Nz-51,64)),ceild(4*t3-Ny-51,64));t4<=min(min(min(floord(4*Nt+Nx-9,64),floord(2*t1+Nx-3,64)),floord(4*t2+Nx-9,64)),floord(4*t3+Nx-9,64));t4++) {
for (t5=max(max(max(ceild(t1,2),ceild(4*t2-Nz+5,4)),ceild(4*t3-Ny+5,4)),ceild(64*t4-Nx+5,4));t5<=floord(t1+1,2);t5++) {
for (t6=max(4*t2,-4*t1+4*t2+8*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+8*t5),4*t5+Nz-5);t6++) {
for (t7=4*t3;t7<=min(4*t3+3,4*t5+Ny-5);t7++) {
lbv=max(64*t4,4*t5+4);
ubv=min(64*t4+63,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((2.0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) - A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (roc2[ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (((((coef0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef1 * (((((A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef2 * (((((A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef3 * (((((A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef4 * (((((A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])))));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = MIN(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(4, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
free(roc2[i][j]);
}
free(A[0][i]);
free(A[1][i]);
free(roc2[i]);
}
free(A[0]);
free(A[1]);
free(roc2);
return 0;
}
|
conv3x3s1_winograd23_transform_kernel_sse.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
// Copyright (C) 2019 BUG1989. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
//
#include "option.h"
#include "mat.h"
namespace ncnn{
static void conv3x3s1_winograd23_transform_kernel_sse(const Mat& kernel, Mat& kernel_tm, int inch, int outch)
{
    // Winograd F(2x2, 3x3) kernel transform: each 3x3 kernel g is expanded
    // into a 4x4 tile U = G * g * G^T, stored per (outch, inch) pair.
    kernel_tm.create(4*4, inch, outch);

    // G: the 4x3 kernel-transform matrix for F(2,3)
    const float ktm[4][3] = {
        { 1.0f, 0.0f, 0.0f},
        { 1.0f/2, 1.0f/2, 1.0f/2},
        { 1.0f/2, -1.0f/2, 1.0f/2},
        { 0.0f, 0.0f, 1.0f}
    };

    #pragma omp parallel for
    for (int oc = 0; oc < outch; oc++)
    {
        for (int ic = 0; ic < inch; ic++)
        {
            // raw 3x3 kernel for this (output, input) channel pair
            const float* g = (const float*)kernel + oc * inch * 9 + ic * 9;
            float* out_tile = kernel_tm.channel(oc).row(ic);

            // first pass: h = G * g  (transform along kernel rows)
            // h[r][c] is row c of the 3x3 kernel dotted with row r of G
            float h[4][3];
            for (int r = 0; r < 4; r++)
            {
                for (int c = 0; c < 3; c++)
                {
                    const float* krow = g + c * 3;
                    h[r][c] = krow[0] * ktm[r][0] + krow[1] * ktm[r][1] + krow[2] * ktm[r][2];
                }
            }

            // second pass: U = h * G^T, written row-major into the 4x4 tile
            for (int r = 0; r < 4; r++)
            {
                for (int c = 0; c < 4; c++)
                {
                    out_tile[r*4 + c] = h[r][0] * ktm[c][0] + h[r][1] * ktm[c][1] + h[r][2] * ktm[c][2];
                }
            }
        }
    }
}
}
|
GB_emult_02.c | //------------------------------------------------------------------------------
// GB_emult_02: C = A.*B where A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// C = A.*B where A is sparse/hyper and B is bitmap/full constructs C with
// the same sparsity structure as A. This method can also be called with
// the two input matrices swapped, with flipxy true, to handle the case
// where A is bitmap/full and B is sparse/hyper.
// When no mask is present, or the mask is applied later, this method handles
// the following cases:
// ------------------------------------------
// C = A .* B
// ------------------------------------------
// sparse . sparse bitmap
// sparse . sparse full
// sparse . bitmap sparse
// sparse . full sparse
// If M is sparse/hyper and complemented, it is not passed here:
// ------------------------------------------
// C <!M>= A .* B
// ------------------------------------------
// sparse sparse sparse bitmap (mask later)
// sparse sparse sparse full (mask later)
// sparse sparse bitmap sparse (mask later)
// sparse sparse full sparse (mask later)
// If M is present, it is bitmap/full:
// ------------------------------------------
// C <M> = A .* B
// ------------------------------------------
// sparse bitmap sparse bitmap
// sparse bitmap sparse full
// sparse bitmap bitmap sparse
// sparse bitmap full sparse
// ------------------------------------------
// C <M> = A .* B
// ------------------------------------------
// sparse full sparse bitmap
// sparse full sparse full
// sparse full bitmap sparse
// sparse full full sparse
// ------------------------------------------
// C <!M> = A .* B
// ------------------------------------------
// sparse bitmap sparse bitmap
// sparse bitmap sparse full
// sparse bitmap bitmap sparse
// sparse bitmap full sparse
// ------------------------------------------
// C <!M> = A .* B
// ------------------------------------------
// sparse full sparse bitmap
// sparse full sparse full
// sparse full bitmap sparse
// sparse full full sparse
#include "GB_ewise.h"
#include "GB_emult.h"
#include "GB_binop.h"
#include "GB_unused.h"
#ifndef GBCOMPACT
#include "GB_binop__include.h"
#endif
#define GB_FREE_WORK \
{ \
GB_WERK_POP (Work, int64_t) ; \
GB_WERK_POP (A_ek_slicing, int64_t) ; \
}
#define GB_FREE_ALL \
{ \
GB_FREE_WORK ; \
GB_phbix_free (C) ; \
}
GrB_Info GB_emult_02        // C=A.*B when A is sparse/hyper, B bitmap/full
(
    GrB_Matrix C,           // output matrix, static header
    const GrB_Type ctype,   // type of output matrix C
    const bool C_is_csc,    // format of output matrix C
    const GrB_Matrix M,     // optional mask, unused if NULL
    const bool Mask_struct, // if true, use only the structure of M
    const bool Mask_comp,   // if true, use !M
    const GrB_Matrix A,     // input A matrix (sparse/hyper)
    const GrB_Matrix B,     // input B matrix (bitmap/full)
    GrB_BinaryOp op,        // op to perform C = op (A,B)
    bool flipxy,            // if true use fmult(y,x) else fmult(x,y)
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    GrB_Info info ;
    ASSERT (C != NULL && C->static_header) ;

    ASSERT_MATRIX_OK_OR_NULL (M, "M for emult_02", GB0) ;
    ASSERT_MATRIX_OK (A, "A for emult_02", GB0) ;
    ASSERT_MATRIX_OK (B, "B for emult_02", GB0) ;
    ASSERT_BINARYOP_OK (op, "op for emult_02", GB0) ;
    ASSERT_TYPE_OK (ctype, "ctype for emult_02", GB0) ;

    ASSERT (GB_IS_SPARSE (A) || GB_IS_HYPERSPARSE (A)) ;
    ASSERT (!GB_PENDING (A)) ;
    ASSERT (GB_JUMBLED_OK (A)) ;
    ASSERT (!GB_ZOMBIES (A)) ;
    ASSERT (GB_IS_BITMAP (B) || GB_IS_FULL (B)) ;
    // BUG FIX: the mask, when present, must itself be bitmap/full (see the
    // header comments: "If M is present, it is bitmap/full").  The original
    // assertion re-tested B here, duplicating the line above and leaving the
    // mask's sparsity unchecked.
    ASSERT (M == NULL || GB_IS_BITMAP (M) || GB_IS_FULL (M)) ;

    int C_sparsity = GB_sparsity (A) ;

    if (M == NULL)
    {
        GBURBLE ("emult_02:(%s=%s.*%s)",
            GB_sparsity_char (C_sparsity),
            GB_sparsity_char_matrix (A),
            GB_sparsity_char_matrix (B)) ;
    }
    else
    {
        GBURBLE ("emult_02:(%s<%s%s%s>=%s.*%s) ",
            GB_sparsity_char (C_sparsity),
            Mask_comp ? "!" : "",
            GB_sparsity_char_matrix (M),
            Mask_struct ? ",struct" : "",
            GB_sparsity_char_matrix (A),
            GB_sparsity_char_matrix (B)) ;
    }

    //--------------------------------------------------------------------------
    // revise the operator to handle flipxy
    //--------------------------------------------------------------------------

    // Replace the ANY operator with SECOND.  ANY and SECOND give the same
    // result if flipxy is false.  However, SECOND is changed to FIRST if
    // flipxy is true.  This ensures that the results do not depend on the
    // sparsity structures of A and B.

    if (op->opcode == GB_ANY_opcode)
    {
        switch (op->xtype->code)
        {
            case GB_BOOL_code   : op = GrB_SECOND_BOOL   ; break ;
            case GB_INT8_code   : op = GrB_SECOND_INT8   ; break ;
            case GB_INT16_code  : op = GrB_SECOND_INT16  ; break ;
            case GB_INT32_code  : op = GrB_SECOND_INT32  ; break ;
            case GB_INT64_code  : op = GrB_SECOND_INT64  ; break ;
            case GB_UINT8_code  : op = GrB_SECOND_UINT8  ; break ;
            case GB_UINT16_code : op = GrB_SECOND_UINT16 ; break ;
            case GB_UINT32_code : op = GrB_SECOND_UINT32 ; break ;
            case GB_UINT64_code : op = GrB_SECOND_UINT64 ; break ;
            case GB_FP32_code   : op = GrB_SECOND_FP32   ; break ;
            case GB_FP64_code   : op = GrB_SECOND_FP64   ; break ;
            case GB_FC32_code   : op = GxB_SECOND_FC32   ; break ;
            case GB_FC64_code   : op = GxB_SECOND_FC64   ; break ;
            default: ;
        }
    }

    if (flipxy)
    {
        // try to flip the operator (e.g. LT becomes GT); if the flip is
        // handled, the inputs no longer need to be swapped at apply time
        bool handled ;
        op = GB_flip_op (op, &handled) ;
        if (handled) flipxy = false ;
    }
    ASSERT_BINARYOP_OK (op, "final op for emult_02", GB0) ;

    //--------------------------------------------------------------------------
    // declare workspace
    //--------------------------------------------------------------------------

    // Wfirst/Wlast hold per-task partial counts for the first/last vector
    // each task touches (vectors can be split across tasks); Cp_kfirst gives
    // each task its starting position in C for its first vector.
    GB_WERK_DECLARE (Work, int64_t) ;
    int64_t *restrict Wfirst = NULL ;
    int64_t *restrict Wlast = NULL ;
    int64_t *restrict Cp_kfirst = NULL ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;

    //--------------------------------------------------------------------------
    // get M, A, and B
    //--------------------------------------------------------------------------

    const int8_t *restrict Mb = (M == NULL) ? NULL : M->b ;
    const GB_void *restrict Mx = (M == NULL || Mask_struct) ? NULL :
        (const GB_void *) M->x ;
    const size_t msize = (M == NULL) ? 0 : M->type->size ;

    const int64_t *restrict Ap = A->p ;
    const int64_t *restrict Ah = A->h ;
    const int64_t *restrict Ai = A->i ;
    const int64_t vlen = A->vlen ;
    const int64_t vdim = A->vdim ;
    const int64_t nvec = A->nvec ;
    const int64_t anz = GB_nnz (A) ;

    const int8_t *restrict Bb = B->b ;
    const bool B_is_bitmap = GB_IS_BITMAP (B) ;

    //--------------------------------------------------------------------------
    // check if C is iso and compute its iso value if it is
    //--------------------------------------------------------------------------

    const size_t csize = ctype->size ;
    GB_void cscalar [GB_VLA(csize)] ;
    bool C_iso = GB_iso_emult (cscalar, ctype, A, B, op) ;

    //--------------------------------------------------------------------------
    // allocate C->p and C->h
    //--------------------------------------------------------------------------

    GB_OK (GB_new (&C, true, // sparse or hyper (same as A), static header
        ctype, vlen, vdim, GB_Ap_calloc, C_is_csc,
        C_sparsity, A->hyper_switch, nvec, Context)) ;
    int64_t *restrict Cp = C->p ;

    //--------------------------------------------------------------------------
    // slice the input matrix A
    //--------------------------------------------------------------------------

    // GB_SLICE_MATRIX fills A_ek_slicing and sets A_ntasks/A_nthreads
    int A_nthreads, A_ntasks ;
    GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
    GB_SLICE_MATRIX (A, 8, chunk) ;

    //--------------------------------------------------------------------------
    // count entries in C
    --------------------------------------------------------------------------*/

    C->nvec_nonempty = A->nvec_nonempty ;
    C->nvec = nvec ;

    // If B is full and there is no mask, every entry of A survives the
    // intersection, so C has exactly the pattern of A and no counting phase
    // is needed.
    const bool C_has_pattern_of_A = !B_is_bitmap && (M == NULL) ;

    if (!C_has_pattern_of_A)
    {

        //----------------------------------------------------------------------
        // allocate workspace
        //----------------------------------------------------------------------

        GB_WERK_PUSH (Work, 3*A_ntasks, int64_t) ;
        if (Work == NULL)
        {
            // out of memory
            GB_FREE_ALL ;
            return (GrB_OUT_OF_MEMORY) ;
        }
        Wfirst    = Work ;
        Wlast     = Work + A_ntasks ;
        Cp_kfirst = Work + A_ntasks * 2 ;

        //----------------------------------------------------------------------
        // count entries in C
        //----------------------------------------------------------------------

        // This phase is very similar to GB_select_phase1 (GB_ENTRY_SELECTOR).

        if (M == NULL)
        {

            //------------------------------------------------------------------
            // Method2(a): C = A.*B where A is sparse/hyper and B is bitmap
            //------------------------------------------------------------------

            ASSERT (B_is_bitmap) ;

            int tid ;
            #pragma omp parallel for num_threads(A_nthreads) schedule(dynamic,1)
            for (tid = 0 ; tid < A_ntasks ; tid++)
            {
                int64_t kfirst = kfirst_Aslice [tid] ;
                int64_t klast  = klast_Aslice  [tid] ;
                Wfirst [tid] = 0 ;
                Wlast  [tid] = 0 ;
                for (int64_t k = kfirst ; k <= klast ; k++)
                {
                    // count the entries in C(:,j)
                    int64_t j = GBH (Ah, k) ;
                    int64_t pB_start = j * vlen ;
                    int64_t pA, pA_end ;
                    GB_get_pA (&pA, &pA_end, tid, k,
                        kfirst, klast, pstart_Aslice, Ap, vlen) ;
                    int64_t cjnz = 0 ;
                    for ( ; pA < pA_end ; pA++)
                    {
                        // A(i,j) survives only if B's bitmap has B(i,j)
                        cjnz += Bb [pB_start + Ai [pA]] ;
                    }
                    if (k == kfirst)
                    {
                        Wfirst [tid] = cjnz ;
                    }
                    else if (k == klast)
                    {
                        Wlast [tid] = cjnz ;
                    }
                    else
                    {
                        Cp [k] = cjnz ;
                    }
                }
            }

        }
        else
        {

            //------------------------------------------------------------------
            // Method2(c): C<#M> = A.*B; M, B bitmap/full, A is sparse/hyper
            //------------------------------------------------------------------

            ASSERT (M != NULL) ;

            int tid ;
            #pragma omp parallel for num_threads(A_nthreads) schedule(dynamic,1)
            for (tid = 0 ; tid < A_ntasks ; tid++)
            {
                int64_t kfirst = kfirst_Aslice [tid] ;
                int64_t klast  = klast_Aslice  [tid] ;
                Wfirst [tid] = 0 ;
                Wlast  [tid] = 0 ;
                for (int64_t k = kfirst ; k <= klast ; k++)
                {
                    // count the entries in C(:,j)
                    int64_t j = GBH (Ah, k) ;
                    int64_t pB_start = j * vlen ;
                    int64_t pA, pA_end ;
                    GB_get_pA (&pA, &pA_end, tid, k,
                        kfirst, klast, pstart_Aslice, Ap, vlen) ;
                    int64_t cjnz = 0 ;
                    for ( ; pA < pA_end ; pA++)
                    {
                        int64_t i = Ai [pA] ;
                        int64_t pB = pB_start + i ;
                        // mij = M(i,j), honoring structural-only masks and
                        // complementing if requested
                        bool mij = GBB (Mb, pB) && GB_mcast (Mx, pB, msize) ;
                        mij = mij ^ Mask_comp ;
                        cjnz += (mij && GBB (Bb, pB)) ;
                    }
                    if (k == kfirst)
                    {
                        Wfirst [tid] = cjnz ;
                    }
                    else if (k == klast)
                    {
                        Wlast [tid] = cjnz ;
                    }
                    else
                    {
                        Cp [k] = cjnz ;
                    }
                }
            }
        }

        //----------------------------------------------------------------------
        // finalize Cp, cumulative sum of Cp and compute Cp_kfirst
        //----------------------------------------------------------------------

        GB_ek_slice_merge1 (Cp, Wfirst, Wlast, A_ek_slicing, A_ntasks) ;
        GB_ek_slice_merge2 (&(C->nvec_nonempty), Cp_kfirst, Cp, nvec,
            Wfirst, Wlast, A_ek_slicing, A_ntasks, A_nthreads, Context) ;

    }

    //--------------------------------------------------------------------------
    // allocate C->i and C->x
    //--------------------------------------------------------------------------

    int64_t cnz = (C_has_pattern_of_A) ? anz : Cp [nvec] ;
    // set C->iso = C_iso   OK
    GB_OK (GB_bix_alloc (C, cnz, GxB_SPARSE, false, true, C_iso, Context)) ;

    //--------------------------------------------------------------------------
    // copy pattern into C
    //--------------------------------------------------------------------------

    // TODO: could make these components of C shallow instead of memcpy

    if (GB_IS_HYPERSPARSE (A))
    {
        // copy A->h into C->h
        GB_memcpy (C->h, Ah, nvec * sizeof (int64_t), A_nthreads) ;
    }

    if (C_has_pattern_of_A)
    {
        // Method2(b): B is full and no mask present, so the pattern of C is
        // the same as the pattern of A
        GB_memcpy (Cp, Ap, (nvec+1) * sizeof (int64_t), A_nthreads) ;
        GB_memcpy (C->i, Ai, cnz * sizeof (int64_t), A_nthreads) ;
    }

    C->jumbled = A->jumbled ;
    C->magic = GB_MAGIC ;

    //--------------------------------------------------------------------------
    // get the opcode
    //--------------------------------------------------------------------------

    // if flipxy was true on input and the op is positional, FIRST, SECOND, or
    // PAIR, the op has already been flipped, so these tests do not have to
    // consider that case.

    GB_Opcode opcode = op->opcode ;
    bool op_is_positional = GB_OPCODE_IS_POSITIONAL (opcode) ;
    bool op_is_first  = (opcode == GB_FIRST_opcode) ;
    bool op_is_second = (opcode == GB_SECOND_opcode) ;
    bool op_is_pair   = (opcode == GB_PAIR_opcode) ;
    GB_Type_code ccode = ctype->code ;

    //--------------------------------------------------------------------------
    // check if the values of A and/or B are ignored
    //--------------------------------------------------------------------------

    // With C = ewisemult (A,B), only the intersection of A and B is used.
    // If op is SECOND or PAIR, the values of A are never accessed.
    // If op is FIRST  or PAIR, the values of B are never accessed.
    // If op is PAIR, the values of A and B are never accessed.
    // Contrast with ewiseadd.

    // A is passed as x, and B as y, in z = op(x,y)
    bool A_is_pattern = op_is_second || op_is_pair || op_is_positional ;
    bool B_is_pattern = op_is_first  || op_is_pair || op_is_positional ;

    //--------------------------------------------------------------------------
    // using a built-in binary operator (except for positional operators)
    --------------------------------------------------------------------------*/

    #define GB_PHASE_2_OF_2

    bool done = false ;

    if (C_iso)
    {

        //----------------------------------------------------------------------
        // C is iso
        //----------------------------------------------------------------------

        // Cx [0] = cscalar = op (A,B)
        GB_BURBLE_MATRIX (C, "(iso emult) ") ;
        memcpy (C->x, cscalar, csize) ;

        // pattern of C = set intersection of pattern of A and B
        // flipxy is ignored since the operator is not applied
        #define GB_ISO_EMULT
        #include "GB_emult_02_template.c"
        done = true ;

    }
    else
    {

        #ifndef GBCOMPACT

            //------------------------------------------------------------------
            // define the worker for the switch factory
            //------------------------------------------------------------------

            #define GB_AemultB_02(mult,xname) GB (_AemultB_02_ ## mult ## xname)

            #define GB_BINOP_WORKER(mult,xname)                             \
            {                                                               \
                info = GB_AemultB_02(mult,xname) (C,                        \
                    M, Mask_struct, Mask_comp, A, B, flipxy,                \
                    Cp_kfirst, A_ek_slicing, A_ntasks, A_nthreads) ;        \
                done = (info != GrB_NO_VALUE) ;                             \
            }                                                               \
            break ;

            //------------------------------------------------------------------
            // launch the switch factory
            //------------------------------------------------------------------

            GB_Type_code xcode, ycode, zcode ;
            if (!op_is_positional &&
                GB_binop_builtin (A->type, A_is_pattern, B->type, B_is_pattern,
                op, false, &opcode, &xcode, &ycode, &zcode) && ccode == zcode)
            {
                // C=A.*B, both builtin binary op and types
                #define GB_NO_PAIR
                #include "GB_binop_factory.c"
            }

        #endif
    }

    //--------------------------------------------------------------------------
    // generic worker
    //--------------------------------------------------------------------------

    if (!done)
    {
        GB_BURBLE_MATRIX (C, "(generic emult_02: %s) ", op->name) ;
        int ewise_method = flipxy ? GB_EMULT_METHOD3 : GB_EMULT_METHOD2 ;
        GB_ewise_generic (C, op, NULL, 0, 0,
            NULL, NULL, NULL, C_sparsity, ewise_method, Cp_kfirst,
            NULL, 0, 0, A_ek_slicing, A_ntasks, A_nthreads, NULL, 0, 0,
            M, Mask_struct, Mask_comp, A, B, Context) ;
    }

    //--------------------------------------------------------------------------
    // remove empty vectors from C, if hypersparse
    //--------------------------------------------------------------------------

    GB_OK (GB_hypermatrix_prune (C, Context)) ;

    //--------------------------------------------------------------------------
    // free workspace and return result
    //--------------------------------------------------------------------------

    GB_FREE_WORK ;
    ASSERT_MATRIX_OK (C, "C output for emult_02", GB0) ;
    return (GrB_SUCCESS) ;
}
|
GB_AxB_saxpy3_symbolic_template.c | //------------------------------------------------------------------------------
// GB_AxB_saxpy3_symbolic_template: symbolic analysis for GB_AxB_saxpy3
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// Symbolic analysis for C=A*B, C<M>=A*B or C<!M>=A*B, via GB_AxB_saxpy3.
// Coarse tasks compute nnz (C (:,j)) for each of their vectors j. Fine tasks
// just scatter the mask M into the hash table. This phase does not depend on
// the semiring, nor does it depend on the type of C, A, or B. It does access
// the values of M, if the mask matrix M is present and not structural.
// If B is hypersparse, C must also be hypersparse.
// Otherwise, C must be sparse.
// The sparsity of A and B are #defined' constants for this method,
// as is the 3 cases of the mask (no M, M, or !M).
#include "GB_AxB_saxpy3.h"
#include "GB_AxB_saxpy3_template.h"
#include "GB_atomics.h"
#include "GB_bracket.h"
#include "GB_unused.h"
#define GB_META16
#include "GB_meta16_definitions.h"
// NOTE(review): this function is a template, expanded many times via
// GB_meta16_definitions.h; GB_MASK_A_B_SUFFIX, GB_NO_MASK, GB_MASK_COMP and
// the GB_A_IS_*/GB_B_IS_* sparsity flags are #defined per expansion.  Helper
// macros such as GB_GET_M_j, GB_GET_M_ij, GB_SCATTER_M_j and GB_HASH come
// from the included saxpy3 template headers, not this file.
void GB_EVAL2 (GB (AxB_saxpy3_sym), GB_MASK_A_B_SUFFIX)
(
    GrB_Matrix C,               // Cp is computed for coarse tasks
    #if ( !GB_NO_MASK )
    const GrB_Matrix M,         // mask matrix M
    const bool Mask_struct,     // M structural, or not
    const bool M_in_place,      // if true, M(:,j) is dense and used in place
    #endif
    const GrB_Matrix A,         // A matrix; only the pattern is accessed
    const GrB_Matrix B,         // B matrix; only the pattern is accessed
    GB_saxpy3task_struct *SaxpyTasks,   // list of tasks, and workspace
    const int ntasks,           // total number of tasks
    const int nfine,            // number of fine tasks
    const int nthreads          // number of threads
)
{

    //--------------------------------------------------------------------------
    // get M, A, B, and C
    //--------------------------------------------------------------------------

    int64_t *restrict Cp = C->p ;
    const int64_t cvlen = C->vlen ;

    const int64_t *restrict Bp = B->p ;
    const int64_t *restrict Bh = B->h ;
    const int8_t  *restrict Bb = B->b ;
    const int64_t *restrict Bi = B->i ;
    const int64_t bvlen = B->vlen ;
    const bool B_jumbled = B->jumbled ;

    // the compile-time sparsity flags must agree with the actual matrices
    ASSERT (GB_B_IS_SPARSE == GB_IS_SPARSE (B)) ;
    ASSERT (GB_B_IS_HYPER  == GB_IS_HYPERSPARSE (B)) ;
    ASSERT (GB_B_IS_BITMAP == GB_IS_BITMAP (B)) ;
    ASSERT (GB_B_IS_FULL   == GB_IS_FULL (B)) ;

    const int64_t *restrict Ap = A->p ;
    const int64_t *restrict Ah = A->h ;
    const int8_t  *restrict Ab = A->b ;
    const int64_t *restrict Ai = A->i ;
    const int64_t anvec = A->nvec ;
    const int64_t avlen = A->vlen ;
    const bool A_jumbled = A->jumbled ;

    ASSERT (GB_A_IS_SPARSE == GB_IS_SPARSE (A)) ;
    ASSERT (GB_A_IS_HYPER  == GB_IS_HYPERSPARSE (A)) ;
    ASSERT (GB_A_IS_BITMAP == GB_IS_BITMAP (A)) ;
    ASSERT (GB_A_IS_FULL   == GB_IS_FULL (A)) ;

    #if ( !GB_NO_MASK )
    const int64_t *restrict Mp = M->p ;
    const int64_t *restrict Mh = M->h ;
    const int8_t  *restrict Mb = M->b ;
    const int64_t *restrict Mi = M->i ;
    // Mx is NULL for a structural mask: only the pattern of M matters
    const GB_void *restrict Mx = (GB_void *) (Mask_struct ? NULL : (M->x)) ;
    size_t msize = M->type->size ;
    int64_t mnvec = M->nvec ;
    int64_t mvlen = M->vlen ;
    const bool M_is_hyper  = GB_IS_HYPERSPARSE (M) ;
    const bool M_is_bitmap = GB_IS_BITMAP (M) ;
    const bool M_jumbled = GB_JUMBLED (M) ;
    #endif

    //==========================================================================
    // phase1: count nnz(C(:,j)) for coarse tasks, scatter M for fine tasks
    //==========================================================================

    // At this point, all of Hf [...] is zero, for all tasks.
    // Hi and Hx are not initialized.

    int taskid ;
    #pragma omp parallel for num_threads(nthreads) schedule(static,1)
    for (taskid = 0 ; taskid < ntasks ; taskid++)
    {

        //----------------------------------------------------------------------
        // get the task descriptor
        //----------------------------------------------------------------------

        int64_t hash_size = SaxpyTasks [taskid].hsize ;
        // a hash table as large as C's vector length means Gustavson's method
        bool use_Gustavson = (hash_size == cvlen) ;

        if (taskid < nfine)
        {

            //------------------------------------------------------------------
            // no work for fine tasks in phase1 if M is not present
            //------------------------------------------------------------------

            #if ( !GB_NO_MASK )
            {

                //--------------------------------------------------------------
                // get the task descriptor
                //--------------------------------------------------------------

                int64_t kk = SaxpyTasks [taskid].vector ;
                int64_t bjnz = (Bp == NULL) ? bvlen : (Bp [kk+1] - Bp [kk]) ;
                // no work to do if B(:,j) is empty
                if (bjnz == 0) continue ;

                // partition M(:,j): each member of the team of fine tasks
                // sharing this vector scatters its own slice of M(:,j)
                GB_GET_M_j ;        // get M(:,j)
                int team_size = SaxpyTasks [taskid].team_size ;
                int leader    = SaxpyTasks [taskid].leader ;
                int my_teamid = taskid - leader ;
                int64_t mystart, myend ;
                GB_PARTITION (mystart, myend, mjnz, my_teamid, team_size) ;
                mystart += pM_start ;
                myend   += pM_start ;

                if (use_Gustavson)
                {

                    //----------------------------------------------------------
                    // phase1: fine Gustavson task, C<M>=A*B or C<!M>=A*B
                    //----------------------------------------------------------

                    // Scatter the values of M(:,j) into Hf.  No atomics needed
                    // since all indices i in M(;,j) are unique.  Do not
                    // scatter the mask if M(:,j) is a dense vector, since in
                    // that case the numeric phase accesses M(:,j) directly,
                    // not via Hf.

                    if (mjnz > 0)
                    {
                        int8_t *restrict
                            Hf = (int8_t *restrict) SaxpyTasks [taskid].Hf ;
                        GB_SCATTER_M_j (mystart, myend, 1) ;
                    }

                }
                else if (!M_in_place)
                {

                    //----------------------------------------------------------
                    // phase1: fine hash task, C<M>=A*B or C<!M>=A*B
                    //----------------------------------------------------------

                    // If M_in_place is true, this is skipped.  The mask
                    // M is dense, and is used in-place.

                    // The least significant 2 bits of Hf [hash] is the flag f,
                    // and the upper bits contain h, as (h,f).  After this
                    // phase1, if M(i,j)=1 then the hash table contains
                    // ((i+1),1) in Hf [hash] at some location.

                    // Later, the flag values of f = 2 and 3 are also used.
                    // Only f=1 is set in this phase.

                    // h == 0,   f == 0: unoccupied and unlocked
                    // h == i+1, f == 1: occupied with M(i,j)=1

                    int64_t *restrict
                        Hf = (int64_t *restrict) SaxpyTasks [taskid].Hf ;
                    // hash_bits is a mask for the probe sequence; assumes
                    // hash_size is a power of 2 -- TODO confirm (set by the
                    // saxpy3 task builder)
                    int64_t hash_bits = (hash_size-1) ;
                    // scan my M(:,j)
                    for (int64_t pM = mystart ; pM < myend ; pM++)
                    {
                        GB_GET_M_ij (pM) ;      // get M(i,j)
                        if (!mij) continue ;    // skip if M(i,j)=0
                        int64_t i = GBI (Mi, pM, mvlen) ;
                        int64_t i_mine = ((i+1) << 2) + 1 ;  // ((i+1),1)
                        for (GB_HASH (i))
                        {
                            int64_t hf ;
                            // swap my hash entry into the hash table;
                            // does the following using an atomic capture:
                            // { hf = Hf [hash] ; Hf [hash] = i_mine ; }
                            GB_ATOMIC_CAPTURE_INT64 (hf, Hf [hash], i_mine) ;
                            if (hf == 0) break ;        // success
                            // i_mine has been inserted, but a prior entry was
                            // already there.  It needs to be replaced, so take
                            // ownership of this displaced entry, and keep
                            // looking until a new empty slot is found for it.
                            i_mine = hf ;
                        }
                    }
                }
            }
            #endif

        }
        else
        {

            //------------------------------------------------------------------
            // coarse tasks: compute nnz in each vector of A*B(:,kfirst:klast)
            //------------------------------------------------------------------

            int64_t *restrict
                Hf = (int64_t *restrict) SaxpyTasks [taskid].Hf ;
            int64_t kfirst = SaxpyTasks [taskid].start ;
            int64_t klast  = SaxpyTasks [taskid].end ;
            // 'mark' is consumed by the included coarse phase1 templates --
            // presumably a generation counter for reusing Hf; verify in the
            // template files
            int64_t mark = 0 ;

            if (use_Gustavson)
            {

                //--------------------------------------------------------------
                // phase1: coarse Gustavson task
                //--------------------------------------------------------------

                #if ( GB_NO_MASK )
                {
                    // phase1: coarse Gustavson task, C=A*B
                    #include "GB_AxB_saxpy3_coarseGus_noM_phase1.c"
                }
                #elif ( !GB_MASK_COMP )
                {
                    // phase1: coarse Gustavson task, C<M>=A*B
                    #include "GB_AxB_saxpy3_coarseGus_M_phase1.c"
                }
                #else
                {
                    // phase1: coarse Gustavson task, C<!M>=A*B
                    #include "GB_AxB_saxpy3_coarseGus_notM_phase1.c"
                }
                #endif

            }
            else
            {

                //--------------------------------------------------------------
                // phase1: coarse hash task
                //--------------------------------------------------------------

                int64_t *restrict Hi = SaxpyTasks [taskid].Hi ;
                int64_t hash_bits = (hash_size-1) ;

                #if ( GB_NO_MASK )
                {

                    //----------------------------------------------------------
                    // phase1: coarse hash task, C=A*B
                    //----------------------------------------------------------

                    // no mask: the template is included with no mask test
                    #undef GB_CHECK_MASK_ij
                    #include "GB_AxB_saxpy3_coarseHash_phase1.c"

                }
                #elif ( !GB_MASK_COMP )
                {

                    //----------------------------------------------------------
                    // phase1: coarse hash task, C<M>=A*B
                    //----------------------------------------------------------

                    if (M_in_place)
                    {

                        //------------------------------------------------------
                        // M(:,j) is dense.  M is not scattered into Hf.
                        //------------------------------------------------------

                        // GB_CHECK_MASK_ij tests M(i,j) directly from Mjb/Mjx
                        // (provided by the template); the switch below picks a
                        // typed view of Mx so any mask type can be tested as
                        // "nonzero"
                        #undef  GB_CHECK_MASK_ij
                        #define GB_CHECK_MASK_ij                            \
                            bool mij =                                      \
                                (M_is_bitmap ? Mjb [i] : 1) &&              \
                                (Mask_struct ? 1 : (Mjx [i] != 0)) ;        \
                            if (!mij) continue ;

                        switch (msize)
                        {
                            default:
                            case GB_1BYTE :
                                #undef  M_TYPE
                                #define M_TYPE uint8_t
                                #undef  M_SIZE
                                #define M_SIZE 1
                                #include "GB_AxB_saxpy3_coarseHash_phase1.c"
                                break ;
                            case GB_2BYTE :
                                #undef  M_TYPE
                                #define M_TYPE uint16_t
                                #include "GB_AxB_saxpy3_coarseHash_phase1.c"
                                break ;
                            case GB_4BYTE :
                                #undef  M_TYPE
                                #define M_TYPE uint32_t
                                #include "GB_AxB_saxpy3_coarseHash_phase1.c"
                                break ;
                            case GB_8BYTE :
                                #undef  M_TYPE
                                #define M_TYPE uint64_t
                                #include "GB_AxB_saxpy3_coarseHash_phase1.c"
                                break ;
                            case GB_16BYTE :
                                // 16-byte mask entries are viewed as two
                                // uint64_t words; nonzero if either word is
                                #undef  M_TYPE
                                #define M_TYPE uint64_t
                                #undef  M_SIZE
                                #define M_SIZE 2
                                #undef  GB_CHECK_MASK_ij
                                #define GB_CHECK_MASK_ij                    \
                                    bool mij =                              \
                                        (M_is_bitmap ? Mjb [i] : 1) &&      \
                                        (Mask_struct ? 1 :                  \
                                            (Mjx [2*i] != 0) ||             \
                                            (Mjx [2*i+1] != 0)) ;           \
                                    if (!mij) continue ;
                                #include "GB_AxB_saxpy3_coarseHash_phase1.c"
                                break ;
                        }

                    }
                    else
                    {

                        //------------------------------------------------------
                        // M is sparse and scattered into Hf
                        //------------------------------------------------------

                        #include "GB_AxB_saxpy3_coarseHash_M_phase1.c"
                    }

                }
                #else
                {

                    //----------------------------------------------------------
                    // phase1: coarse hash task, C<!M>=A*B
                    //----------------------------------------------------------

                    if (M_in_place)
                    {

                        //------------------------------------------------------
                        // M(:,j) is dense.  M is not scattered into Hf.
                        //------------------------------------------------------

                        // same as the C<M> case above, but the sense of the
                        // test is inverted: skip the entry if M(i,j) is true
                        #undef  GB_CHECK_MASK_ij
                        #define GB_CHECK_MASK_ij                            \
                            bool mij =                                      \
                                (M_is_bitmap ? Mjb [i] : 1) &&              \
                                (Mask_struct ? 1 : (Mjx [i] != 0)) ;        \
                            if (mij) continue ;

                        switch (msize)
                        {
                            default:
                            case GB_1BYTE :
                                #undef  M_TYPE
                                #define M_TYPE uint8_t
                                #undef  M_SIZE
                                #define M_SIZE 1
                                #include "GB_AxB_saxpy3_coarseHash_phase1.c"
                                break ;
                            case GB_2BYTE :
                                #undef  M_TYPE
                                #define M_TYPE uint16_t
                                #include "GB_AxB_saxpy3_coarseHash_phase1.c"
                                break ;
                            case GB_4BYTE :
                                #undef  M_TYPE
                                #define M_TYPE uint32_t
                                #include "GB_AxB_saxpy3_coarseHash_phase1.c"
                                break ;
                            case GB_8BYTE :
                                #undef  M_TYPE
                                #define M_TYPE uint64_t
                                #include "GB_AxB_saxpy3_coarseHash_phase1.c"
                                break ;
                            case GB_16BYTE :
                                #undef  M_TYPE
                                #define M_TYPE uint64_t
                                #undef  M_SIZE
                                #define M_SIZE 2
                                #undef  GB_CHECK_MASK_ij
                                #define GB_CHECK_MASK_ij                    \
                                    bool mij =                              \
                                        (M_is_bitmap ? Mjb [i] : 1) &&      \
                                        (Mask_struct ? 1 :                  \
                                            (Mjx [2*i] != 0) ||             \
                                            (Mjx [2*i+1] != 0)) ;           \
                                    if (mij) continue ;
                                #include "GB_AxB_saxpy3_coarseHash_phase1.c"
                                break ;
                        }

                    }
                    else
                    {

                        //------------------------------------------------------
                        // M is sparse and scattered into Hf
                        //------------------------------------------------------

                        #include "GB_AxB_saxpy3_coarseHash_notM_phase1.c"
                    }
                }
                #endif
            }
        }
    }

    //--------------------------------------------------------------------------
    // check result for phase1 for fine tasks
    //--------------------------------------------------------------------------

    // debug-only verification that the mask scatter above is consistent:
    // every M(i,j)=1 entry must be findable in Hf, and nothing else may be
    #ifdef GB_DEBUG
    #if ( !GB_NO_MASK )
    {
        for (taskid = 0 ; taskid < nfine ; taskid++)
        {
            int64_t kk = SaxpyTasks [taskid].vector ;
            ASSERT (kk >= 0 && kk < B->nvec) ;
            int64_t bjnz = (Bp == NULL) ? bvlen : (Bp [kk+1] - Bp [kk]) ;
            // no work to do if B(:,j) is empty
            if (bjnz == 0) continue ;
            int64_t hash_size = SaxpyTasks [taskid].hsize ;
            bool use_Gustavson = (hash_size == cvlen) ;
            int leader = SaxpyTasks [taskid].leader ;
            // only the team leader checks the whole of M(:,j)
            if (leader != taskid) continue ;
            GB_GET_M_j ;        // get M(:,j)
            if (mjnz == 0) continue ;
            int64_t mjcount2 = 0 ;
            int64_t mjcount = 0 ;
            for (int64_t pM = pM_start ; pM < pM_end ; pM++)
            {
                GB_GET_M_ij (pM) ;      // get M(i,j)
                if (mij) mjcount++ ;
            }
            if (use_Gustavson)
            {
                // phase1: fine Gustavson task, C<M>=A*B or C<!M>=A*B
                int8_t *restrict
                    Hf = (int8_t *restrict) SaxpyTasks [taskid].Hf ;
                for (int64_t pM = pM_start ; pM < pM_end ; pM++)
                {
                    GB_GET_M_ij (pM) ;      // get M(i,j)
                    int64_t i = GBI (Mi, pM, mvlen) ;
                    ASSERT (Hf [i] == mij) ;
                }
                for (int64_t i = 0 ; i < cvlen ; i++)
                {
                    ASSERT (Hf [i] == 0 || Hf [i] == 1) ;
                    if (Hf [i] == 1) mjcount2++ ;
                }
                ASSERT (mjcount == mjcount2) ;
            }
            else if (!M_in_place)
            {
                // phase1: fine hash task, C<M>=A*B or C<!M>=A*B
                // h == 0,   f == 0: unoccupied and unlocked
                // h == i+1, f == 1: occupied with M(i,j)=1
                int64_t *restrict
                    Hf = (int64_t *restrict) SaxpyTasks [taskid].Hf ;
                int64_t hash_bits = (hash_size-1) ;
                for (int64_t pM = pM_start ; pM < pM_end ; pM++)
                {
                    GB_GET_M_ij (pM) ;      // get M(i,j)
                    if (!mij) continue ;    // skip if M(i,j)=0
                    int64_t i = GBI (Mi, pM, mvlen) ;
                    int64_t i_mine = ((i+1) << 2) + 1 ;  // ((i+1),1)
                    int64_t probe = 0 ;
                    for (GB_HASH (i))
                    {
                        int64_t hf = Hf [hash] ;
                        if (hf == i_mine)
                        {
                            mjcount2++ ;
                            break ;
                        }
                        ASSERT (hf != 0) ;
                        probe++ ;
                        ASSERT (probe < cvlen) ;
                    }
                }
                ASSERT (mjcount == mjcount2) ;
                mjcount2 = 0 ;
                for (int64_t hash = 0 ; hash < hash_size ; hash++)
                {
                    int64_t hf = Hf [hash] ;
                    int64_t h = (hf >> 2) ;     // empty (0), or a 1-based
                    int64_t f = (hf & 3) ;      // 0 if empty or 1 if occupied
                    if (f == 1) ASSERT (h >= 1 && h <= cvlen) ;
                    ASSERT (hf == 0 || f == 1) ;
                    if (f == 1) mjcount2++ ;
                }
                ASSERT (mjcount == mjcount2) ;
            }
        }
    }
    #endif
    #endif
}
|
3d7pt.c | /*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/*
 * Compute *result = *x - *y for two struct timeval values.
 *
 * Note: *y is normalized (modified) in place while handling the carry,
 * exactly as in the classic GNU libc example this routine derives from.
 *
 * Returns 1 if the difference is negative, otherwise 0.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Carry microseconds into seconds so that x->tv_usec >= y->tv_usec. */
    if (x->tv_usec < y->tv_usec) {
        int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * carry;
        y->tv_sec += carry;
    }
    /* Normalize the other direction when the usec gap exceeds one second. */
    if (x->tv_usec - y->tv_usec > 1000000) {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }
    /* After normalization the usec component of the difference is positive. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 32;
tile_size[1] = 32;
tile_size[2] = 4;
tile_size[3] = 2048;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
const double alpha = 0.0876;
const double beta = 0.0765;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
#pragma scop
for (t = 0; t < Nt-1; t++) {
for (i = 1; i < Nz-1; i++) {
for (j = 1; j < Ny-1; j++) {
for (k = 1; k < Nx-1; k++) {
A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k])
+ beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] +
A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]);
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays (Causing performance degradation
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
fw_rdp_omp.c |
/*
Copy:
scp fw_rdp.cpp zafahmad@stampede2.tacc.utexas.edu:/home1/05072/zafahmad/SC_2019_submission/
Compile:
module load papi/5.5.1
icc -DDEBUG -O3 -fopenmp -xhost -AVX512 fw_rdp_omp.cpp -o fw_rdp -I$TACC_PAPI_INC -Wl,-rpath,$TACC_PAPI_LIB -L$TACC_PAPI_LIB -lpapi
icc -O3 -fopenmp -xhost -AVX512 fw_rdp_omp_poly_base.c -o exec -DDEBUG -DPOLYBENCH polybench-c-3.2/utilities/polybench.c -DPOLYBENCH_TIME -DSMALL_DATASET -Ipolybench-c-3.2/utilities/ -I. -I$TACC_PAPI_INC -Wl,-rpath,$TACC_PAPI_LIB -L$TACC_PAPI_LIB -lpapi
icc fw_rdp_omp.c -o fw_rdp -DDEBUG -DPOLYBENCH polybench-c-4.2/utilities/polybench.c -DPOLYBENCH_TIME -DPOLYBENCH_USE_RESTRICT -Ipolybench-c-4.2/utilities/ -I. -O2 -qopenmp -xKNL -qopt-prefetch=5 -xhost -AVX512 -lm
Execute:
./fw_rdp N B R P
./fw_rdp 1024 128 2 272
export GOMP_CPU_AFFINITY='0,4,8,12,16,20,24,28,32,36,40,44,48,52,56,60,64,68,72,76,80,84,88,92,96,100,104,108,112,116,120,124,128,132,136,140,144,148,152,156,160,164,168,172,176,180,184,188,192,196,200,204,208,212,216,220,224,228,232,236,240,244,248,252,256,260,264,268'
export OMP_NUM_THREADS=68
export OMP_PROC_BIND=true
# export OMP_NUM_THREADS=272
*/
#include <stdio.h>
#include <unistd.h>
#include <sys/time.h>
#include <unistd.h>
#include <stdlib.h>
#include <sys/time.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <math.h>
// #include <iostream>
#include <omp.h>
#include "fw_rdp_omp.h"
// #include <cilk/cilk.h>
// #include <cilk/cilk_api.h>
// #include "cilktime.h"
#ifdef USE_PAPI
#include <papi.h>
#include "papilib.h"
#endif
#ifdef POLYBENCH
#include <polybench.h>
#endif
// using namespace std;
// Compile command: gcc -o ge ge.c -lm
// For the followings, I have tested:
/*
TEST #1) ./ge 16 4 2
TEST #2) ./ge 512 64 32
TEST #3) ./ge 1024 64 32
*/
/*
#define min(a, b) (a < b ? a : b)
#define max(a, b) (a > b ? a : b)
# ifndef DATA_TYPE
# define DATA_TYPE int
# define DATA_PRINTF_MODIFIER "%d "
# endif
*/
// #define CACHE_OPTIMIZED
#define CACHE_OPTIMIZEDX
/*
STEP 1) The input algorithm is the triply-nested for loop version passed to
PoCC to get parametric tiled version of the code
*/
int NN; // original size of the matrix
/* Reference (serial) Floyd-Warshall: relax every pair (i,j) through every
 * intermediate vertex k.  Used as the ground truth for the recursive
 * versions.  `counter` tracks the number of updates (diagnostic only). */
void fw(DATA_TYPE **D, int N) {
  int counter = 0;
  for (int k = 0; k < N; ++k) {
    for (int i = 0; i < N; ++i) {
      for (int j = 0; j < N; ++j) {
        D[i][j] = min(D[i][j], (D[i][k] + D[k][j]));
        counter++;
      }
    }
  }
  // printf("Total number of updates are: %d", counter);
}
/*
STEP 5) Recursive but applying index set splitting AND having multiple functions
*/
// in function A, X = U = V = W --> The least parallel one
void fw_rec3_A(DATA_TYPE *X, int N, int R, int base_size,
int k_lb, int k_ub, int i_lb, int i_ub,
int j_lb, int j_ub);
// in function B, X = V but X != U and X != W
void fw_rec3_B(DATA_TYPE *X, DATA_TYPE *U, DATA_TYPE *W,
int N, int R, int base_size,
int k_lb, int k_ub, int i_lb, int i_ub,
int j_lb, int j_ub);
// in function C, X = U but X != V and X != W
void fw_rec3_C(DATA_TYPE *X, DATA_TYPE *V, DATA_TYPE *W,
int N, int R, int base_size,
int k_lb, int k_ub, int i_lb, int i_ub,
int j_lb, int j_ub);
// in function D, X != U and X != V and X != W --> The most parallel one
void fw_rec3_D(DATA_TYPE *X, DATA_TYPE *U, DATA_TYPE *V, DATA_TYPE *W,
int N, int R, int base_size,
int k_lb, int k_ub, int i_lb, int i_ub,
int j_lb, int j_ub);
/* Entry point for case D (output tile X disjoint from read tiles U and V;
 * the pivot tile W is bound to X here): launch the recursive solver as a
 * single root task inside a parallel region. */
void fw_D(DATA_TYPE *X, DATA_TYPE *U, DATA_TYPE *V, int N, int R){
  NN = N;
  int base = BASESIZE;
#pragma omp parallel
#pragma omp single
  {
#pragma omp task
    fw_rec3_D(X, U, V, X, N, R, base, 0, N, 0, N, 0, N);
  }
}
/* Entry point for case C (X doubles as U; V is a separate row tile; the
 * pivot tile W is bound to X): spawn the recursive solver as a root task. */
void fw_C(DATA_TYPE *X, DATA_TYPE *V, int N, int R){
  NN = N;
  int base = BASESIZE;
#pragma omp parallel
#pragma omp single
  {
#pragma omp task
    fw_rec3_C(X, V, X, N, R, base, 0, N, 0, N, 0, N);
  }
}
/* Entry point for case B (X doubles as V; U is a separate column tile; the
 * pivot tile W is bound to X): spawn the recursive solver as a root task. */
void fw_B(DATA_TYPE *X, DATA_TYPE *U, int N, int R){
  NN = N;
  int base = BASESIZE;
#pragma omp parallel
#pragma omp single
  {
#pragma omp task
    fw_rec3_B(X, U, X, N, R, base, 0, N, 0, N, 0, N);
  }
}
/* Entry point for case A (X = U = V = W, the least parallel case).
 *
 * Bug fix: in the original, `#pragma omp task` was immediately followed by
 * the printf statement, so the task construct bound to the printf (a task
 * applies only to the single following structured block) and the recursive
 * solve ran outside any task -- unlike fw_B/fw_C/fw_D.  The diagnostic
 * printf now runs once in the single region and the recursion is the task. */
void fw_A(DATA_TYPE *X, int N, int R){
  NN = N;
  //int R = RWAY;
  int base_size = BASESIZE;
#pragma omp parallel
  {
#pragma omp single
    {
      printf("Number of threads: %d -----------------------------------------------------------", omp_get_num_threads());
#pragma omp task
      fw_rec3_A(X, N, R, base_size, 0, N, 0, N, 0, N);
    }
  }
}
// in function D, X != U and X != V and X != W --> The most parallel one
/*
 * Recursive tiled Floyd-Warshall update, case D: the write tile X is
 * disjoint from all read tiles (U: column block, V: row block, W: pivot
 * block), so every sub-tile update within one pivot step kk is independent
 * and is spawned as an OpenMP task.  NN is the global matrix dimension;
 * [k_lb,k_ub) x [i_lb,i_ub) x [j_lb,j_ub) is this call's index region.
 * All matrices are row-major 1-D arrays with stride NN.
 */
void fw_rec3_D(DATA_TYPE *X, DATA_TYPE *U, DATA_TYPE *V, DATA_TYPE *W,
int N, int R, int base_size,
int k_lb, int k_ub, int i_lb, int i_ub,
int j_lb, int j_ub) {
// Region entirely outside the real matrix (virtual padding) -- nothing to do.
if (k_lb >= NN || i_lb >= NN || j_lb >= NN)
return ;
// printf("N: %d NN: %d R: %d base: %d klb: %d kub: %d ilb: %d iub: %d jlb: %d jub: %d\n", N, NN, R, base_size, k_lb, k_ub,
// i_lb, i_ub, j_lb, j_ub);
int i, j, k;
// base case
if ((k_ub - k_lb) <= base_size || N <= R) {
#ifdef USE_PAPI
int id = tid();
papi_for_thread(id);
int retval = 0;
if ( (retval=PAPI_start(EventSet[id])) != PAPI_OK)
ERROR_RETURN(retval);
#endif
#ifndef CACHE_OPTIMIZEDX
// Straightforward min-plus update: X[i][j] = min(X[i][j], U[i][k] + V[k][j]).
for (k = k_lb; k < k_ub && k < NN; ++k) {
for (i = i_lb; i < i_ub && i < NN; ++i) {
for (j = j_lb; j < j_ub && j < NN; ++j) {
// if (i > k && j >= k) {
// X[i][j] -= (U[i][k] * V[k][j])/W[k][k];
// X[i][j] = min(X[i][j], (U[i][k] + V[k][j]));
X[i*NN + j] = min(X[i*NN + j], (U[i*NN + k] + V[k*NN + j]));
// }
}
}
}
#else
// Cache-optimized base case: copy the three tiles into contiguous local
// scratch (U transposed to column-major) before updating, then write back.
// NOTE(review): these are VLAs of base_size*base_size elements on the
// task's stack -- large BASESIZE risks stack overflow; confirm limits.
DATA_TYPE u_col_major[base_size * base_size], v_row_major[base_size * base_size];
DATA_TYPE x_row_major[base_size * base_size];
for (i = i_lb; i < i_ub && i < NN; ++i)
for (j = j_lb; j < j_ub && j < NN; ++j)
x_row_major[(i-i_lb)*base_size + (j-j_lb)] = X[i*NN+j]; //X[i][j];
for (i = i_lb; i < i_ub && i < NN; ++i)
for (k = k_lb; k < k_ub && k < NN; ++k)
u_col_major[(k-k_lb)*base_size + (i-i_lb)] = U[i*NN+k]; //U[i][k];
for (k = k_lb; k < k_ub && k < NN; ++k)
for (j = j_lb; j < j_ub && j < NN; ++j)
v_row_major[(k-k_lb)*base_size + (j-j_lb)] = V[k*NN+j]; //V[k][j];
for (k = k_lb; k < k_ub && k < NN; ++k) {
// w_kk is a leftover from the Gaussian-elimination variant this kernel
// was adapted from; the division is commented out below.
DATA_TYPE w_kk = W[k*NN+k]; //W[k][k];
for (i = i_lb; i < i_ub && i < NN; ++i) {
// DATA_TYPE div_ik_kk = U[i][k]/w_kk;
DATA_TYPE div_ik_kk = u_col_major[(k - k_lb) * base_size + (i - i_lb)]; ///w_kk;
for (j = j_lb; j < j_ub && j < NN; ++j) {
// if (i > k && j >= k) {
// X[i][j] -= (U[i][k] * V[k][j])/W[k][k];
// X[i][j] -= div_ik_kk * v_row_major[(k - k_lb) * base_size + (j - j_lb)];
// x_row_major[(i-i_lb)*base_size + (j-j_lb)] -= div_ik_kk * v_row_major[(k - k_lb) * base_size + (j - j_lb)];
x_row_major[(i-i_lb)*base_size + (j-j_lb)] = min(x_row_major[(i-i_lb)*base_size + (j-j_lb)],
(div_ik_kk + v_row_major[(k - k_lb) * base_size + (j - j_lb)]));
// }
}
}
}
for (i = i_lb; i < i_ub && i < NN; ++i)
for (j = j_lb; j < j_ub && j < NN; ++j){
X[i*NN+j] = x_row_major[(i-i_lb)*base_size + (j-j_lb)]; //X[i][j]
}
#endif
#ifdef USE_PAPI
countMisses(id );
#endif
return;
}
// Recursive case: split the region into R x R x R sub-tiles of size N/R;
// sub-tiles are processed pivot step by pivot step (kk), with all (ii,jj)
// updates inside one step spawned as independent tasks.
int ii, jj, kk;
int tile_size = N/R;
for (kk = 0; kk < R && k_lb + kk * (tile_size) < NN; kk++) {
// All the following fw_rec3_D(...) functions can run in parallel.
// IN PARALLEL:
// Only possible case is this as all the input/read tiles are different than output/write tile
// #pragma omp parallel
{
// #pragma omp for collapse(2)
for (ii = 0; ii < R && i_lb + ii * (tile_size) < NN; ii++) {
for (jj = 0; jj < R && j_lb + jj * (tile_size) < NN; jj++) {
#pragma omp task
fw_rec3_D(X, U, V, W, tile_size, R, base_size,
k_lb + kk * (tile_size), k_lb + (kk + 1) * tile_size,
i_lb + ii * (tile_size), i_lb + (ii + 1) * tile_size,
j_lb + jj * (tile_size), j_lb + (jj + 1) * tile_size);
}
}
}
#pragma omp taskwait
// JOIN - SYNC
}
}
// in function C, X = U but X != V and X != W
/*
 * Recursive tiled Floyd-Warshall update, case C: X is also the column
 * block U (X = U), while V and W are separate.  Per pivot step kk the
 * diagonal-column sub-tiles (jj == kk) must be updated first with C
 * (self-dependent), then the remaining sub-tiles are independent D updates.
 * NN is the global matrix dimension; matrices are row-major with stride NN.
 */
void fw_rec3_C(DATA_TYPE *X, DATA_TYPE *V, DATA_TYPE *W,
int N, int R, int base_size,
int k_lb, int k_ub, int i_lb, int i_ub,
int j_lb, int j_ub) {
int i, j, k;
// printf("N: %d NN: %d R: %d base: %d klb: %d kub: %d ilb: %d iub: %d jlb: %d jub: %d\n", N, NN, R, base_size, k_lb, k_ub,
// i_lb, i_ub, j_lb, j_ub);
// Region entirely outside the real matrix (virtual padding) -- nothing to do.
if (k_lb >= NN || i_lb >= NN || j_lb >= NN)
return ;
// base case
if ((k_ub - k_lb) <= base_size || N <= R) {
#ifdef USE_PAPI
int id = tid();
papi_for_thread(id);
int retval = 0;
if ( (retval=PAPI_start(EventSet[id])) != PAPI_OK)
ERROR_RETURN(retval);
#endif
#ifndef CACHE_OPTIMIZEDX
// Min-plus update reading the column from X itself: X = min(X, X[.][k] + V[k][.]).
for (k = k_lb; k < k_ub && k < NN; ++k) {
for (i = i_lb; i < i_ub && i < NN; ++i) {
for (j = j_lb; j < j_ub && j < NN; ++j) {
// if (i > k && j >= k) {
// X[i][j] -= (X[i][k] * V[k][j])/W[k][k];
// X[i][j] = min(X[i][j], (X[i][k] + V[k][j])); ///W[k][k];
X[i*NN + j] = min(X[i*NN + j], (X[i*NN + k] + V[k*NN + j]));
// }
}
}
}
#else
// Cache-optimized base case: stage X and V tiles in contiguous scratch.
// NOTE(review): VLAs of base_size*base_size elements on the stack.
DATA_TYPE v_row_major[base_size * base_size];
DATA_TYPE x_row_major[base_size * base_size];
for (i = i_lb; i < i_ub && i < NN; ++i)
for (j = j_lb; j < j_ub && j < NN; ++j)
x_row_major[(i-i_lb)*base_size + (j-j_lb)] = X[i*NN+j]; //X[i][j];
for (k = k_lb; k < k_ub && k < NN; ++k)
for (j = j_lb; j < j_ub && j < NN; ++j)
v_row_major[(k-k_lb)*base_size + (j-j_lb)] = V[k*NN+j]; //V[k][j];
for (k = k_lb; k < k_ub && k < NN; ++k) {
// w_kk is unused here (division from the elimination variant is commented out).
DATA_TYPE w_kk = W[k*NN+k]; //W[k][k];
for (i = i_lb; i < i_ub && i < NN; ++i) {
// DATA_TYPE div_ik_kk = U[i][k]/w_kk;
for (j = j_lb; j < j_ub && j < NN; ++j) {
// if (i > k && j >= k) {
// X[i][j] -= (U[i][k] * V[k][j])/W[k][k];
// X[i][j] -= div_ik_kk * v_row_major[(k - k_lb) * base_size + (j - j_lb)];
// x_row_major[(i-i_lb)*base_size + (j-j_lb)] -= x_row_major[(i-i_lb)*base_size + (k-k_lb)]
// * v_row_major[(k - k_lb) * base_size + (j - j_lb)] / w_kk;
x_row_major[(i-i_lb)*base_size + (j-j_lb)] = min (x_row_major[(i-i_lb)*base_size + (j-j_lb)], (x_row_major[(i-i_lb)*base_size + (k-k_lb)]
+ v_row_major[(k - k_lb) * base_size + (j - j_lb)])); // / w_kk;
// }
}
}
}
for (i = i_lb; i < i_ub && i < NN; ++i)
for (j = j_lb; j < j_ub && j < NN; ++j){
X[i*NN+j] = x_row_major[(i-i_lb)*base_size + (j-j_lb)]; //X[i][j]
}
#endif
#ifdef USE_PAPI
countMisses(id );
#endif
return;
}
int ii, jj, kk;
int tile_size = N/R;
for (kk = 0; kk < R && k_lb + kk * (tile_size) < NN; kk++) {
// Applying the same idea of index set Splitting
// All the following fw_rec3_C(...) functions can run in parallel.
// IN PARALLEL:
// CASE 1: kk = jj
// #pragma omp parallel
{
// #pragma omp for
for (ii = 0; ii < R && i_lb + ii * (tile_size) < NN; ii++) {
#pragma omp task
fw_rec3_C(X, V, W, tile_size, R, base_size,
k_lb + kk * (tile_size), k_lb + (kk + 1) * tile_size,
i_lb + ii * (tile_size), i_lb + (ii + 1) * tile_size,
j_lb + kk * (tile_size), j_lb + (kk + 1) * tile_size);
}
}
#pragma omp taskwait
// JOIN - SYNC
// All the following fw_rec3_D(...) functions can run in parallel.
// IN PARALLEL:
// else of CASE 1
// CASE 2: kk != ii and kk != jj ==> Function D(X, U, V, ...)
// #pragma omp parallel
{
// #pragma omp for collapse(2)
for (ii = 0; ii < R && i_lb + ii * (tile_size) < NN; ii++) {
for (jj = 0; jj < R && j_lb + jj * (tile_size) < NN; jj++) {
if (jj == kk) continue;
#pragma omp task
fw_rec3_D(X, X, V, W, tile_size, R, base_size,
k_lb + kk * (tile_size), k_lb + (kk + 1) * tile_size,
i_lb + ii * (tile_size), i_lb + (ii + 1) * tile_size,
j_lb + jj * (tile_size), j_lb + (jj + 1) * tile_size);
}
}
}
#pragma omp taskwait
// JOIN - SYNC
}
}
// in function B, X = V but X != U and X != W
/*
 * Recursive tiled Floyd-Warshall update, case B: X is also the row block V
 * (X = V), while U and W are separate.  Per pivot step kk the diagonal-row
 * sub-tiles (ii == kk) must be updated first with B (self-dependent), then
 * the remaining sub-tiles are independent D updates.
 * NN is the global matrix dimension; matrices are row-major with stride NN.
 */
void fw_rec3_B(DATA_TYPE *X, DATA_TYPE *U, DATA_TYPE *W,
int N, int R, int base_size,
int k_lb, int k_ub, int i_lb, int i_ub,
int j_lb, int j_ub) {
int i, j, k;
// Region entirely outside the real matrix (virtual padding) -- nothing to do.
if (k_lb >= NN || i_lb >= NN || j_lb >= NN)
return ;
// printf("N: %d NN: %d R: %d base: %d klb: %d kub: %d ilb: %d iub: %d jlb: %d jub: %d\n", N, NN, R, base_size, k_lb, k_ub,
// i_lb, i_ub, j_lb, j_ub);
// base case
if ((k_ub - k_lb) <= base_size || N <= R) {
#ifdef USE_PAPI
int id = tid();
papi_for_thread(id);
int retval = 0;
if ( (retval=PAPI_start(EventSet[id])) != PAPI_OK)
ERROR_RETURN(retval);
#endif
#ifndef CACHE_OPTIMIZEDX
// Min-plus update reading the row from X itself: X = min(X, U[.][k] + X[k][.]).
for (k = k_lb; k < k_ub && k < NN; ++k) {
for (i = i_lb; i < i_ub && i < NN; ++i) {
for (j = j_lb; j < j_ub && j < NN; ++j) {
// if (i > k && j >= k) {
// X[i][j] -= (U[i][k] * X[k][j])/W[k][k];
X[i*NN + j] = min(X[i*NN + j], (U[i*NN + k] + X[k*NN + j])); ///W[k][k];
// }
}
}
}
#else
// Cache-optimized base case: stage X (row-major) and U (column-major)
// tiles in contiguous scratch.
// NOTE(review): VLAs of base_size*base_size elements on the stack.
DATA_TYPE u_col_major[base_size * base_size];
DATA_TYPE x_row_major[base_size * base_size];
for (i = i_lb; i < i_ub && i < NN; ++i)
for (j = j_lb; j < j_ub && j < NN; ++j)
x_row_major[(i-i_lb)*base_size + (j-j_lb)] = X[i*NN+j]; //X[i][j];
for (i = i_lb; i < i_ub && i < NN; ++i)
for (k = k_lb; k < k_ub && k < NN; ++k)
u_col_major[(k-k_lb)*base_size + (i-i_lb)] = U[i*NN+k]; //U[i][k];
for (k = k_lb; k < k_ub && k < NN; ++k) {
// w_kk is unused here (division from the elimination variant is commented out).
DATA_TYPE w_kk = W[k*NN+k]; //W[k][k];
for (i = i_lb; i < i_ub && i < NN; ++i) {
// DATA_TYPE div_ik_kk = U[i][k]/w_kk;
DATA_TYPE div_ik_kk = u_col_major[(k - k_lb) * base_size + (i - i_lb)]; ///w_kk;
//int j_ub_min = min(j_ub, NN);
// #pragma GCC ivdep
for (j = j_lb; j < j_ub && j < NN; ++j) {
// if (i > k && j >= k) {
// X[i][j] -= (U[i][k] * V[k][j])/W[k][k];
// X[i][j] -= div_ik_kk * x_row_major[(k - k_lb) * base_size + (j - j_lb)];
// x_row_major[(i-i_lb)*base_size + (j-j_lb)] -= div_ik_kk * x_row_major[(k - k_lb) * base_size + (j - j_lb)];
x_row_major[(i-i_lb)*base_size + (j-j_lb)] = min (x_row_major[(i-i_lb)*base_size + (j-j_lb)], (div_ik_kk + x_row_major[(k - k_lb) * base_size + (j - j_lb)]));
// }
}
}
}
for (i = i_lb; i < i_ub && i < NN; ++i)
for (j = j_lb; j < j_ub && j < NN; ++j)
X[i*NN+j] = x_row_major[(i-i_lb)*base_size + (j-j_lb)]; //X[i][j]
#endif
#ifdef USE_PAPI
countMisses(id );
#endif
return;
}
int ii, jj, kk;
int tile_size = N/R;
for (kk = 0; kk < R && k_lb + kk * (tile_size) < NN; kk++) {
// Applying the same idea of index set Splitting
// All the following fw_rec3_B(...) functions can run in parallel.
// IN PARALLEL:
// CASE 1: kk = ii but kk != jj ==> Function B(X, U, W, ...)
// #pragma omp parallel
{
// #pragma omp for
for (jj = 0; jj < R && j_lb + jj * (tile_size) < NN; jj++) {
#pragma omp task
fw_rec3_B(X, U, W, tile_size, R, base_size,
k_lb + kk * (tile_size), k_lb + (kk + 1) * tile_size,
i_lb + kk * (tile_size), i_lb + (kk + 1) * tile_size,
j_lb + jj * (tile_size), j_lb + (jj + 1) * tile_size);
}
}
#pragma omp taskwait
// JOIN - SYNC
// All the following fw_rec3_D(...) functions can run in parallel.
// IN PARALLEL:
// else of CASE 1
// CASE 2: kk != ii ==> Function D(X, U, V, W, ...)
// #pragma omp parallel
{
// #pragma omp for collapse(2)
for (ii = 0; ii < R && i_lb + ii * (tile_size) < NN; ii++) {
if (ii == kk) continue;
for (jj = 0; jj < R && j_lb + jj * (tile_size) < NN; jj++) {
#pragma omp task
fw_rec3_D(X, U, X, W, tile_size, R, base_size,
k_lb + kk * (tile_size), k_lb + (kk + 1) * tile_size,
i_lb + ii * (tile_size), i_lb + (ii + 1) * tile_size,
j_lb + jj * (tile_size), j_lb + (jj + 1) * tile_size);
}
}
}
#pragma omp taskwait
// JOIN - SYNC
}
}
// in function A, X = U = V = W --> The least parallel one
/*
 * Recursive tiled Floyd-Warshall update, case A: all four tile roles alias
 * the same matrix X.  Per pivot step kk the dependency order is the classic
 * blocked FW schedule: diagonal tile (A, serial) -> pivot row (B, tasks) ->
 * pivot column (C, tasks) -> remaining tiles (D, tasks).
 * NN is the global matrix dimension; matrices are row-major with stride NN.
 */
void fw_rec3_A(DATA_TYPE *X, int N, int R, int base_size,
int k_lb, int k_ub, int i_lb, int i_ub,
int j_lb, int j_ub) {
// Region entirely outside the real matrix (virtual padding) -- nothing to do.
if (k_lb >= NN || i_lb >= NN || j_lb >= NN)
return ;
// printf("N: %d NN: %d R: %d base: %d klb: %d kub: %d ilb: %d iub: %d jlb: %d jub: %d\n", N, NN, R, base_size, k_lb, k_ub,
// i_lb, i_ub, j_lb, j_ub);
int i, j, k;
// base case
if ((k_ub - k_lb) <= base_size || N <= R) {
#ifdef USE_PAPI
int id = tid();
papi_for_thread(id);
int retval = 0;
if ( (retval=PAPI_start(EventSet[id])) != PAPI_OK)
ERROR_RETURN(retval);
#endif
#ifndef CACHE_OPTIMIZEDX
// printf("N: %d NN: %d R: %d base: %d klb: %d kub: %d ilb: %d iub: %d jlb: %d jub: %d\n", N, NN, R, base_size, k_lb, k_ub,
// i_lb, i_ub, j_lb, j_ub);
// Fully in-place min-plus update: X = min(X, X[.][k] + X[k][.]).
for (k = k_lb; k < k_ub && k < NN; ++k) {
for (i = i_lb; i < i_ub && i < NN; ++i) {
for (j = j_lb; j < j_ub && j < NN; ++j) {
// if (i > k && j >= k) {
// X[i][j] -= (X[i][k] * X[k][j])/X[k][k];
X[i*NN + j] = min(X[i*NN + j], (X[i*NN + k] + X[k*NN + j])); ///X[k][k];
// (*counter)++;
// }
}
}
}
#else
// Cache-optimized base case: stage the whole X tile in contiguous scratch.
// NOTE(review): VLA of base_size*base_size elements on the stack.
DATA_TYPE x_row_major[base_size * base_size];
for (i = i_lb; i < i_ub && i < NN; ++i)
for (j = j_lb; j < j_ub && j < NN; ++j)
x_row_major[(i-i_lb)*base_size + (j-j_lb)] = X[i*NN+j]; //X[i][j];
for (k = k_lb; k < k_ub && k < NN; ++k) {
for (i = i_lb; i < i_ub && i < NN; ++i) {
for (j = j_lb; j < j_ub && j < NN; ++j) {
// if (i > k && j >= k) {
// X[i][j] -= (U[i][k] * V[k][j])/W[k][k];
// x_row_major[(i-i_lb)*base_size + (j-j_lb)] -= x_row_major[(i - i_lb)*base_size + (k - k_lb)] *
// x_row_major[(k - k_lb) * base_size + (j - j_lb)] /
// x_row_major[(k - k_lb) * base_size + (k - k_lb)];
x_row_major[(i-i_lb)*base_size + (j-j_lb)] = min (x_row_major[(i-i_lb)*base_size + (j-j_lb)],
x_row_major[(i - i_lb)*base_size + (k - k_lb)] +
x_row_major[(k - k_lb) * base_size + (j - j_lb)]);
// / x_row_major[(k - k_lb) * base_size + (k - k_lb)];
// }
}
}
}
for (i = i_lb; i < i_ub && i < NN; ++i)
for (j = j_lb; j < j_ub && j < NN; ++j)
X[i*NN+j] = x_row_major[(i-i_lb)*base_size + (j-j_lb)]; //X[i][j]
#endif
#ifdef USE_PAPI
countMisses(id );
#endif
return;
}
int ii, jj, kk;
int tile_size = N/R;
for (kk = 0; kk < R && k_lb + kk * (tile_size) < NN; kk++) {
// Applying the same idea of index set Splitting
// CASE 1: kk = ii and kk = jj --> Function A(X, ...)
fw_rec3_A(X, tile_size, R, base_size,
k_lb + kk * (tile_size), k_lb + (kk + 1) * tile_size,
i_lb + kk * (tile_size), i_lb + (kk + 1) * tile_size,
j_lb + kk * (tile_size), j_lb + (kk + 1) * tile_size);
// following functions fw_rec3_B(...) can be in parallel with all the
// function calls fw_rec3_C(...) as they are writing to different tiles.
// IN PARALLEL:
// CASE 2: kk = ii but kk != jj ==> Function B(X, U, ...)
// #pragma omp parallel
{
// #pragma omp for
for (jj = 0; jj < R && j_lb + jj * (tile_size) < NN; jj++) {
if (jj == kk) continue;
#pragma omp task
fw_rec3_B(X, X, X, tile_size, R, base_size,
k_lb + kk * (tile_size), k_lb + (kk + 1) * tile_size,
i_lb + kk * (tile_size), i_lb + (kk + 1) * tile_size,
j_lb + jj * (tile_size), j_lb + (jj + 1) * tile_size);
}
}
// NOTE(review): this taskwait serializes B before C although the comment
// above says they could overlap -- conservative but correct.
#pragma omp taskwait
// CASE 3: kk = jj but kk != ii ==> Function C(X, V, ...)
// #pragma omp parallel
{
// #pragma omp for
for (ii = 0; ii < R && i_lb + ii * (tile_size) < NN; ii++) {
if (ii == kk) continue;
#pragma omp task
fw_rec3_C(X, X, X, tile_size, R, base_size,
k_lb + kk * (tile_size), k_lb + (kk + 1) * tile_size,
i_lb + ii * (tile_size), i_lb + (ii + 1) * tile_size,
j_lb + kk * (tile_size), j_lb + (kk + 1) * tile_size);
}
}
#pragma omp taskwait
// JOIN - SYNC
// All the following fw_rec3_D(...) functions can run in parallel.
// IN PARALLEL:
// CASE 4: kk != ii and kk != jj ==> Function D(X, U, V, ...)
// #pragma omp parallel
{
// #pragma omp for collapse(2)
for (ii = 0; ii < R && i_lb + ii * (tile_size) < NN; ii++) {
for (jj = 0; jj < R && j_lb + jj * (tile_size) < NN; jj++) {
if (ii == kk || jj == kk) continue;
#pragma omp task
fw_rec3_D(X, X, X, X, tile_size, R, base_size,
k_lb + kk * (tile_size), k_lb + (kk + 1) * tile_size,
i_lb + ii * (tile_size), i_lb + (ii + 1) * tile_size,
j_lb + jj * (tile_size), j_lb + (jj + 1) * tile_size);
}
}
}
#pragma omp taskwait
// JOIN - SYNC
}
}
/*
void fw_rec_top_level3(DATA_TYPE **D, int N, int R, int base_size) {
int ii, jj, kk;
int i, j, k;
int tile_size = N/R;
// #pragma scop
// cout << "N: " << N << " R: " << R << " tile_size: " << tile_size << endl;
for (kk = 0; kk < R && kk * (tile_size) < NN; kk++) {
// cout << kk << " : before a" << endl;
// Applying index set Splitting
// CASE 1: kk = ii and kk = jj --> Function A(X, ...)
fw_rec3_A(D, tile_size, R, base_size,
kk * (tile_size), (kk + 1) * tile_size,
kk * (tile_size), (kk + 1) * tile_size,
kk * (tile_size), (kk + 1) * tile_size);
// following functions fw_rec3_B(...) can be in parallel with all the
// function calls fw_rec3_C(...) as they are writing to different tiles.
// IN PARALLEL:
// cout << k << " : before b" << endl;
// CASE 2: kk = ii but kk != jj ==> Function B(X, U, ...)
// #pragma omp parallel
{
// #pragma omp for
for (jj = 0; jj < R && jj * tile_size < NN; jj++) {
if (jj == kk) continue;
#pragma omp task
fw_rec3_B(D, D, D, tile_size, R, base_size,
kk * (tile_size), (kk + 1) * tile_size,
kk * (tile_size), (kk + 1) * tile_size,
jj * (tile_size), (jj + 1) * tile_size);
}
}
// cout << k << " : before c" << endl;
// CASE 3: kk = jj but kk != ii ==> Function C(X, V, ...)
// #pragma omp parallel
{
// #pragma omp for
for (ii = 0; ii < R && ii * tile_size < NN; ii++) {
if (ii == kk) continue;
#pragma omp task
fw_rec3_C(D, D, D, tile_size, R, base_size,
kk * (tile_size), (kk + 1) * tile_size,
ii * (tile_size), (ii + 1) * tile_size,
kk * (tile_size), (kk + 1) * tile_size);
}
}
#pragma omp taskwait
// JOIN - SYNC
// cout << k << " : before d" << endl;
// All the following fw_rec3_D(...) functions can run in parallel.
// IN PARALLEL:
// CASE 4: kk != ii and kk != jj ==> Function D(X, U, V, ...)
// #pragma omp parallel
{
// #pragma omp for collapse(2)
for (ii = 0; ii < R && ii * tile_size < NN; ii++) {
// #pragma omp for
for (jj = 0; jj < R && jj * tile_size < NN; jj++) {
if (ii == kk || jj == kk) continue;
#pragma omp task
fw_rec3_D(D, D, D, D, tile_size, R, base_size,
kk * (tile_size), (kk + 1) * tile_size,
ii * (tile_size), (ii + 1) * tile_size,
jj * (tile_size), (jj + 1) * tile_size);
}
}
}
#pragma omp taskwait
// JOIN - SYNC
}
// #pragma endscop
// printf("Total number of updates are: %d\n", counter);
}
*/
/* Dump an N x N matrix of DATA_TYPE values, tab-separated, one row per line.
 * NOTE(review): the %d format assumes DATA_TYPE is int -- confirm in the header. */
void print_arr(DATA_TYPE **arr, int N){
  printf("---------ARR----------\n");
  for (int row = 0; row < N; row++){
    for (int col = 0; col < N; col++)
      printf("%d\t", arr[row][col]);
    printf("\n");
  }
}
/*
int main(int argc, char **argv) {
int i, j;
NN = 1024;
if (argc > 1){
NN = atoi(argv[1]);
}
int N = 2;
while (N < NN)
N = (N << 1);
int B = 32;
if (argc > 2)
B = atoi(argv[2]);
int base_size = B;
int R = 2;
if (argc > 3)
R = atoi(argv[3]);
// making sure virtual padding will give the desired base case sizes
// only for power of 2 base case sizes
// otherwise it should be commented
int RR = 1;
while (N / RR > B)
RR *= R;
N = RR * B;
// End of extra virtual padding for base case
#ifdef USE_PAPI
papi_init();
#endif
if (argc > 4) {
omp_set_num_threads(atoi(argv[4]));
// if (0 != __cilkrts_set_param("nworkers", argv[3])) {
// printf("Failed to set worker count\n");
// return 1;
// }
}
// int P = __cilkrts_get_nworkers();
// printf("%d,", __cilkrts_get_nworkers());
DATA_TYPE **D_serial = (DATA_TYPE **)malloc(NN * sizeof(DATA_TYPE *));
DATA_TYPE **D_recursive3 = (DATA_TYPE **)malloc(NN * sizeof(DATA_TYPE *));
for (i = 0; i < NN; ++i) {
D_serial[i] = (DATA_TYPE *)malloc(NN * sizeof(DATA_TYPE));
D_recursive3[i] = (DATA_TYPE *)malloc(NN * sizeof(DATA_TYPE));
for (j = 0; j < NN; ++j) {
// D_serial[i][j] = rand() % 100 + 1;
D_serial[i][j] = ((DATA_TYPE) (i+1)*(j+1)) / N;
D_recursive3[i][j] = D_serial[i][j];
}
}
// printf("STEP 1:\n");
// print_arr(D_serial, NN);
#ifdef DEBUG
unsigned long long tstart_serial = time(NULL);
fw(D_serial, NN);
unsigned long long tend_serial = time(NULL);
// // // // // // // // // cout << "serial: " << tend_serial - tstart_serial << endl;
#endif
// printf("%d,", base_size);
unsigned long long tstart = time(NULL);
// printf("\n\nSTEP 5:\n");
// printf("\nNOW HAVING MULTIPLE FUNCTIONS AS A RESULT OF INDEX SET SPLITTING\n\n");
#ifdef POLYBENCH
// Start timer.
polybench_start_instruments;
#endif
// print_arr(D_recursive3, NN);
int P = 0;
#pragma omp parallel
{
// P = omp_num_procs();
P = omp_get_max_threads();
#pragma omp single
{
#pragma omp task
fw_rec_top_level3(D_recursive3, N, R, base_size);
// fw_rec3_A(D_recursive3, N, R, base_size, 0, N, 0, N, 0, N);
}
}
#ifdef POLYBENCH
// Stop and print timer.
polybench_stop_instruments;
polybench_print_instruments;
#endif
unsigned long long tend = time(NULL);
// printf("%d,%f,", N, cilk_ticks_to_seconds(tend - tstart));
// cout << R << "," << N << "," << B << ","
// << P << "," << (tend - tstart);
// printf("%d, %d, %d, %d, %lld\n", R, N, B, P, (tend - tstart));
#ifdef USE_PAPI
countTotalMiss(p);
PAPI_shutdown();
delete threadcounter;
for (int i = 0; i < p; i++) delete l2miss[i];
delete l2miss;
delete errstring;
delete EventSet;
delete eventCode;
#endif
// print_arr(D_serial, NN);
// print_arr(D_recursive3, NN);
for (i = 0; i < NN; ++i) {
for (j = 0; j < NN; ++j) {
#ifdef DEBUG
if (D_serial[i][j] != D_recursive3[i][j]) {
printf("WE HAVE ISSUE IN THE RECURSIVE PROGRAM 3\n");
}
#endif
}
free(D_serial[i]);
free(D_recursive3[i]);
}
free(D_serial);
free(D_recursive3);
// printf("\n");
return 0;
}
*/
|
clang-262701.c | #include <stdio.h>
#include <string.h>
#include <omp.h>
#define THREADS 2
#define TEAMS 2
/* Offloading regression test: each team counts its threads via a shared
 * dist[] array and the host checks every team saw the same thread count. */
int main(){
  int gpu_results[TEAMS];
  int correct_results[TEAMS];
  int actual_num_threads = -1;
  // the runtime is allowed to use <=THREADS in the parallel regions and
  // it actually chooses 1 (new runtime) or 2 (old runtime)
  #pragma omp target teams thread_limit(THREADS) num_teams(TEAMS) \
    map(from:gpu_results, actual_num_threads)
  {
    int dist[THREADS];
    // Uncomment line below to trigger generic kernel before fix was in place
    //dist[0] = 0;
    #pragma omp parallel
    {
      int thread = omp_get_thread_num();
      int team = omp_get_team_num();
      // Number of threads actually launched; may be < THREADS, in which
      // case dist[] slots beyond it are never written.
      int nthreads = omp_get_num_threads();
      dist[thread] = 0;
      #pragma omp barrier
      dist[thread] += 1;
      #pragma omp barrier
      if(thread == 0) {
        if (team == 0)
          actual_num_threads = nthreads;
        // Bug fix: sum only slots written by live threads.  The original
        // loop ran to THREADS and read uninitialized dist[] entries
        // whenever the runtime launched fewer than THREADS threads.
        for(int i = 1; i < nthreads; i++)
          dist[0] += dist[i];
        gpu_results[team] = dist[0];
      }
    }
  }
  for(int i = 0; i < TEAMS; i++)
    correct_results[i] = actual_num_threads;
  int status = memcmp(correct_results, gpu_results, TEAMS * sizeof(int));
  if (status != 0 || actual_num_threads > 2) {
    printf("FAIL\n");
    return 1;
  }
  printf("PASS\n");
  return 0;
}
|
scatter_avx2.c | // create a list of 64 numbers, and only sum the even ones
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define N 32000
#define SCALE 8
// Scatter SCALE randomly selected elements into two result arrays --
// once serially and once with `#pragma omp simd` -- and compare results.
int main() {
    srand(time(NULL));
    float *numbers = malloc(sizeof(float)*N);
    float *result1 = malloc(sizeof(float)*N);
    float *result2 = malloc(sizeof(float)*N);
    int *mask = malloc(sizeof(int)*N);
    // Fail fast instead of dereferencing NULL on allocation failure.
    if (numbers == NULL || result1 == NULL || result2 == NULL || mask == NULL) {
        fprintf(stderr, "out of memory\n");
        return 1;
    }
    // Init the numbers
    for (int i = 0; i<N; i++) numbers[i] = rand() % 10;
    for (int i = 0; i<N; i++) { result1[i] = 0; result2[i] = 0; }
    for (int i = 0; i<N; i++) mask[i] = rand() % N;
    for (int i = 0; i<SCALE; i++) printf("%.1f ", numbers[i]);
    puts("\n---");
    for (int i = 0; i<SCALE; i++) printf("%d ", mask[i]);
    puts("\n---");
    puts("---------------------------------------------");
    //Serial
    for (int i = 0; i<SCALE; i++) {
        result1[mask[i]] = numbers[mask[i]];
    }
    #pragma omp simd simdlen(SCALE)
    for (int i = 0; i<SCALE; i++) {
        result2[mask[i]] = numbers[mask[i]];
    }
    // print
    for (int i = 0; i<SCALE; i++) printf("%.1f ", result1[i]);
    puts("\n---");
    for (int i = 0; i<SCALE; i++) printf("%.1f ", result2[i]);
    puts("\n---");
    // Bug fix: compare ALL N slots.  The scatter writes through mask[i],
    // which can land anywhere in [0, N), not only the first SCALE slots,
    // so the original SCALE-bounded loop missed most scattered positions.
    int errors = 0;
    for (int i = 0; i<N; i++) {
        if (result1[i] != result2[i]) ++errors;
    }
    printf("Errors: %d\n", errors);
    free(numbers);
    free(result1);
    free(result2);
    free(mask);
    return 0;
}
|
constitute.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC OOO N N SSSSS TTTTT IIIII TTTTT U U TTTTT EEEEE %
% C O O NN N SS T I T U U T E %
% C O O N N N ESSS T I T U U T EEE %
% C O O N NN SS T I T U U T E %
% CCCC OOO N N SSSSS T IIIII T UUU T EEEEE %
% %
% %
%                 MagickCore Methods to Constitute an Image                   %
% %
% Software Design %
% Cristy %
% October 1998 %
% %
% %
% Copyright @ 1999 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/client.h"
#include "MagickCore/coder-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/constitute-private.h"
#include "MagickCore/delegate.h"
#include "MagickCore/geometry.h"
#include "MagickCore/identify.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/policy.h"
#include "MagickCore/profile.h"
#include "MagickCore/profile-private.h"
#include "MagickCore/property.h"
#include "MagickCore/quantum.h"
#include "MagickCore/resize.h"
#include "MagickCore/resource_.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/statistic.h"
#include "MagickCore/stream.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/timer.h"
#include "MagickCore/timer-private.h"
#include "MagickCore/token.h"
#include "MagickCore/transform.h"
#include "MagickCore/utility.h"
#include "MagickCore/utility-private.h"
/*
  Typedef declarations.
*/
/*
  Per-read options gathered once from the ImageInfo option list so they can
  be applied uniformly to every frame of a sequence in ReadImage().
*/
typedef struct _ConstituteInfo
{
  const char
    *caption,  /* "caption" image option, interpreted per frame */
    *comment,  /* "comment" image option, interpreted per frame */
    *dispose,  /* "dispose" option; parsed to a DisposeType in ReadImage() */
    *label;    /* "label" image option, interpreted per frame */

  MagickBooleanType
    sync_from_exif,  /* honor exif:Orientation / exif:*Resolution */
    sync_from_tiff;  /* honor tiff:Orientation / tiff:*Resolution */

  MagickStatusType
    delay_flags;  /* ParseGeometry() flags for the "delay" option */

  size_t
    delay;  /* frame delay from the rho part of the "delay" geometry */

  ssize_t
    ticks_per_second;  /* from the sigma part of the "delay" geometry */
} ConstituteInfo;
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o n s t i t u t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConstituteImage() returns an image from the pixel data you supply.
% The pixel data must be in scanline order top-to-bottom. The data can be
% char, short int, int, float, or double. Float and double require the
% pixels to be normalized [0..1], otherwise [0..QuantumRange]. For example, to
% create a 640x480 image from unsigned red-green-blue character data, use:
%
% image = ConstituteImage(640,480,"RGB",CharPixel,pixels,&exception);
%
% The format of the ConstituteImage method is:
%
% Image *ConstituteImage(const size_t columns,const size_t rows,
% const char *map,const StorageType storage,const void *pixels,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o columns: width in pixels of the image.
%
% o rows: height in pixels of the image.
%
% o map: This string reflects the expected ordering of the pixel array.
% It can be any combination or order of R = red, G = green, B = blue,
% A = alpha (0 is transparent), O = opacity (0 is opaque), C = cyan,
% Y = yellow, M = magenta, K = black, I = intensity (for grayscale),
% P = pad.
%
% o storage: Define the data type of the pixels. Float and double types are
% expected to be normalized [0..1] otherwise [0..QuantumRange]. Choose
% from these types: CharPixel, DoublePixel, FloatPixel, IntegerPixel,
% LongPixel, QuantumPixel, or ShortPixel.
%
% o pixels: This array of values contain the pixel components as defined by
% map and type. You must preallocate this array where the expected
% length varies depending on the values of width, height, map, and type.
%
% o exception: return any errors or warnings in this structure.
%
*/
/* Build an image from caller-supplied scanline pixel data (see banner doc). */
MagickExport Image *ConstituteImage(const size_t columns,const size_t rows,
  const char *map,const StorageType storage,const void *pixels,
  ExceptionInfo *exception)
{
  Image
    *image;

  MagickBooleanType
    status;

  ssize_t
    i;

  size_t
    length;

  /*
    Allocate image structure.
  */
  assert(map != (const char *) NULL);
  assert(pixels != (void *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",map);
  image=AcquireImage((ImageInfo *) NULL,exception);
  if (image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Derive image depth from the storage type; unlisted types (e.g.
    IntegerPixel, QuantumPixel) keep the default depth.
  */
  switch (storage)
  {
    case CharPixel: image->depth=8*sizeof(unsigned char); break;
    case DoublePixel: image->depth=8*sizeof(double); break;
    case FloatPixel: image->depth=8*sizeof(float); break;
    case LongPixel: image->depth=8*sizeof(unsigned long); break;
    case LongLongPixel: image->depth=8*sizeof(MagickSizeType); break;
    case ShortPixel: image->depth=8*sizeof(unsigned short); break;
    default: break;
  }
  /*
    Infer alpha trait and colorspace from the channel-map characters.
  */
  length=strlen(map);
  for (i=0; i < (ssize_t) length; i++)
  {
    switch (map[i])
    {
      case 'a':
      case 'A':
      case 'O':
      case 'o':
      {
        image->alpha_trait=BlendPixelTrait;
        break;
      }
      case 'C':
      case 'c':
      case 'm':
      case 'M':
      case 'Y':
      case 'y':
      case 'K':
      case 'k':
      {
        image->colorspace=CMYKColorspace;
        break;
      }
      case 'I':
      case 'i':
      {
        image->colorspace=GRAYColorspace;
        break;
      }
      default:
      {
        /* A one-character map (e.g. "P") implies a grayscale image. */
        if (length == 1)
          image->colorspace=GRAYColorspace;
        break;
      }
    }
  }
  status=SetImageExtent(image,columns,rows,exception);
  if (status == MagickFalse)
    return(DestroyImageList(image));
  status=ResetImagePixels(image,exception);
  if (status == MagickFalse)
    return(DestroyImageList(image));
  /*
    Import the caller's pixels; on failure the image is released and NULL
    is returned with the reason recorded in `exception'.
  */
  status=ImportImagePixels(image,0,0,columns,rows,map,storage,pixels,exception);
  if (status == MagickFalse)
    image=DestroyImage(image);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P i n g I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PingImage() returns all the properties of an image or image sequence
% except for the pixels. It is much faster and consumes far less memory
% than ReadImage(). On failure, a NULL image is returned and exception
% describes the reason for the failure.
%
% The format of the PingImage method is:
%
% Image *PingImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: Ping the image defined by the file or filename members of
% this structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif

/*
  Stream handler used by PingImage(): discards the pixel data and reports
  that all `columns' pixels were consumed, so the coder runs its header and
  metadata path without ever materializing pixel storage.
*/
static size_t PingStream(const Image *magick_unused(image),
  const void *magick_unused(pixels),const size_t columns)
{
  magick_unreferenced(image);
  magick_unreferenced(pixels);
  return(columns);
}

#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
/* Read image properties only -- no pixels (see banner doc above). */
MagickExport Image *PingImage(const ImageInfo *image_info,
  ExceptionInfo *exception)
{
  Image
    *image;

  ImageInfo
    *ping_info;

  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  /*
    Read with ping=MagickTrue through a pixel-discarding stream handler so
    only the properties get populated.
  */
  ping_info=CloneImageInfo(image_info);
  ping_info->ping=MagickTrue;
  image=ReadStream(ping_info,&PingStream,exception);
  if (image != (Image *) NULL)
    {
      /* Exclude read time from the image's reported elapsed time. */
      ResetTimer(&image->timer);
      if (ping_info->verbose != MagickFalse)
        (void) IdentifyImage(image,stdout,MagickFalse,exception);
    }
  ping_info=DestroyImageInfo(ping_info);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P i n g I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PingImages() pings one or more images and returns them as an image list.
%
% The format of the PingImage method is:
%
% Image *PingImages(ImageInfo *image_info,const char *filename,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o filename: the image filename.
%
% o exception: return any errors or warnings in this structure.
%
*/
/* Ping one or more images into an image list (see banner doc above). */
MagickExport Image *PingImages(ImageInfo *image_info,const char *filename,
  ExceptionInfo *exception)
{
  char
    ping_filename[MagickPathExtent];

  Image
    *image,
    *images;

  ImageInfo
    *read_info;

  /*
    Ping image list from a file.
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  (void) SetImageOption(image_info,"filename",filename);
  (void) CopyMagickString(image_info->filename,filename,MagickPathExtent);
  /*
    Expand any %d scene template in the filename; if expansion changes the
    name, the caller requested an explicit scene range.
  */
  (void) InterpretImageFilename(image_info,(Image *) NULL,image_info->filename,
    (int) image_info->scene,ping_filename,exception);
  if (LocaleCompare(ping_filename,image_info->filename) != 0)
    {
      ExceptionInfo
        *sans;

      ssize_t
        extent,
        scene;

      /*
        Images of the form image-%d.png[1-5].
      */
      read_info=CloneImageInfo(image_info);
      sans=AcquireExceptionInfo();
      (void) SetImageInfo(read_info,0,sans);  /* probe only; ignore errors */
      sans=DestroyExceptionInfo(sans);
      if (read_info->number_scenes == 0)
        {
          read_info=DestroyImageInfo(read_info);
          return(PingImage(image_info,exception));
        }
      (void) CopyMagickString(ping_filename,read_info->filename,
        MagickPathExtent);
      images=NewImageList();
      extent=(ssize_t) (read_info->scene+read_info->number_scenes);
      for (scene=(ssize_t) read_info->scene; scene < (ssize_t) extent; scene++)
      {
        /* Ping each scene individually; unreadable scenes are skipped. */
        (void) InterpretImageFilename(image_info,(Image *) NULL,ping_filename,
          (int) scene,read_info->filename,exception);
        image=PingImage(read_info,exception);
        if (image == (Image *) NULL)
          continue;
        AppendImageToList(&images,image);
      }
      read_info=DestroyImageInfo(read_info);
      return(images);
    }
  return(PingImage(image_info,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadImage() reads an image or image sequence from a file or file handle.
% The method returns a NULL if there is a memory shortage or if the image
% cannot be read. On failure, a NULL image is returned and exception
% describes the reason for the failure.
%
% The format of the ReadImage method is:
%
% Image *ReadImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: Read the image defined by the file or filename members of
% this structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  IsCoderAuthorized() consults the security policy for the given coder and
  access rights.  Returns MagickTrue when access is permitted; otherwise it
  sets errno to EPERM, records a PolicyError in `exception', and returns
  MagickFalse.
*/
static MagickBooleanType IsCoderAuthorized(const char *coder,
  const PolicyRights rights,ExceptionInfo *exception)
{
  MagickBooleanType
    authorized;

  authorized=IsRightsAuthorized(CoderPolicyDomain,rights,coder);
  if (authorized != MagickFalse)
    return(MagickTrue);
  errno=EPERM;
  (void) ThrowMagickException(exception,GetMagickModule(),PolicyError,
    "NotAuthorized","`%s'",coder);
  return(MagickFalse);
}
/*
  InitializeConstituteInfo() primes a ConstituteInfo with the per-read
  options (caption, comment, dispose, label, delay, and the EXIF/TIFF
  metadata-sync switches) from the image-info option list, so ReadImage()
  can apply them to every frame.  The option strings are borrowed from
  `image_info' and must outlive `constitute_info'.
*/
static void InitializeConstituteInfo(const ImageInfo *image_info,
  ConstituteInfo *constitute_info)
{
  const char
    *option;

  memset(constitute_info,0,sizeof(*constitute_info));
  constitute_info->sync_from_exif=MagickTrue;
  constitute_info->sync_from_tiff=MagickTrue;
  option=GetImageOption(image_info,"exif:sync-image");
  if (IsStringFalse(option) != MagickFalse)
    constitute_info->sync_from_exif=MagickFalse;
  option=GetImageOption(image_info,"tiff:sync-image");
  if (IsStringFalse(option) != MagickFalse)
    constitute_info->sync_from_tiff=MagickFalse;
  constitute_info->caption=GetImageOption(image_info,"caption");
  constitute_info->comment=GetImageOption(image_info,"comment");
  /*
    Bug fix: "dispose" was never fetched, leaving constitute_info->dispose
    NULL after the memset above, so ReadImage() silently ignored the
    caller's dispose option.
  */
  constitute_info->dispose=GetImageOption(image_info,"dispose");
  constitute_info->label=GetImageOption(image_info,"label");
  option=GetImageOption(image_info,"delay");
  if (option != (const char *) NULL)
    {
      GeometryInfo
        geometry_info;

      /* "delay" is a geometry: rho=ticks, optional sigma=ticks-per-second. */
      constitute_info->delay_flags=ParseGeometry(option,&geometry_info);
      if (constitute_info->delay_flags != NoValue)
        {
          constitute_info->delay=floor(geometry_info.rho+0.5);
          if ((constitute_info->delay_flags & SigmaValue) != 0)
            constitute_info->ticks_per_second=CastDoubleToLong(floor(
              geometry_info.sigma+0.5));
        }
    }
}
/*
  Adopt exif:Orientation (preferred) or tiff:Orientation as the image's
  orientation, deleting whichever property was consumed.  Skipped per source
  when the corresponding sync_from_* switch is off.
*/
static void SyncOrientationFromProperties(Image *image,
  ConstituteInfo *constitute_info,ExceptionInfo *exception)
{
  const char
    *orientation;

  orientation=(const char *) NULL;
  if (constitute_info->sync_from_exif != MagickFalse)
    {
      orientation=GetImageProperty(image,"exif:Orientation",exception);
      if (orientation != (const char *) NULL)
        {
          image->orientation=(OrientationType) StringToLong(orientation);
          (void) DeleteImageProperty(image,"exif:Orientation");
        }
    }
  /* Fall back to TIFF only when no EXIF orientation was present. */
  if ((orientation == (const char *) NULL) &&
      (constitute_info->sync_from_tiff != MagickFalse))
    {
      orientation=GetImageProperty(image,"tiff:Orientation",exception);
      if (orientation != (const char *) NULL)
        {
          image->orientation=(OrientationType) StringToLong(orientation);
          (void) DeleteImageProperty(image,"tiff:Orientation");
        }
    }
}
/*
  Adopt the image resolution (and units) from EXIF properties, falling back
  to TIFF when EXIF lacks both axes; the consumed properties are deleted.
*/
static void SyncResolutionFromProperties(Image *image,
  ConstituteInfo *constitute_info, ExceptionInfo *exception)
{
  const char
    *resolution_x,
    *resolution_y,
    *resolution_units;

  MagickBooleanType
    used_tiff;

  resolution_x=(const char *) NULL;
  resolution_y=(const char *) NULL;
  resolution_units=(const char *) NULL;
  used_tiff=MagickFalse;
  if (constitute_info->sync_from_exif != MagickFalse)
    {
      resolution_x=GetImageProperty(image,"exif:XResolution",exception);
      resolution_y=GetImageProperty(image,"exif:YResolution",exception);
      /* Units are only meaningful when both axes are present. */
      if ((resolution_x != (const char *) NULL) &&
          (resolution_y != (const char *) NULL))
        resolution_units=GetImageProperty(image,"exif:ResolutionUnit",
          exception);
    }
  if ((resolution_x == (const char *) NULL) &&
      (resolution_y == (const char *) NULL) &&
      (constitute_info->sync_from_tiff != MagickFalse))
    {
      resolution_x=GetImageProperty(image,"tiff:XResolution",exception);
      resolution_y=GetImageProperty(image,"tiff:YResolution",exception);
      if ((resolution_x != (const char *) NULL) &&
          (resolution_y != (const char *) NULL))
        {
          used_tiff=MagickTrue;
          resolution_units=GetImageProperty(image,"tiff:ResolutionUnit",
            exception);
        }
    }
  if ((resolution_x != (const char *) NULL) &&
      (resolution_y != (const char *) NULL))
    {
      GeometryInfo
        geometry_info;

      ssize_t
        option_type;

      /*
        The properties are rational "numerator/denominator" strings that
        ParseGeometry() splits into rho/sigma.  NOTE(review): the comma case
        presumably handles a decimal-comma locale value ("300,5"), with
        sigma/1000 reattaching the fractional part -- verify.
      */
      geometry_info.rho=image->resolution.x;
      geometry_info.sigma=1.0;
      (void) ParseGeometry(resolution_x,&geometry_info);
      if (geometry_info.sigma != 0)
        image->resolution.x=geometry_info.rho/geometry_info.sigma;
      if (strchr(resolution_x,',') != (char *) NULL)
        image->resolution.x=geometry_info.rho+geometry_info.sigma/1000.0;
      geometry_info.rho=image->resolution.y;
      geometry_info.sigma=1.0;
      (void) ParseGeometry(resolution_y,&geometry_info);
      if (geometry_info.sigma != 0)
        image->resolution.y=geometry_info.rho/geometry_info.sigma;
      if (strchr(resolution_y,',') != (char *) NULL)
        image->resolution.y=geometry_info.rho+geometry_info.sigma/1000.0;
      if (resolution_units != (char *) NULL)
        {
          option_type=ParseCommandOption(MagickResolutionOptions,MagickFalse,
            resolution_units);
          if (option_type >= 0)
            image->units=(ResolutionType) option_type;
        }
      /*
        Consume the properties from whichever source was actually used.
      */
      if (used_tiff == MagickFalse)
        {
          (void) DeleteImageProperty(image,"exif:XResolution");
          (void) DeleteImageProperty(image,"exif:YResolution");
          (void) DeleteImageProperty(image,"exif:ResolutionUnit");
        }
      else
        {
          (void) DeleteImageProperty(image,"tiff:XResolution");
          (void) DeleteImageProperty(image,"tiff:YResolution");
          (void) DeleteImageProperty(image,"tiff:ResolutionUnit");
        }
    }
}
/* Read an image or image sequence (see banner doc above). */
MagickExport Image *ReadImage(const ImageInfo *image_info,
  ExceptionInfo *exception)
{
  char
    filename[MagickPathExtent],
    magick[MagickPathExtent],
    magick_filename[MagickPathExtent];

  ConstituteInfo
    constitute_info;

  const DelegateInfo
    *delegate_info;

  const MagickInfo
    *magick_info;

  DecodeImageHandler
    *decoder;

  ExceptionInfo
    *sans_exception;

  Image
    *image,
    *next;

  ImageInfo
    *read_info;

  MagickBooleanType
    status;

  /*
    Determine image type from filename prefix or suffix (e.g. image.jpg).
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(image_info->filename != (char *) NULL);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  read_info=CloneImageInfo(image_info);
  (void) CopyMagickString(magick_filename,read_info->filename,MagickPathExtent);
  (void) SetImageInfo(read_info,0,exception);
  (void) CopyMagickString(filename,read_info->filename,MagickPathExtent);
  (void) CopyMagickString(magick,read_info->magick,MagickPathExtent);
  /*
    Call appropriate image reader based on image type.
  */
  sans_exception=AcquireExceptionInfo();
  magick_info=GetMagickInfo(read_info->magick,sans_exception);
  if (sans_exception->severity == PolicyError)
    InheritException(exception,sans_exception);  /* only policy errors escape */
  sans_exception=DestroyExceptionInfo(sans_exception);
  if (magick_info != (const MagickInfo *) NULL)
    {
      if (GetMagickEndianSupport(magick_info) == MagickFalse)
        read_info->endian=UndefinedEndian;
      else
        if ((image_info->endian == UndefinedEndian) &&
            (GetMagickRawSupport(magick_info) != MagickFalse))
          {
            unsigned long
              lsb_first;

            /* Raw coders default to the host byte order. */
            lsb_first=1;
            read_info->endian=(*(char *) &lsb_first) == 1 ? LSBEndian :
              MSBEndian;
          }
    }
  if ((magick_info != (const MagickInfo *) NULL) &&
      (GetMagickDecoderSeekableStream(magick_info) != MagickFalse))
    {
      /*
        This coder needs random access: if the blob is not seekable, spool
        it to a temporary file before decoding.
      */
      image=AcquireImage(read_info,exception);
      (void) CopyMagickString(image->filename,read_info->filename,
        MagickPathExtent);
      status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
      if (status == MagickFalse)
        {
          read_info=DestroyImageInfo(read_info);
          image=DestroyImage(image);
          return((Image *) NULL);
        }
      if (IsBlobSeekable(image) == MagickFalse)
        {
          /*
            Coder requires a seekable stream.
          */
          *read_info->filename='\0';
          status=ImageToFile(image,read_info->filename,exception);
          if (status == MagickFalse)
            {
              (void) CloseBlob(image);
              read_info=DestroyImageInfo(read_info);
              image=DestroyImage(image);
              return((Image *) NULL);
            }
          read_info->temporary=MagickTrue;  /* remember to delete the spool */
        }
      (void) CloseBlob(image);
      image=DestroyImage(image);
    }
  image=NewImageList();
  decoder=GetImageDecoder(magick_info);
  if (decoder == (DecodeImageHandler *) NULL)
    {
      /*
        No native decoder: re-probe the format unless a delegate exists.
      */
      delegate_info=GetDelegateInfo(read_info->magick,(char *) NULL,exception);
      if (delegate_info == (const DelegateInfo *) NULL)
        {
          (void) SetImageInfo(read_info,0,exception);
          (void) CopyMagickString(read_info->filename,filename,
            MagickPathExtent);
          magick_info=GetMagickInfo(read_info->magick,exception);
          decoder=GetImageDecoder(magick_info);
        }
    }
  if (decoder != (DecodeImageHandler *) NULL)
    {
      /*
        Call appropriate image reader based on image type.
      */
      if ((magick_info != (const MagickInfo *) NULL) &&
          (GetMagickDecoderThreadSupport(magick_info) == MagickFalse))
        LockSemaphoreInfo(magick_info->semaphore);  /* serialize unsafe coder */
      status=IsCoderAuthorized(read_info->magick,ReadPolicyRights,exception);
      image=(Image *) NULL;
      if (status != MagickFalse)
        image=decoder(read_info,exception);
      if ((magick_info != (const MagickInfo *) NULL) &&
          (GetMagickDecoderThreadSupport(magick_info) == MagickFalse))
        UnlockSemaphoreInfo(magick_info->semaphore);
    }
  else
    {
      delegate_info=GetDelegateInfo(read_info->magick,(char *) NULL,exception);
      if (delegate_info == (const DelegateInfo *) NULL)
        {
          (void) ThrowMagickException(exception,GetMagickModule(),
            MissingDelegateError,"NoDecodeDelegateForThisImageFormat","`%s'",
            read_info->magick);
          if (read_info->temporary != MagickFalse)
            (void) RelinquishUniqueFileResource(read_info->filename);
          read_info=DestroyImageInfo(read_info);
          return((Image *) NULL);
        }
      /*
        Let our decoding delegate process the image.
      */
      image=AcquireImage(read_info,exception);
      if (image == (Image *) NULL)
        {
          read_info=DestroyImageInfo(read_info);
          return((Image *) NULL);
        }
      (void) CopyMagickString(image->filename,read_info->filename,
        MagickPathExtent);
      *read_info->filename='\0';
      if (GetDelegateThreadSupport(delegate_info) == MagickFalse)
        LockSemaphoreInfo(delegate_info->semaphore);
      status=InvokeDelegate(read_info,image,read_info->magick,(char *) NULL,
        exception);
      if (GetDelegateThreadSupport(delegate_info) == MagickFalse)
        UnlockSemaphoreInfo(delegate_info->semaphore);
      image=DestroyImageList(image);
      read_info->temporary=MagickTrue;  /* delegate output is a temp file */
      if (status != MagickFalse)
        (void) SetImageInfo(read_info,0,exception);
      magick_info=GetMagickInfo(read_info->magick,exception);
      decoder=GetImageDecoder(magick_info);
      if (decoder == (DecodeImageHandler *) NULL)
        {
          if (IsPathAccessible(read_info->filename) != MagickFalse)
            (void) ThrowMagickException(exception,GetMagickModule(),
              MissingDelegateError,"NoDecodeDelegateForThisImageFormat","`%s'",
              read_info->magick);
          else
            ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
              read_info->filename);
          read_info=DestroyImageInfo(read_info);
          return((Image *) NULL);
        }
      /*
        Call appropriate image reader based on image type.
      */
      if (GetMagickDecoderThreadSupport(magick_info) == MagickFalse)
        LockSemaphoreInfo(magick_info->semaphore);
      status=IsCoderAuthorized(read_info->magick,ReadPolicyRights,exception);
      image=(Image *) NULL;
      if (status != MagickFalse)
        image=(decoder)(read_info,exception);
      if (GetMagickDecoderThreadSupport(magick_info) == MagickFalse)
        UnlockSemaphoreInfo(magick_info->semaphore);
    }
  if (read_info->temporary != MagickFalse)
    {
      /* Delete the spooled/delegate temp file; restore the real filename. */
      (void) RelinquishUniqueFileResource(read_info->filename);
      read_info->temporary=MagickFalse;
      if (image != (Image *) NULL)
        (void) CopyMagickString(image->filename,filename,MagickPathExtent);
    }
  if (image == (Image *) NULL)
    {
      read_info=DestroyImageInfo(read_info);
      return(image);
    }
  if (exception->severity >= ErrorException)
    (void) LogMagickEvent(ExceptionEvent,GetMagickModule(),
      "Coder (%s) generated an image despite an error (%d), "
      "notify the developers",image->magick,exception->severity);
  if (IsBlobTemporary(image) != MagickFalse)
    (void) RelinquishUniqueFileResource(read_info->filename);
  if ((IsSceneGeometry(read_info->scenes,MagickFalse) != MagickFalse) &&
      (GetImageListLength(image) != 1))
    {
      Image
        *clones;

      /*
        A scene subset (e.g. image.gif[3-5]) was requested: keep only the
        selected frames.
      */
      clones=CloneImages(image,read_info->scenes,exception);
      if (clones != (Image *) NULL)
        {
          image=DestroyImageList(image);
          image=GetFirstImageInList(clones);
        }
    }
  InitializeConstituteInfo(read_info,&constitute_info);
  /*
    Post-process every frame: metadata sync, read-time options, extract
    geometry, timestamps.
  */
  for (next=image; next != (Image *) NULL; next=GetNextImageInList(next))
  {
    char
      magick_path[MagickPathExtent],
      *property;

    const StringInfo
      *profile;

    /*
      NOTE(review): initialization of these statics is not synchronized
      across threads -- presumably benign (idempotent getenv), but verify.
    */
    static const char
      *source_date_epoch = (const char *) NULL;

    static MagickBooleanType
      epoch_initalized = MagickFalse;

    next->taint=MagickFalse;
    GetPathComponent(magick_filename,MagickPath,magick_path);
    if ((*magick_path == '\0') && (*next->magick == '\0'))
      (void) CopyMagickString(next->magick,magick,MagickPathExtent);
    (void) CopyMagickString(next->magick_filename,magick_filename,
      MagickPathExtent);
    if (IsBlobTemporary(image) != MagickFalse)
      (void) CopyMagickString(next->filename,filename,MagickPathExtent);
    if (next->magick_columns == 0)
      next->magick_columns=next->columns;
    if (next->magick_rows == 0)
      next->magick_rows=next->rows;
    /* Populate metadata properties before consulting them below. */
    (void) GetImageProperty(next,"exif:*",exception);
    (void) GetImageProperty(next,"icc:*",exception);
    (void) GetImageProperty(next,"iptc:*",exception);
    (void) GetImageProperty(next,"xmp:*",exception);
    SyncOrientationFromProperties(next,&constitute_info,exception);
    SyncResolutionFromProperties(next,&constitute_info,exception);
    if (next->page.width == 0)
      next->page.width=next->columns;
    if (next->page.height == 0)
      next->page.height=next->rows;
    if (constitute_info.caption != (const char *) NULL)
      {
        property=InterpretImageProperties(read_info,next,
          constitute_info.caption,exception);
        (void) SetImageProperty(next,"caption",property,exception);
        property=DestroyString(property);
      }
    if (constitute_info.comment != (const char *) NULL)
      {
        property=InterpretImageProperties(read_info,next,
          constitute_info.comment,exception);
        (void) SetImageProperty(next,"comment",property,exception);
        property=DestroyString(property);
      }
    if (constitute_info.label != (const char *) NULL)
      {
        property=InterpretImageProperties(read_info,next,
          constitute_info.label,exception);
        (void) SetImageProperty(next,"label",property,exception);
        property=DestroyString(property);
      }
    if (LocaleCompare(next->magick,"TEXT") == 0)
      (void) ParseAbsoluteGeometry("0x0+0+0",&next->page);
    if ((read_info->extract != (char *) NULL) &&
        (read_info->stream == (StreamHandler) NULL))
      {
        RectangleInfo
          geometry;

        MagickStatusType
          flags;

        /*
          Honor a read-time extract geometry: crop when an offset was
          given, otherwise resize to the requested region.
        */
        SetGeometry(next,&geometry);
        flags=ParseAbsoluteGeometry(read_info->extract,&geometry);
        if ((next->columns != geometry.width) ||
            (next->rows != geometry.height))
          {
            if (((flags & XValue) != 0) || ((flags & YValue) != 0))
              {
                Image
                  *crop_image;

                crop_image=CropImage(next,&geometry,exception);
                if (crop_image != (Image *) NULL)
                  ReplaceImageInList(&next,crop_image);
              }
            else
              if (((flags & WidthValue) != 0) || ((flags & HeightValue) != 0))
                {
                  Image
                    *size_image;

                  flags=ParseRegionGeometry(next,read_info->extract,&geometry,
                    exception);
                  size_image=ResizeImage(next,geometry.width,geometry.height,
                    next->filter,exception);
                  if (size_image != (Image *) NULL)
                    ReplaceImageInList(&next,size_image);
                }
          }
      }
    /*
      NOTE(review): these profile lookups discard their results; presumably
      they exist only to trigger lazy icc/icm and iptc/8bim profile
      synchronization -- verify before removing.
    */
    profile=GetImageProfile(next,"icc");
    if (profile == (const StringInfo *) NULL)
      profile=GetImageProfile(next,"icm");
    profile=GetImageProfile(next,"iptc");
    if (profile == (const StringInfo *) NULL)
      profile=GetImageProfile(next,"8bim");
    if (epoch_initalized == MagickFalse)
      {
        /* Honor SOURCE_DATE_EPOCH (reproducible builds): skip date stamps. */
        source_date_epoch=getenv("SOURCE_DATE_EPOCH");
        epoch_initalized=MagickTrue;
      }
    if (source_date_epoch == (const char *) NULL)
      {
        char
          timestamp[MagickTimeExtent];

        (void) FormatMagickTime(next->timestamp,sizeof(timestamp),timestamp);
        (void) SetImageProperty(next,"date:timestamp",timestamp,exception);
        (void) FormatMagickTime((time_t) GetBlobProperties(next)->st_mtime,
          sizeof(timestamp),timestamp);
        (void) SetImageProperty(next,"date:modify",timestamp,exception);
        (void) FormatMagickTime((time_t) GetBlobProperties(next)->st_ctime,
          sizeof(timestamp),timestamp);
        (void) SetImageProperty(next,"date:create",timestamp,exception);
      }
    if (constitute_info.delay_flags != NoValue)
      {
        /*
          Apply the "delay" option: '>' only shortens longer delays, '<'
          only lengthens shorter ones, otherwise set unconditionally.
        */
        if ((constitute_info.delay_flags & GreaterValue) != 0)
          {
            if (next->delay > constitute_info.delay)
              next->delay=constitute_info.delay;
          }
        else
          if ((constitute_info.delay_flags & LessValue) != 0)
            {
              if (next->delay < constitute_info.delay)
                next->delay=constitute_info.delay;
            }
          else
            next->delay=constitute_info.delay;
        if ((constitute_info.delay_flags & SigmaValue) != 0)
          next->ticks_per_second=constitute_info.ticks_per_second;
      }
    if (constitute_info.dispose != (const char *) NULL)
      {
        ssize_t
          option_type;

        option_type=ParseCommandOption(MagickDisposeOptions,MagickFalse,
          constitute_info.dispose);
        if (option_type >= 0)
          next->dispose=(DisposeType) option_type;
      }
    if (read_info->verbose != MagickFalse)
      (void) IdentifyImage(next,stderr,MagickFalse,exception);
    image=next;  /* track current node; ReplaceImageInList() may move `next' */
  }
  read_info=DestroyImageInfo(read_info);
  if (GetBlobError(image) != MagickFalse)
    ThrowReaderException(CorruptImageError,"UnableToReadImageData");
  return(GetFirstImageInList(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadImages() reads one or more images and returns them as an image list.
%
% The format of the ReadImage method is:
%
% Image *ReadImages(ImageInfo *image_info,const char *filename,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o filename: the image filename.
%
% o exception: return any errors or warnings in this structure.
%
*/
/* Read one or more images into an image list (see banner doc above). */
MagickExport Image *ReadImages(ImageInfo *image_info,const char *filename,
  ExceptionInfo *exception)
{
  char
    read_filename[MagickPathExtent];

  Image
    *image,
    *images;

  ImageInfo
    *read_info;

  /*
    Read image list from a file.
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  read_info=CloneImageInfo(image_info);
  *read_info->magick='\0';
  (void) SetImageOption(read_info,"filename",filename);
  (void) CopyMagickString(read_info->filename,filename,MagickPathExtent);
  /*
    Expand any %d scene template; if expansion changes the filename, the
    caller requested an explicit scene range.
  */
  (void) InterpretImageFilename(read_info,(Image *) NULL,filename,
    (int) read_info->scene,read_filename,exception);
  if (LocaleCompare(read_filename,read_info->filename) != 0)
    {
      ExceptionInfo
        *sans;

      ssize_t
        extent,
        scene;

      /*
        Images of the form image-%d.png[1-5].
      */
      sans=AcquireExceptionInfo();
      (void) SetImageInfo(read_info,0,sans);  /* probe only; ignore errors */
      sans=DestroyExceptionInfo(sans);
      if (read_info->number_scenes != 0)
        {
          (void) CopyMagickString(read_filename,read_info->filename,
            MagickPathExtent);
          images=NewImageList();
          extent=(ssize_t) (read_info->scene+read_info->number_scenes);
          scene=(ssize_t) read_info->scene;
          for ( ; scene < (ssize_t) extent; scene++)
          {
            /* Read each scene; unreadable scenes are silently skipped. */
            (void) InterpretImageFilename(image_info,(Image *) NULL,
              read_filename,(int) scene,read_info->filename,exception);
            image=ReadImage(read_info,exception);
            if (image == (Image *) NULL)
              continue;
            AppendImageToList(&images,image);
          }
          read_info=DestroyImageInfo(read_info);
          return(images);
        }
    }
  (void) CopyMagickString(read_info->filename,filename,MagickPathExtent);
  image=ReadImage(read_info,exception);
  read_info=DestroyImageInfo(read_info);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e a d I n l i n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadInlineImage() reads a Base64-encoded inline image or image sequence.
% The method returns a NULL if there is a memory shortage or if the image
% cannot be read. On failure, a NULL image is returned and exception
% describes the reason for the failure.
%
% The format of the ReadInlineImage method is:
%
% Image *ReadInlineImage(const ImageInfo *image_info,const char *content,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o content: the image encoded in Base64.
%
% o exception: return any errors or warnings in this structure.
%
*/
/* Read a Base64-encoded data: URI into an image (see banner doc above). */
MagickExport Image *ReadInlineImage(const ImageInfo *image_info,
  const char *content,ExceptionInfo *exception)
{
  Image
    *image;

  ImageInfo
    *read_info;

  unsigned char
    *blob;

  size_t
    length;

  const char
    *p;

  /*
    Skip over header (e.g. data:image/gif;base64,).
  */
  image=NewImageList();
  for (p=content; (*p != ',') && (*p != '\0'); p++) ;
  if (*p == '\0')
    ThrowReaderException(CorruptImageError,"CorruptImage");
  blob=Base64Decode(++p,&length);  /* payload starts after the comma */
  if (length == 0)
    {
      blob=(unsigned char *) RelinquishMagickMemory(blob);
      ThrowReaderException(CorruptImageError,"CorruptImage");
    }
  read_info=CloneImageInfo(image_info);
  (void) SetImageInfoProgressMonitor(read_info,(MagickProgressMonitor) NULL,
    (void *) NULL);
  *read_info->filename='\0';
  *read_info->magick='\0';
  /* Locate the media type (the part after '/' in e.g. image/gif). */
  for (p=content; (*p != '/') && (*p != '\0'); p++) ;
  if (*p != '\0')
    {
      char
        *q;

      ssize_t
        i;

      /*
        Extract media type.
      */
      if (LocaleNCompare(++p,"x-",2) == 0)
        p+=2;  /* strip experimental "x-" subtype prefix */
      (void) strcpy(read_info->filename,"data.");
      q=read_info->filename+5;
      /* Copy the subtype as an extension; bound keeps the copy in range. */
      for (i=0; (*p != ';') && (*p != '\0') && (i < (MagickPathExtent-6)); i++)
        *q++=(*p++);
      *q++='\0';
    }
  image=BlobToImage(read_info,blob,length,exception);
  blob=(unsigned char *) RelinquishMagickMemory(blob);
  read_info=DestroyImageInfo(read_info);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WriteImage() writes an image or an image sequence to a file or file handle.
% If writing to a file is on disk, the name is defined by the filename member
of the image structure. WriteImage() returns MagickFalse if there is a
% memory shortage or if the image cannot be written. Check the exception
% member of image to determine the cause for any failure.
%
% The format of the WriteImage method is:
%
% MagickBooleanType WriteImage(const ImageInfo *image_info,Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType WriteImage(const ImageInfo *image_info,
  Image *image,ExceptionInfo *exception)
{
  char
    filename[MagickPathExtent];

  const char
    *option;

  const DelegateInfo
    *delegate_info;

  const MagickInfo
    *magick_info;

  EncodeImageHandler
    *encoder;

  ExceptionInfo
    *sans_exception;

  ImageInfo
    *write_info;

  MagickBooleanType
    status,
    temporary;

  /*
    Determine image type from filename prefix or suffix (e.g. image.jpg).
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  /* Work on a clone so the caller's image_info is never modified. */
  sans_exception=AcquireExceptionInfo();
  write_info=CloneImageInfo(image_info);
  (void) CopyMagickString(write_info->filename,image->filename,
    MagickPathExtent);
  (void) SetImageInfo(write_info,1,sans_exception);
  if (*write_info->magick == '\0')
    (void) CopyMagickString(write_info->magick,image->magick,MagickPathExtent);
  /* Save the original filename; it is restored on several exit paths. */
  (void) CopyMagickString(filename,image->filename,MagickPathExtent);
  (void) CopyMagickString(image->filename,write_info->filename,
    MagickPathExtent);
  /*
    Call appropriate image writer based on image type.
  */
  magick_info=GetMagickInfo(write_info->magick,sans_exception);
  /* On a policy violation, re-run the lookup so the caller's exception
     receives the PolicyError details. */
  if (sans_exception->severity == PolicyError)
    magick_info=GetMagickInfo(write_info->magick,exception);
  sans_exception=DestroyExceptionInfo(sans_exception);
  if (magick_info != (const MagickInfo *) NULL)
    {
      if (GetMagickEndianSupport(magick_info) == MagickFalse)
        image->endian=UndefinedEndian;
      else
        if ((image_info->endian == UndefinedEndian) &&
            (GetMagickRawSupport(magick_info) != MagickFalse))
          {
            unsigned long
              lsb_first;

            /* Detect host byte order at run time for raw formats. */
            lsb_first=1;
            image->endian=(*(char *) &lsb_first) == 1 ? LSBEndian : MSBEndian;
          }
    }
  (void) SyncImageProfiles(image);
  DisassociateImageStream(image);
  option=GetImageOption(image_info,"delegate:bimodal");
  /* Bi-modal delegate shortcut: only safe for a single, untainted frame
     whose original file is still accessible on disk. */
  if ((IsStringTrue(option) != MagickFalse) &&
      (write_info->page == (char *) NULL) &&
      (GetPreviousImageInList(image) == (Image *) NULL) &&
      (GetNextImageInList(image) == (Image *) NULL) &&
      (IsTaintImage(image) == MagickFalse) )
    {
      delegate_info=GetDelegateInfo(image->magick,write_info->magick,exception);
      if ((delegate_info != (const DelegateInfo *) NULL) &&
          (GetDelegateMode(delegate_info) == 0) &&
          (IsPathAccessible(image->magick_filename) != MagickFalse))
        {
          /*
            Process image with bi-modal delegate.
          */
          (void) CopyMagickString(image->filename,image->magick_filename,
            MagickPathExtent);
          status=InvokeDelegate(write_info,image,image->magick,
            write_info->magick,exception);
          write_info=DestroyImageInfo(write_info);
          (void) CopyMagickString(image->filename,filename,MagickPathExtent);
          return(status);
        }
    }
  status=MagickFalse;
  temporary=MagickFalse;
  if ((magick_info != (const MagickInfo *) NULL) &&
      (GetMagickEncoderSeekableStream(magick_info) != MagickFalse))
    {
      char
        image_filename[MagickPathExtent];

      /* Probe whether the destination blob is seekable; if not, redirect
         the write to a unique temporary file (copied back at the end). */
      (void) CopyMagickString(image_filename,image->filename,MagickPathExtent);
      status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
      (void) CopyMagickString(image->filename, image_filename,MagickPathExtent);
      if (status != MagickFalse)
        {
          if (IsBlobSeekable(image) == MagickFalse)
            {
              /*
                A seekable stream is required by the encoder.
              */
              write_info->adjoin=MagickTrue;
              (void) CopyMagickString(write_info->filename,image->filename,
                MagickPathExtent);
              (void) AcquireUniqueFilename(image->filename);
              temporary=MagickTrue;
            }
          (void) CloseBlob(image);
        }
    }
  encoder=GetImageEncoder(magick_info);
  if (encoder != (EncodeImageHandler *) NULL)
    {
      /*
        Call appropriate image writer based on image type.  Non-thread-safe
        encoders are serialized through the coder's semaphore.
      */
      if ((magick_info != (const MagickInfo *) NULL) &&
          (GetMagickEncoderThreadSupport(magick_info) == MagickFalse))
        LockSemaphoreInfo(magick_info->semaphore);
      status=IsCoderAuthorized(write_info->magick,WritePolicyRights,exception);
      if (status != MagickFalse)
        status=encoder(write_info,image,exception);
      if ((magick_info != (const MagickInfo *) NULL) &&
          (GetMagickEncoderThreadSupport(magick_info) == MagickFalse))
        UnlockSemaphoreInfo(magick_info->semaphore);
    }
  else
    {
      delegate_info=GetDelegateInfo((char *) NULL,write_info->magick,exception);
      if (delegate_info != (DelegateInfo *) NULL)
        {
          /*
            Process the image with delegate.
          */
          *write_info->filename='\0';
          if (GetDelegateThreadSupport(delegate_info) == MagickFalse)
            LockSemaphoreInfo(delegate_info->semaphore);
          status=InvokeDelegate(write_info,image,(char *) NULL,
            write_info->magick,exception);
          if (GetDelegateThreadSupport(delegate_info) == MagickFalse)
            UnlockSemaphoreInfo(delegate_info->semaphore);
          (void) CopyMagickString(image->filename,filename,MagickPathExtent);
        }
      else
        {
          /* No encoder and no delegate: fall back through progressively
             weaker hints (image's own magick, filename extension). */
          sans_exception=AcquireExceptionInfo();
          magick_info=GetMagickInfo(write_info->magick,sans_exception);
          if (sans_exception->severity == PolicyError)
            magick_info=GetMagickInfo(write_info->magick,exception);
          sans_exception=DestroyExceptionInfo(sans_exception);
          if ((write_info->affirm == MagickFalse) &&
              (magick_info == (const MagickInfo *) NULL))
            {
              (void) CopyMagickString(write_info->magick,image->magick,
                MagickPathExtent);
              magick_info=GetMagickInfo(write_info->magick,exception);
            }
          encoder=GetImageEncoder(magick_info);
          if (encoder == (EncodeImageHandler *) NULL)
            {
              char
                extension[MagickPathExtent];

              GetPathComponent(image->filename,ExtensionPath,extension);
              if (*extension != '\0')
                magick_info=GetMagickInfo(extension,exception);
              else
                magick_info=GetMagickInfo(image->magick,exception);
              (void) CopyMagickString(image->filename,filename,
                MagickPathExtent);
              encoder=GetImageEncoder(magick_info);
            }
          if (encoder == (EncodeImageHandler *) NULL)
            {
              magick_info=GetMagickInfo(image->magick,exception);
              encoder=GetImageEncoder(magick_info);
              if (encoder == (EncodeImageHandler *) NULL)
                (void) ThrowMagickException(exception,GetMagickModule(),
                  MissingDelegateError,"NoEncodeDelegateForThisImageFormat",
                  "`%s'",write_info->magick);
            }
          if (encoder != (EncodeImageHandler *) NULL)
            {
              /*
                Call appropriate image writer based on image type.
              */
              if (GetMagickEncoderThreadSupport(magick_info) == MagickFalse)
                LockSemaphoreInfo(magick_info->semaphore);
              status=IsCoderAuthorized(write_info->magick,WritePolicyRights,
                exception);
              if (status != MagickFalse)
                status=encoder(write_info,image,exception);
              if (GetMagickEncoderThreadSupport(magick_info) == MagickFalse)
                UnlockSemaphoreInfo(magick_info->semaphore);
            }
        }
    }
  if (temporary != MagickFalse)
    {
      /*
        Copy temporary image file to permanent.
      */
      status=OpenBlob(write_info,image,ReadBinaryBlobMode,exception);
      if (status != MagickFalse)
        {
          (void) RelinquishUniqueFileResource(write_info->filename);
          status=ImageToFile(image,write_info->filename,exception);
        }
      (void) CloseBlob(image);
      (void) RelinquishUniqueFileResource(image->filename);
      (void) CopyMagickString(image->filename,write_info->filename,
        MagickPathExtent);
    }
  if ((LocaleCompare(write_info->magick,"info") != 0) &&
      (write_info->verbose != MagickFalse))
    (void) IdentifyImage(image,stdout,MagickFalse,exception);
  write_info=DestroyImageInfo(write_info);
  if (GetBlobError(image) != MagickFalse)
    ThrowWriterException(FileOpenError,"UnableToWriteFile");
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WriteImages() writes an image sequence into one or more files. While
% WriteImage() can write an image sequence, it is limited to writing
% the sequence into a single file using a format which supports multiple
% frames. WriteImages(), however, does not have this limitation, instead it
% generates multiple output files if necessary (or when requested). When
% ImageInfo's adjoin flag is set to MagickFalse, the file name is expected
% to include a printf-style formatting string for the frame number (e.g.
% "image%02d.png").
%
% The format of the WriteImages method is:
%
% MagickBooleanType WriteImages(const ImageInfo *image_info,Image *images,
% const char *filename,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o images: the image list.
%
% o filename: the image filename.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType WriteImages(const ImageInfo *image_info,
  Image *images,const char *filename,ExceptionInfo *exception)
{
#define WriteImageTag "Write/Image"

  ExceptionInfo
    *sans_exception;

  ImageInfo
    *write_info;

  MagickBooleanType
    proceed;

  MagickOffsetType
    progress;

  MagickProgressMonitor
    progress_monitor;

  MagickSizeType
    number_images;

  MagickStatusType
    status;

  Image
    *p;

  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  write_info=CloneImageInfo(image_info);
  *write_info->magick='\0';
  images=GetFirstImageInList(images);
  if (images == (Image *) NULL)
    return(MagickFalse);
  /* An explicit filename argument overrides each frame's own filename. */
  if (filename != (const char *) NULL)
    for (p=images; p != (Image *) NULL; p=GetNextImageInList(p))
      (void) CopyMagickString(p->filename,filename,MagickPathExtent);
  (void) CopyMagickString(write_info->filename,images->filename,
    MagickPathExtent);
  sans_exception=AcquireExceptionInfo();
  (void) SetImageInfo(write_info,(unsigned int) GetImageListLength(images),
    sans_exception);
  sans_exception=DestroyExceptionInfo(sans_exception);
  if (*write_info->magick == '\0')
    (void) CopyMagickString(write_info->magick,images->magick,MagickPathExtent);
  p=images;
  /* If any frame's scene number does not strictly increase, renumber the
     whole sequence consecutively starting from the first frame's scene. */
  for ( ; GetNextImageInList(p) != (Image *) NULL; p=GetNextImageInList(p))
  {
    Image
      *next;

    next=GetNextImageInList(p);
    if (next == (Image *) NULL)
      break;
    if (p->scene >= next->scene)
      {
        ssize_t
          i;

        /*
          Generate consistent scene numbers.
        */
        i=(ssize_t) images->scene;
        for (p=images; p != (Image *) NULL; p=GetNextImageInList(p))
          p->scene=(size_t) i++;
        break;
      }
  }
  /*
    Write images.
  */
  status=MagickTrue;
  progress_monitor=(MagickProgressMonitor) NULL;
  progress=0;
  number_images=GetImageListLength(images);
  for (p=images; p != (Image *) NULL; p=GetNextImageInList(p))
  {
    /* Suppress per-frame monitors while writing a multi-frame sequence;
       overall progress is reported below instead. */
    if (number_images != 1)
      progress_monitor=SetImageProgressMonitor(p,(MagickProgressMonitor) NULL,
        p->client_data);
    status&=WriteImage(write_info,p,exception);
    if (number_images != 1)
      (void) SetImageProgressMonitor(p,progress_monitor,p->client_data);
    /* With adjoin, WriteImage emits the whole sequence in one call. */
    if (write_info->adjoin != MagickFalse)
      break;
    if (number_images != 1)
      {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(p,WriteImageTag,progress,number_images);
        if (proceed == MagickFalse)
          break;
      }
  }
  write_info=DestroyImageInfo(write_info);
  return(status != 0 ? MagickTrue : MagickFalse);
}
|
GB_unop__log10_fp32_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__log10_fp32_fp32
// op(A') function: GB_unop_tran__log10_fp32_fp32
// C type: float
// A type: float
// cast: float cij = aij
// unaryop: cij = log10f (aij)
#define GB_ATYPE \
float
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = log10f (x) ;
// casting
#define GB_CAST(z, aij) \
float z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = aij ; \
Cx [pC] = log10f (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LOG10 || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_apply__log10_fp32_fp32
(
    float *Cx,              // Cx and Ax may be aliased
    const float *Ax,
    int64_t anz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    // Entrywise Cx [p] = log10f (Ax [p]).  The cast from A's type to C's
    // type is the identity here (both are float), so it is fused away.
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        Cx [p] = log10f (Ax [p]) ;
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Transpose A, cast each entry to float, and apply z = log10f (x).
// The actual loop lives in the shared template GB_unop_transpose.c, which
// expands using the GB_* macros defined above in this file.
GrB_Info GB_unop_tran__log10_fp32_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
#endif
}
#endif
|
3d25pt_var.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 25 point stencil with axis-symmetric variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for two `struct timeval` values.
 * NOTE: *y is normalized in place (borrow/carry), as in the classic glibc
 * example this routine is based on.  The resulting tv_usec is always
 * non-negative.  Returns 1 if the difference is negative, otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Borrow whole seconds into y so that x->tv_usec >= y->tv_usec. */
  if (x->tv_usec < y->tv_usec) {
    int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * borrow;
    y->tv_sec  += borrow;
  }
  /* Carry excess microseconds (over one second) back into y. */
  if (x->tv_usec - y->tv_usec > 1000000) {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec  -= carry;
  }
  /* tv_usec is now certainly non-negative. */
  result->tv_sec  = x->tv_sec  - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  int Nx, Ny, Nz, Nt;
  /* NOTE(review): Nx/Ny/Nz/Nt remain uninitialized if too few command-line
     arguments are given; the benchmark assumes argv[1..4] are present. */
  if (argc > 3) {
    /* +8 accounts for a 4-cell halo on each side of every axis. */
    Nx = atoi(argv[1])+8;
    Ny = atoi(argv[2])+8;
    Nz = atoi(argv[3])+8;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  // allocate the arrays: A is double-buffered in time (A[0]/A[1])
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  /* 13 coefficient arrays: 1 center + 4 distances x 3 axes (axis-symmetric) */
  double ****coef = (double ****) malloc(sizeof(double***)*13);
  for(m=0; m<13;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 16;
  tile_size[1] = 16;
  tile_size[2] = 16;
  tile_size[3] = 512;
  tile_size[4] = -1;   /* sentinel terminating the tile-size list */

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;

  // initialize variables with reproducible pseudo-random data
  //
  srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }
  for (m=0; m<13; m++) {
    for (i=1; i<Nz; i++) {
      for (j=1; j<Ny; j++) {
        for (k=1; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
  #pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
    #pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  /* Run the kernel TESTS times and keep the minimum wall-clock time. */
  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);

    // serial execution - Addition: 6 && Multiplication: 2

    /* The license text below was inlined by the source-to-source tool from
       a glibc header; it is kept verbatim. */
    /* Copyright (C) 1991-2014 Free Software Foundation, Inc.
       This file is part of the GNU C Library.
       The GNU C Library is free software; you can redistribute it and/or
       modify it under the terms of the GNU Lesser General Public
       License as published by the Free Software Foundation; either
       version 2.1 of the License, or (at your option) any later version.
       The GNU C Library is distributed in the hope that it will be useful,
       but WITHOUT ANY WARRANTY; without even the implied warranty of
       MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
       Lesser General Public License for more details.
       You should have received a copy of the GNU Lesser General Public
       License along with the GNU C Library; if not, see
       <http://www.gnu.org/licenses/>.  */
    /* This header is separate from features.h so that the compiler can
       include it implicitly at the start of every compilation.  It must
       not itself include <features.h> or any other header that includes
       <features.h> because the implicit include comes before any feature
       test macros that may be defined in a source file before it first
       explicitly includes a system header.  GCC knows the name of this
       header in order to preinclude it.  */
    /* glibc's intent is to support the IEC 559 math functionality, real
       and complex.  If the GCC (4.9 and later) predefined macros
       specifying compiler intent are available, use them to determine
       whether the overall intent is to support these features; otherwise,
       presume an older compiler has intent to support these features and
       define these macros by default.  */
    /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
       Unicode 6.0.  */
    /* We do not support C11 <threads.h>.  */

    /* Auto-generated time-tiled loop nest (PLUTO/CLooG).  t1..t8 are tile
       and point coordinates; do not edit by hand. */
    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;
    /* Start of CLooG code */
    if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
      for (t1=-1;t1<=floord(Nt-1,2);t1++) {
        lbp=max(ceild(t1,2),ceild(4*t1-Nt+2,4));
        ubp=min(floord(4*Nt+Nz-9,16),floord(8*t1+Nz+2,16));
        /* Parallelize over the t2 (z-tile) dimension. */
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
        for (t2=lbp;t2<=ubp;t2++) {
          for (t3=max(max(0,ceild(t1-1,2)),ceild(16*t2-Nz-3,16));t3<=min(min(min(floord(4*Nt+Ny-9,16),floord(8*t1+Ny+7,16)),floord(16*t2+Ny+3,16)),floord(16*t1-16*t2+Nz+Ny+5,16));t3++) {
            for (t4=max(max(max(0,ceild(t1-63,64)),ceild(16*t2-Nz-499,512)),ceild(16*t3-Ny-499,512));t4<=min(min(min(min(floord(4*Nt+Nx-9,512),floord(8*t1+Nx+7,512)),floord(16*t2+Nx+3,512)),floord(16*t3+Nx+3,512)),floord(16*t1-16*t2+Nz+Nx+5,512));t4++) {
              for (t5=max(max(max(max(max(0,ceild(16*t2-Nz+5,4)),ceild(16*t3-Ny+5,4)),ceild(512*t4-Nx+5,4)),2*t1),4*t1-4*t2+1);t5<=min(min(min(min(min(floord(16*t1-16*t2+Nz+10,4),Nt-1),2*t1+3),4*t2+2),4*t3+2),128*t4+126);t5++) {
                for (t6=max(max(16*t2,4*t5+4),-16*t1+16*t2+8*t5-15);t6<=min(min(16*t2+15,-16*t1+16*t2+8*t5),4*t5+Nz-5);t6++) {
                  for (t7=max(16*t3,4*t5+4);t7<=min(16*t3+15,4*t5+Ny-5);t7++) {
                    lbv=max(512*t4,4*t5+4);
                    ubv=min(512*t4+511,4*t5+Nx-5);
/* Innermost x loop: vectorizable (no loop-carried dependence). */
#pragma ivdep
#pragma vector always
                    for (t8=lbv;t8<=ubv;t8++) {
                      A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((((((((((((coef[0][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef[1][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]))) + (coef[3][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef[4][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[5][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]))) + (coef[6][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef[7][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[8][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]))) + (coef[9][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef[10][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[11][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]))) 
+ (coef[12][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])));;
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
    /* End of CLooG code */

    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(4, "variable axis-symmetric")

#ifdef LIKWID_PERFMON
  #pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);

  for(m=0; m<13;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }

  return 0;
}
|
omp_report_mask.c | /* Routine reports OpenMP process affinity information.
Get thread number and cpus (cpu_ids)
Create static space (proc_mask) to hold all masks (done in a single region)
Determine the mask for each thread (insert it in proc_mask)
print mask header (one thread in single region)
print mask (one thread in single region)
free spaces
return
Removed check: if(omp_get_num_procs() != ncpus){ on 2019-06-14
*/
#include <stdio.h>
#include <omp.h>
#include <unistd.h>
#include <stdlib.h>
#include <ctype.h>
#include "opts.h"
void print_mask(int hd_prnt, char* name, int multi_node, int rank, int thrd, int ncpus, int nranks, int nthrds, int *proc_mask, int tpc, char v);
int boundto(int* nelements_set, int* int_mask);
int get_threads_per_node();
/* Report the affinity mask of every thread in the current OpenMP team.
 * Each thread fills its own row of proc_mask; one thread then prints a
 * header, all rows, and (for large teams) a trailing header, and frees
 * the storage.  Must be called from inside a parallel region. */
void amask_omp(){

   static int ncpus, nthrds;
   int thrd;                  // this thread's number
   int nel_set;
   static int ** proc_mask;   // one row of cpu-id flags per thread
   int i,j, ierr;
   char * dummy = NULL;       // FIX: was uninitialized; print_mask takes it as 'name'
   static char v,p;
   static int tpc;            // hwthreads/core

   Maskopts opts;

   thrd = omp_get_thread_num();

   #pragma omp single
   {
      // get print_speed fast or slow (f|c); listing cores or SMT (c|s)
      p = opts.get_p();
      v = opts.get_v();

      tpc = get_threads_per_node();
      nthrds = omp_get_num_threads();
      // Use sysconf instead of omp_get_num_procs(): the latter reports the
      // count of bits set when numactl restricts the cpu set (see 6/14/2019
      // note in the file header about the removed consistency check).
      ncpus = (int) sysconf(_SC_NPROCESSORS_ONLN);
   }

   #pragma omp single
   {
      proc_mask = (int **) malloc(sizeof(int*)*nthrds);
      for(i=0;i<nthrds;i++) proc_mask[i] = (int * ) malloc(sizeof(int)*ncpus );
      for(i=0;i<nthrds;i++) for(j=0;j<ncpus;j++) proc_mask[i][j] =0;
   }

   // Every thread records its own binding into its row.
   ierr = boundto(&nel_set,proc_mask[thrd]);

   #pragma omp barrier
   #pragma omp single
   {
      print_mask(1, dummy, 0, 0,0, ncpus, 1,nthrds, proc_mask[thrd],tpc,v); //print header
      for(thrd=0;thrd<nthrds;thrd++){
         print_mask(0, dummy, 0, 0,thrd, ncpus, 1,nthrds, proc_mask[thrd],tpc,v);
         if(p == 's') ierr=usleep(300000);
      }
      if(nthrds>50)
         // FIX: was proc_mask[thrd] -- after the loop thrd==nthrds, an
         // out-of-bounds read; the header print gets a valid row instead.
         print_mask(2, dummy, 0, 0,0, ncpus, 1,nthrds, proc_mask[0],tpc,v); //print header

      for(i=0;i<nthrds;i++) free( proc_mask[i]);
      free( proc_mask);
   }
}
void amask_omp_(){ amask_omp(); }
|
_axpy_pragma.c | void axpy(int n, double *y, double a, double *x) {
register int i;
#pragma Orio Loop(transform Unroll(ufactor=3, parallelize=True))
{
int i;
#pragma omp parallel for private(i)
for (i=0; i<=n-3; i=i+3) {
y[i]=y[i]+a*x[i];
y[(i+1)]=y[(i+1)]+a*x[(i+1)];
y[(i+2)]=y[(i+2)]+a*x[(i+2)];
}
for (i=n-((n-(0))%3); i<=n-1; i=i+1)
y[i]=y[i]+a*x[i];
}
#pragma Oiro
}
|
GB_unop__isinf_bool_fc64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__isinf_bool_fc64
// op(A') function: GB_unop_tran__isinf_bool_fc64
// C type: bool
// A type: GxB_FC64_t
// cast: GxB_FC64_t cij = (aij)
// unaryop: cij = GB_cisinf (aij)
#define GB_ATYPE \
GxB_FC64_t
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_cisinf (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC64_t z = (aij) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC64_t z = (aij) ; \
Cx [pC] = GB_cisinf (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISINF || GxB_NO_BOOL || GxB_NO_FC64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Entrywise Cx [p] = GB_cisinf (Ax [p]): true when the double-complex entry
// has an infinite real or imaginary part.
GrB_Info GB_unop_apply__isinf_bool_fc64
(
    bool *Cx,               // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    int64_t anz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GxB_FC64_t aij = Ax [p] ;
        // cast is the identity for this type pairing
        GxB_FC64_t z = (aij) ;
        Cx [p] = GB_cisinf (z) ;
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Transpose A, cast, and apply z = GB_cisinf (x).  The loop is expanded from
// the shared template GB_unop_transpose.c via the GB_* macros defined above.
GrB_Info GB_unop_tran__isinf_bool_fc64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
#endif
}
#endif
|
Example_threadprivate.1.c | /*
* @@name: threadprivate.1c
* @@type: C
* @@compilable: yes
* @@linkable: no
* @@expect: success
*/
int counter = 0;
#pragma omp threadprivate(counter)

/* Bump the calling thread's private copy of counter and return it. */
int increment_counter()
{
  return ++counter;
}
|
ex8.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include "timer.h"
/* Count primes in [2, N] by trial division using T OpenMP threads.
 * Usage: prog N T */
int main(int argc, char const *argv[])
{
  int i = 0;
  int j = 0;
  int pc = 0;          /* prime count, combined via reduction(+:pc) */
  int N;
  int T;
  double tempo;
  double fim;
  double inicio;

  /* FIX: validate argument count before dereferencing argv[1]/argv[2]. */
  if (argc < 3) {
    fprintf(stderr, "uso: %s N T\n", argv[0]);
    return 1;
  }
  N = atoi(argv[1]);
  T = atoi(argv[2]);
  printf("N=%d\nT=%d\n", N, T);

  GET_TIME(inicio);
  /* FIX: removed the unused file-scope count1 that was shadowed by the
     loop-local declaration (and dropped it from the private clause). */
  #pragma omp parallel for reduction(+:pc) num_threads(T) private(i,j)
  for (i = 2; i <= N; ++i) {
    int count1 = 0;    /* number of divisors of i */
    for (j = 1; j <= i; ++j) {
      if ((i % j) == 0) {
        count1++;
      }
    }
    if (count1 == 2) { /* exactly 1 and i divide it => prime */
      pc++;
    }
  }
  GET_TIME(fim);

  tempo = fim - inicio;
  printf("Tempo: %.8lf\n", tempo);
  printf("PRIMOS: %d\n", pc);
  return 0;
}
|
GB_unaryop__lnot_int16_uint64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_int16_uint64
// op(A') function: GB_tran__lnot_int16_uint64
// C type: int16_t
// A type: uint64_t
// cast: int16_t cij = (int16_t) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
int16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, x) \
int16_t z = (int16_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_INT16 || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
/* Apply the LNOT operator entrywise to a dense array:
 * Cx [p] = !(((int16_t) Ax [p]) != 0) for p = 0..anz-1
 * (cast first, then logical-not, per the GB_CAST_OP macro above).
 * Returns GrB_NO_VALUE if this operator/type pairing was compiled out
 * via GB_control.h; GrB_SUCCESS otherwise. */
GrB_Info GB_unop__lnot_int16_uint64
(
    int16_t *restrict Cx,           /* output array, anz entries */
    const uint64_t *restrict Ax,    /* input array, anz entries */
    int64_t anz,                    /* number of entries to process */
    int nthreads                    /* # of OpenMP threads for the loop */
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    /* each entry is independent: static schedule gives even, cheap chunks */
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
/* C = lnot (cast (A')): transpose A, typecast uint64_t to int16_t, and apply
 * the logical-not operator. The actual loop lives in the shared template
 * GB_unaryop_transpose.c, specialized here via the GB_* macros above.
 * Returns GrB_NO_VALUE if this kernel was compiled out, else GrB_SUCCESS. */
GrB_Info GB_tran__lnot_int16_uint64
(
    GrB_Matrix C,                       /* output matrix */
    const GrB_Matrix A,                 /* input matrix, transposed into C */
    int64_t **Rowcounts,                /* per-slice row counts workspace */
    GBI_single_iterator Iter,           /* iterator over A's vectors */
    const int64_t *restrict A_slice,    /* slice boundaries for parallelism */
    int naslice                         /* number of slices of A */
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_unop__identity_int16_uint64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_int16_uint64)
// op(A') function: GB (_unop_tran__identity_int16_uint64)
// C type: int16_t
// A type: uint64_t
// cast: int16_t cij = (int16_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
int16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int16_t z = (int16_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int16_t z = (int16_t) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT16 || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
/* Apply the IDENTITY operator entrywise: Cx [p] = (int16_t) Ax [p].
 * Handles both the full/sparse case (Ab == NULL: every entry present) and
 * the bitmap case (Ab [p] != 0 marks entries present). Returns GrB_NO_VALUE
 * if this kernel was compiled out via GB_control.h, else GrB_SUCCESS. */
GrB_Info GB (_unop_apply__identity_int16_uint64)
(
    int16_t *Cx,                /* Cx and Ax may be aliased */
    const uint64_t *Ax,
    const int8_t *restrict Ab,  /* A->b if A is bitmap */
    int64_t anz,                /* number of entries (or bitmap size) */
    int nthreads                /* # of OpenMP threads for the loops */
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        /* all entries present: straight cast-and-copy */
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint64_t aij = Ax [p] ;
            int16_t z = (int16_t) aij ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;     /* skip entries not in the bitmap */
            uint64_t aij = Ax [p] ;
            int16_t z = (int16_t) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
/* C = cast (A'): transpose A and typecast uint64_t to int16_t (identity op).
 * The loop body is the shared template GB_unop_transpose.c, specialized via
 * the GB_* macros defined above. Returns GrB_NO_VALUE if compiled out. */
GrB_Info GB (_unop_tran__identity_int16_uint64)
(
    GrB_Matrix C,                       /* output matrix */
    const GrB_Matrix A,                 /* input matrix, transposed into C */
    int64_t *restrict *Workspaces,      /* per-workspace transpose buffers */
    const int64_t *restrict A_slice,    /* slice boundaries for parallelism */
    int nworkspaces,                    /* number of workspaces */
    int nthreads                        /* number of OpenMP threads */
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
t_cholmod_gpu.c | /* ========================================================================== */
/* === GPU/t_cholmod_gpu ==================================================== */
/* ========================================================================== */
/* -----------------------------------------------------------------------------
* CHOLMOD/GPU Module. Copyright (C) 2005-2012, Timothy A. Davis
* http://www.suitesparse.com
* -------------------------------------------------------------------------- */
/* GPU BLAS template routine for cholmod_super_numeric. */
/* ========================================================================== */
/* === include files and definitions ======================================== */
/* ========================================================================== */
#ifdef GPU_BLAS
#include <string.h>
#include "cholmod_template.h"
#include "cholmod_gpu_kernels.h"
#include <fenv.h>
#include <cuda.h>
#include <cuda_runtime.h>
#undef L_ENTRY
#ifdef REAL
#define L_ENTRY 1
#else
#define L_ENTRY 2
#endif
/* ========================================================================== */
/* === gpu_clear_memory ===================================================== */
/* ========================================================================== */
/*
* Ensure the Lx is zeroed before forming factor. This is a significant cost
* in the GPU case - so using this parallel memset code for efficiency.
*/
/* Zero the buffer buff of 'size' doubles using a parallel chunked memset.
 * The buffer is split into chunk_multiplier * num_threads chunks scheduled
 * dynamically; the last chunk also clears the remainder of the division.
 * NOTE(review): assumes num_threads > 0 (num_chunks == 0 would divide by
 * zero) — confirm against callers. */
void TEMPLATE2 (CHOLMOD (gpu_clear_memory))
(
    double* buff,       /* buffer to clear */
    size_t size,        /* number of doubles in buff */
    int num_threads     /* number of OpenMP threads to use */
)
{
    int chunk_multiplier = 5;
    int num_chunks = chunk_multiplier * num_threads;
    size_t chunksize = size / num_chunks;
    /* Fix: loop index is int, matching num_chunks. The original used a
     * size_t index, causing a signed/unsigned comparison in the loop test,
     * and unsigned OpenMP loop variables require OpenMP 3.0+. */
    int i;
    #pragma omp parallel for num_threads(num_threads) private(i) schedule(dynamic)
    for(i = 0; i < num_chunks; i++) {
        size_t chunkoffset = (size_t)i * chunksize;
        if(i == num_chunks - 1) {
            /* last chunk: clear its share plus the size % num_chunks tail */
            memset(buff + chunkoffset, 0,
                   (size - chunksize*(size_t)(num_chunks - 1)) * sizeof(double));
        }
        else {
            memset(buff + chunkoffset, 0, chunksize * sizeof(double));
        }
    }
}
/* ========================================================================== */
/* === gpu_init ============================================================= */
/* ========================================================================== */
/*
* Performs required initialization for GPU computing.
*
* Returns 0 if there is an error, so the intended use is
*
* useGPU = CHOLMOD(gpu_init)
*
* which would locally turn off gpu processing if the initialization failed.
*/
/* Set up GPU state for supernodal numeric factorization:
 *  - carve named slices (Lx buffers, C buffer, A buffers, Ls, Map,
 *    RelativeMap) out of the single pre-allocated device memory pool,
 *  - upload L->s (supernode row indices) to the device,
 *  - lazily create the CUDA streams and events used to overlap transfers
 *    with compute (only on the first call, when gpuStream[0] is NULL),
 *  - point h_Lx[] at the pinned host staging buffers.
 * Returns 1 on success, 0 on failure (caller falls back to the CPU). */
int TEMPLATE2 (CHOLMOD (gpu_init))
(
    void *Cwork,                 /* host workspace (debug print only here) */
    cholmod_factor *L,           /* the factor being computed */
    cholmod_common *Common,      /* holds mempools, streams, events */
    Int nsuper,                  /* # of supernodes (unused in this body) */
    Int n,                       /* dimension of the matrix */
    Int nls,                     /* size of L->s */
    cholmod_gpu_pointers *gpu_p  /* out: device/pinned-host pointers */
)
{
    Int i, k, maxSize ;
    cublasStatus_t cublasError ;            /* NOTE(review): unused here */
    cudaError_t cudaErr ;
    size_t maxBytesSize, HostPinnedSize ;   /* NOTE(review): unused here */
    /* trap FP exceptions; feenableexcept is a GNU extension */
    feenableexcept (FE_DIVBYZERO | FE_INVALID | FE_OVERFLOW );
    maxSize = L->maxcsize;      /* NOTE(review): assigned but not used below */
    /* #define PAGE_SIZE (4*1024) */
    CHOLMOD_GPU_PRINTF (("gpu_init : %p\n",
        (void *) ((size_t) Cwork & ~(4*1024-1)))) ;
    /* make sure the assumed buffer sizes are large enough */
    if ( (nls+2*n+4)*sizeof(Int) > Common->devBuffSize ) {
        ERROR (CHOLMOD_GPU_PROBLEM,"\n\n"
               "GPU Memory allocation error. Ls, Map and RelativeMap exceed\n"
               "devBuffSize. It is not clear if this is due to insufficient\n"
               "device or host memory or both. You can try:\n"
               " 1) increasing the amount of GPU memory requested\n"
               " 2) reducing CHOLMOD_NUM_HOST_BUFFERS\n"
               " 3) using a GPU & host with more memory\n"
               "This issue is a known limitation and should be fixed in a \n"
               "future release of CHOLMOD.\n") ;
        return (0) ;
    }
    /* divvy up the memory in dev_mempool: six devBuffSize-sized regions,
     * with Ls/Map/RelativeMap packed into the sixth */
    gpu_p->d_Lx[0] = Common->dev_mempool;
    gpu_p->d_Lx[1] = (char*)Common->dev_mempool + Common->devBuffSize;
    gpu_p->d_C = (char*)Common->dev_mempool + 2 * Common->devBuffSize;
    gpu_p->d_A[0] = (char*)Common->dev_mempool + 3 * Common->devBuffSize;
    gpu_p->d_A[1] = (char*)Common->dev_mempool + 4 * Common->devBuffSize;
    gpu_p->d_Ls = (char*)Common->dev_mempool + 5 * Common->devBuffSize;
    gpu_p->d_Map = (char*)gpu_p->d_Ls + (nls + 1) * sizeof(Int);
    gpu_p->d_RelativeMap = (char*)gpu_p->d_Map + (n + 1) * sizeof(Int);
    /* Copy all of the Ls and Lpi data to the device. If any supernodes are
     * to be computed on the device then this will be needed, so might as
     * well do it now. */
    cudaErr = cudaMemcpy ( gpu_p->d_Ls, L->s, nls*sizeof(Int),
                           cudaMemcpyHostToDevice );
    CHOLMOD_HANDLE_CUDA_ERROR(cudaErr,"cudaMemcpy(d_Ls)");
    /* streams/events are created once and reused across calls */
    if (!(Common->gpuStream[0])) {
        /* ------------------------------------------------------------------ */
        /* create each CUDA stream */
        /* ------------------------------------------------------------------ */
        for ( i=0; i<CHOLMOD_HOST_SUPERNODE_BUFFERS; i++ ) {
            cudaErr = cudaStreamCreate ( &(Common->gpuStream[i]) );
            if (cudaErr != cudaSuccess) {
                ERROR (CHOLMOD_GPU_PROBLEM, "CUDA stream") ;
                return (0) ;
            }
        }
        /* ------------------------------------------------------------------ */
        /* create each CUDA event (timing disabled: sync-only events) */
        /* ------------------------------------------------------------------ */
        for (i = 0 ; i < 3 ; i++) {
            cudaErr = cudaEventCreateWithFlags
                (&(Common->cublasEventPotrf [i]), cudaEventDisableTiming) ;
            if (cudaErr != cudaSuccess) {
                ERROR (CHOLMOD_GPU_PROBLEM, "CUDA event") ;
                return (0) ;
            }
        }
        for (i = 0 ; i < CHOLMOD_HOST_SUPERNODE_BUFFERS ; i++) {
            cudaErr = cudaEventCreateWithFlags
                (&(Common->updateCBuffersFree[i]), cudaEventDisableTiming) ;
            if (cudaErr != cudaSuccess) {
                ERROR (CHOLMOD_GPU_PROBLEM, "CUDA event") ;
                return (0) ;
            }
        }
        cudaErr = cudaEventCreateWithFlags ( &(Common->updateCKernelsComplete),
                                             cudaEventDisableTiming );
        if (cudaErr != cudaSuccess) {
            ERROR (CHOLMOD_GPU_PROBLEM, "CUDA updateCKernelsComplete event") ;
            return (0) ;
        }
    }
    /* pinned host staging buffers, one per host supernode buffer */
    gpu_p->h_Lx[0] = (double*)(Common->host_pinned_mempool);
    for ( k=1; k<CHOLMOD_HOST_SUPERNODE_BUFFERS; k++ ) {
        gpu_p->h_Lx[k] = (double*)((char *)(Common->host_pinned_mempool) +
                                   k*Common->devBuffSize);
    }
    return (1); /* initialization successful, useGPU = 1 */
}
/* ========================================================================== */
/* === gpu_reorder_descendants ============================================== */
/* ========================================================================== */
/* Reorder the descendant supernodes as:
* 1st - descendant supernodes eligible for processing on the GPU
* in increasing (by flops) order
* 2nd - supernodes whose processing is to remain on the CPU
* in any order
*
* All of the GPU-eligible supernodes will be scheduled first. All
* CPU-eligible descendants will overlap with the last (largest)
* CHOLMOD_HOST_SUPERNODE_BUFFERS GPU-eligible descendants.
*/
/* comparator type for qsort; NOTE(review): this name is also defined by
 * glibc headers — confirm it does not collide on the target platform */
typedef int(*__compar_fn_t) (const void *, const void *);
/* Reorder the linked list of descendant supernodes of the current supernode
 * (headed at Head[*locals]) by a flops-based score, so GPU-eligible
 * descendants are processed first; then reverse the first few entries to
 * better hide PCIe transfers, and build the Previous[] back-links. */
void TEMPLATE2 (CHOLMOD (gpu_reorder_descendants))
(
    cholmod_common *Common,
    Int *Super,             /* Super[d]..Super[d+1]-1 = columns of supernode d */
    Int *locals,            /* index into Head for the current supernode */
    Int *Lpi,               /* pointers into Ls for each supernode */
    Int *Lpos,              /* offset of first row of d affecting s */
    Int *Head,              /* head of the descendant linked list */
    Int *Next,              /* forward links of the descendant list */
    Int *Previous,          /* out: backward links, built at the end */
    Int *ndescendants,      /* out: number of descendants in the list */
    Int *tail,              /* out: last descendant in the list */
    Int *mapCreatedOnGpu,   /* out: cleared to 0 (map must be rebuilt) */
    cholmod_gpu_pointers *gpu_p
)
{
    Int prevd, nextd, firstcpu, d, k, kd1, kd2, ndcol, pdi, pdend, pdi1;
    Int dnext, ndrow2, p;
    Int n_descendant = 0;
    double score;
    /* use h_Lx[0] to buffer the GPU-eligible descendants */
    struct cholmod_descendant_score_t* scores =
        (struct cholmod_descendant_score_t*) gpu_p->h_Lx[0];
    double cpuref = 0.0;        /* NOTE(review): unused in this body */
    int nreverse = 1;
    int previousd;
    d = Head[*locals];
    prevd = -1;
    firstcpu = -1;
    *mapCreatedOnGpu = 0;
    /* pass 1: score every descendant and collect into the sort buffer */
    while ( d != EMPTY )
    {
        /* Get the parameters for the current descendant supernode */
        kd1 = Super [d] ;       /* d contains cols kd1 to kd2-1 of L */
        kd2 = Super [d+1] ;
        ndcol = kd2 - kd1 ;     /* # of columns in all of d */
        pdi = Lpi [d] ;         /* pointer to first row of d in Ls */
        pdend = Lpi [d+1] ;     /* pointer just past last row of d in Ls */
        p = Lpos [d] ;          /* offset of 1st row of d affecting s */
        pdi1 = pdi + p ;        /* ptr to 1st row of d affecting s in Ls */
        ndrow2 = pdend - pdi1;
        nextd = Next[d];
        /* compute a rough flops 'score' for this descendant supernode;
         * GPU-eligible supernodes get a large bonus so they sort first */
        score = ndrow2 * ndcol;
        if ( ndrow2*L_ENTRY >= CHOLMOD_ND_ROW_LIMIT &&
             ndcol*L_ENTRY >= CHOLMOD_ND_COL_LIMIT ) {
            score += Common->devBuffSize;
        }
        /* place in sort buffer */
        scores[n_descendant].score = score;
        scores[n_descendant].d = d;
        n_descendant++;
        d = nextd;
    }
    /* Sort the GPU-eligible supernodes */
    qsort(scores, n_descendant, sizeof(struct cholmod_descendant_score_t),
          (__compar_fn_t)CHOLMOD(score_comp));
    /* Place sorted data back in descendant supernode linked list */
    if ( n_descendant > 0 ) {
        Head[*locals] = scores[0].d;
        if ( n_descendant > 1 ) {
            /* each iteration writes an independent Next[] slot, so the
             * relinking loop is safely parallel */
            #pragma omp parallel for num_threads(CHOLMOD_OMP_NUM_THREADS) \
                if (n_descendant > 64)
            for ( k=1; k<n_descendant; k++ ) {
                Next[scores[k-1].d] = scores[k].d;
            }
        }
        Next[scores[n_descendant-1].d] = firstcpu;
    }
    /* reverse the first CHOLMOD_HOST_SUPERNODE_BUFFERS to better hide PCIe
       communications */
    if ( Head[*locals] != EMPTY && Next[Head[*locals]] != EMPTY ) {
        previousd = Head[*locals];
        d = Next[Head[*locals]];
        while ( d!=EMPTY && nreverse < CHOLMOD_HOST_SUPERNODE_BUFFERS ) {
            kd1 = Super [d] ;   /* d contains cols kd1 to kd2-1 of L */
            kd2 = Super [d+1] ;
            ndcol = kd2 - kd1 ; /* # of columns in all of d */
            pdi = Lpi [d] ;     /* pointer to first row of d in Ls */
            pdend = Lpi [d+1] ; /* pointer just past last row of d in Ls */
            p = Lpos [d] ;      /* offset of 1st row of d affecting s */
            pdi1 = pdi + p ;    /* ptr to 1st row of d affecting s in Ls */
            ndrow2 = pdend - pdi1;
            nextd = Next[d];
            nreverse++;
            if ( ndrow2*L_ENTRY >= CHOLMOD_ND_ROW_LIMIT && ndcol*L_ENTRY >=
                 CHOLMOD_ND_COL_LIMIT ) {
                /* place this supernode at the front of the list */
                Next[previousd] = Next[d];
                Next[d] = Head[*locals];
                Head[*locals] = d;
            }
            else {
                previousd = d;
            }
            d = nextd;
        }
    }
    /* create a 'previous' list so we can traverse backwards */
    *ndescendants = 0;
    if ( Head[*locals] != EMPTY ) {
        Previous[Head[*locals]] = EMPTY;
        for (d = Head [*locals] ; d != EMPTY ; d = dnext) {
            (*ndescendants)++;
            dnext = Next[d];
            if ( dnext != EMPTY ) {
                Previous[dnext] = d;
            }
            else {
                *tail = d;
            }
        }
    }
    return;
}
/* ========================================================================== */
/* === gpu_initialize_supernode ============================================= */
/* ========================================================================== */
/* Prepare the device for factoring a new supernode: zero the device assembly
 * buffer d_A[0] and build the row Map on the GPU. (The previous comment here
 * was a stale copy of the gpu_updateC description.)
 */
/* Initialize per-supernode device state: clear the nscol-by-nsrow assembly
 * buffer d_A[0] on the device, and build the device-side Map from the row
 * indices starting at Ls[psi]. No value is returned; CUDA errors are
 * reported through CHOLMOD_HANDLE_CUDA_ERROR. */
void TEMPLATE2 (CHOLMOD (gpu_initialize_supernode))
(
    cholmod_common *Common,
    Int nscol,      /* # of columns in the supernode */
    Int nsrow,      /* # of rows in the supernode */
    Int psi,        /* start of the supernode's row indices in Ls */
    cholmod_gpu_pointers *gpu_p
)
{
    cudaError_t cuErr;
    /* initialize the device supernode assembly memory to zero */
    cuErr = cudaMemset ( gpu_p->d_A[0], 0, nscol*nsrow*L_ENTRY*sizeof(double) );
    CHOLMOD_HANDLE_CUDA_ERROR(cuErr,"cudaMemset(d_A)");
    /* Create the Map on the device */
    createMapOnDevice ( (Int *)(gpu_p->d_Map),
                        (Int *)(gpu_p->d_Ls), psi, nsrow );
    return;
}
/* ========================================================================== */
/* === gpu_updateC ========================================================== */
/* ========================================================================== */
/* C = L (k1:n-1, kd1:kd2-1) * L (k1:k2-1, kd1:kd2-1)', except that k1:n-1
* refers to all of the rows in L, but many of the rows are all zero.
* Supernode d holds columns kd1 to kd2-1 of L. Nonzero rows in the range
* k1:k2-1 are in the list Ls [pdi1 ... pdi2-1], of size ndrow1. Nonzero rows
* in the range k2:n-1 are in the list Ls [pdi2 ... pdend], of size ndrow2.
* Let L1 = L (Ls [pdi1 ... pdi2-1], kd1:kd2-1), and let L2 = L (Ls [pdi2 ...
* pdend], kd1:kd2-1). C is ndrow2-by-ndrow1. Let C1 be the first ndrow1
* rows of C and let C2 be the last ndrow2-ndrow1 rows of C. Only the lower
* triangular part of C1 needs to be computed since C1 is symmetric.
*
* UpdateC is completely asynchronous w.r.t. the GPU. Once the input buffer
* d_Lx[] has been filled, all of the device operations are issues, and the
* host can continue with filling the next input buffer / or start processing
* all of the descendant supernodes which are not eligible for processing on
* the device (since they are too small - will not fill the device).
*/
/* Compute the Schur-complement update C = L2*L1' (SYRK for the symmetric
 * C1 part, GEMM for the rectangular C2 part) on the GPU and assemble it
 * into d_A[0] via the device RelativeMap. All device work is issued
 * asynchronously on gpuStream[iDevBuff]. Returns 1 if the update was
 * issued on the GPU, 0 if the descendant is too small (caller uses CPU). */
int TEMPLATE2 (CHOLMOD (gpu_updateC))
(
    Int ndrow1,     /* C is ndrow2-by-ndrow2 */
    Int ndrow2,
    Int ndrow,      /* leading dimension of Lx */
    Int ndcol,      /* L1 is ndrow1-by-ndcol */
    Int nsrow,
    Int pdx1,       /* L1 starts at Lx + L_ENTRY*pdx1 */
                    /* L2 starts at Lx + L_ENTRY*(pdx1 + ndrow1) */
    Int pdi1,
    double *Lx,
    double *C,      /* NOTE(review): unused in this body; kept for the
                     * CPU-path-compatible signature — confirm */
    cholmod_common *Common,
    cholmod_gpu_pointers *gpu_p
)
{
    double *devPtrLx, *devPtrC ;
    double alpha, beta ;
    cublasStatus_t cublasStatus ;
    cudaError_t cudaStat [2] ;
    Int ndrow3 ;
    int icol, irow;
    int iHostBuff, iDevBuff ;
#ifndef NTIMER
    double tstart = 0;
#endif
    if ((ndrow2*L_ENTRY < CHOLMOD_ND_ROW_LIMIT) ||
        (ndcol*L_ENTRY < CHOLMOD_ND_COL_LIMIT))
    {
        /* too small for the CUDA BLAS; use the CPU instead */
        return (0) ;
    }
    ndrow3 = ndrow2 - ndrow1 ;  /* # of rows in the rectangular C2 block */
#ifndef NTIMER
    Common->syrkStart = SuiteSparse_time ( ) ;
    Common->CHOLMOD_GPU_SYRK_CALLS++ ;
#endif
    /* ---------------------------------------------------------------------- */
    /* allocate workspace on the GPU */
    /* ---------------------------------------------------------------------- */
    iHostBuff = (Common->ibuffer)%CHOLMOD_HOST_SUPERNODE_BUFFERS;
    iDevBuff = (Common->ibuffer)%CHOLMOD_DEVICE_STREAMS;
    /* cycle the device Lx buffer, d_Lx, through CHOLMOD_DEVICE_STREAMS,
       usually 2, so we can overlap the copy of this descendent supernode
       with the compute of the previous descendant supernode */
    devPtrLx = (double *)(gpu_p->d_Lx[iDevBuff]);
    /* very little overlap between kernels for difference descendant supernodes
       (since we enforce the supernodes must be large enough to fill the
       device) so we only need one C buffer */
    devPtrC = (double *)(gpu_p->d_C);
    /* ---------------------------------------------------------------------- */
    /* copy Lx to the GPU */
    /* ---------------------------------------------------------------------- */
    /* copy host data to pinned buffer first for better H2D bandwidth;
     * this also packs the ndrow-strided columns into ndrow2-strided ones */
    #pragma omp parallel for num_threads(CHOLMOD_OMP_NUM_THREADS) if (ndcol > 32)
    for ( icol=0; icol<ndcol; icol++ ) {
        for ( irow=0; irow<ndrow2*L_ENTRY; irow++ ) {
            gpu_p->h_Lx[iHostBuff][icol*ndrow2*L_ENTRY+irow] =
                Lx[pdx1*L_ENTRY+icol*ndrow*L_ENTRY + irow];
        }
    }
    cudaStat[0] = cudaMemcpyAsync ( devPtrLx,
                                    gpu_p->h_Lx[iHostBuff],
                                    ndrow2*ndcol*L_ENTRY*sizeof(devPtrLx[0]),
                                    cudaMemcpyHostToDevice,
                                    Common->gpuStream[iDevBuff] );
    if ( cudaStat[0] ) {
        CHOLMOD_GPU_PRINTF ((" ERROR cudaMemcpyAsync = %d \n", cudaStat[0]));
        return (0);
    }
    /* make the current stream wait for kernels in previous streams */
    cudaStreamWaitEvent ( Common->gpuStream[iDevBuff],
                          Common->updateCKernelsComplete, 0 ) ;
    /* ---------------------------------------------------------------------- */
    /* create the relative map for this descendant supernode */
    /* ---------------------------------------------------------------------- */
    createRelativeMapOnDevice ( (Int *)(gpu_p->d_Map),
                                (Int *)(gpu_p->d_Ls),
                                (Int *)(gpu_p->d_RelativeMap),
                                pdi1, ndrow2,
                                &(Common->gpuStream[iDevBuff]) );
    /* ---------------------------------------------------------------------- */
    /* do the CUDA SYRK: C1 = L1*L1' (lower triangle only) */
    /* ---------------------------------------------------------------------- */
    cublasStatus = cublasSetStream (Common->cublasHandle,
                                    Common->gpuStream[iDevBuff]) ;
    if (cublasStatus != CUBLAS_STATUS_SUCCESS)
    {
        ERROR (CHOLMOD_GPU_PROBLEM, "GPU CUBLAS stream") ;
    }
    alpha = 1.0 ;
    beta = 0.0 ;
#ifdef REAL
    cublasStatus = cublasDsyrk (Common->cublasHandle,
                                CUBLAS_FILL_MODE_LOWER,
                                CUBLAS_OP_N,
                                (int) ndrow1,
                                (int) ndcol,    /* N, K: L1 is ndrow1-by-ndcol */
                                &alpha,         /* ALPHA: 1 */
                                devPtrLx,
                                ndrow2,         /* A, LDA: L1, ndrow2 */
                                &beta,          /* BETA: 0 */
                                devPtrC,
                                ndrow2) ;       /* C, LDC: C1 */
#else
    cublasStatus = cublasZherk (Common->cublasHandle,
                                CUBLAS_FILL_MODE_LOWER,
                                CUBLAS_OP_N,
                                (int) ndrow1,
                                (int) ndcol,    /* N, K: L1 is ndrow1-by-ndcol*/
                                &alpha,         /* ALPHA: 1 */
                                (const cuDoubleComplex *) devPtrLx,
                                ndrow2,         /* A, LDA: L1, ndrow2 */
                                &beta,          /* BETA: 0 */
                                (cuDoubleComplex *) devPtrC,
                                ndrow2) ;       /* C, LDC: C1 */
#endif
    if (cublasStatus != CUBLAS_STATUS_SUCCESS)
    {
        ERROR (CHOLMOD_GPU_PROBLEM, "GPU CUBLAS routine failure") ;
    }
#ifndef NTIMER
    Common->CHOLMOD_GPU_SYRK_TIME += SuiteSparse_time() - Common->syrkStart;
#endif
    /* ---------------------------------------------------------------------- */
    /* compute remaining (ndrow2-ndrow1)-by-ndrow1 block of C, C2 = L2*L1' */
    /* ---------------------------------------------------------------------- */
#ifndef NTIMER
    Common->CHOLMOD_GPU_GEMM_CALLS++ ;
    tstart = SuiteSparse_time();
#endif
    if (ndrow3 > 0)
    {
#ifndef REAL
        cuDoubleComplex calpha = {1.0,0.0} ;
        cuDoubleComplex cbeta = {0.0,0.0} ;
#endif
        /* ------------------------------------------------------------------ */
        /* do the CUDA BLAS dgemm */
        /* ------------------------------------------------------------------ */
#ifdef REAL
        alpha = 1.0 ;
        beta = 0.0 ;
        cublasStatus = cublasDgemm (Common->cublasHandle,
                                    CUBLAS_OP_N, CUBLAS_OP_T,
                                    ndrow3, ndrow1, ndcol,          /* M, N, K */
                                    &alpha,                         /* ALPHA: 1 */
                                    devPtrLx + L_ENTRY*(ndrow1),    /* A, LDA: L2*/
                                    ndrow2,                         /* ndrow */
                                    devPtrLx,                       /* B, LDB: L1 */
                                    ndrow2,                         /* ndrow */
                                    &beta,                          /* BETA: 0 */
                                    devPtrC + L_ENTRY*ndrow1,       /* C, LDC: C2 */
                                    ndrow2) ;
#else
        cublasStatus = cublasZgemm (Common->cublasHandle,
                                    CUBLAS_OP_N, CUBLAS_OP_C,
                                    ndrow3, ndrow1, ndcol,          /* M, N, K */
                                    &calpha,                        /* ALPHA: 1 */
                                    (const cuDoubleComplex*) devPtrLx + ndrow1,
                                    ndrow2,                         /* ndrow */
                                    (const cuDoubleComplex *) devPtrLx,
                                    ndrow2,                         /* ndrow */
                                    &cbeta,                         /* BETA: 0 */
                                    (cuDoubleComplex *)devPtrC + ndrow1,
                                    ndrow2) ;
#endif
        if (cublasStatus != CUBLAS_STATUS_SUCCESS)
        {
            ERROR (CHOLMOD_GPU_PROBLEM, "GPU CUBLAS routine failure") ;
        }
    }
#ifndef NTIMER
    Common->CHOLMOD_GPU_GEMM_TIME += SuiteSparse_time() - tstart;
#endif
    /* ------------------------------------------------------------------ */
    /* Assemble the update C on the device using the d_RelativeMap */
    /* ------------------------------------------------------------------ */
#ifdef REAL
    addUpdateOnDevice ( gpu_p->d_A[0], devPtrC,
                        gpu_p->d_RelativeMap, ndrow1, ndrow2, nsrow,
                        &(Common->gpuStream[iDevBuff]) );
#else
    addComplexUpdateOnDevice ( gpu_p->d_A[0], devPtrC,
                               gpu_p->d_RelativeMap, ndrow1, ndrow2, nsrow,
                               &(Common->gpuStream[iDevBuff]) );
#endif
    /* Record an event indicating that kernels for
       this descendant are complete */
    cudaEventRecord ( Common->updateCKernelsComplete,
                      Common->gpuStream[iDevBuff]);
    cudaEventRecord ( Common->updateCBuffersFree[iHostBuff],
                      Common->gpuStream[iDevBuff]);
    return (1) ;
}
/* ========================================================================== */
/* === gpu_final_assembly =================================================== */
/* ========================================================================== */
/* If the supernode was assembled on both the CPU and the GPU, this will
* complete the supernode assembly on both the GPU and CPU.
*/
/* Merge the Schur-complement updates assembled on the CPU (in Lx) and on the
 * GPU (in d_A[0]) into the final supernode. Large supernodes (>= the potrf
 * limit) are staged into pinned buffers and summed on the device so the
 * subsequent GPU potrf can consume them; small ones are subtracted into Lx
 * on the host. On return, *iHostBuff/*iDevBuff tell the caller which pinned
 * buffer / stream hold the supernode. */
void TEMPLATE2 (CHOLMOD (gpu_final_assembly))
(
    cholmod_common *Common,
    double *Lx,             /* host copy of the factor */
    Int psx,                /* offset of this supernode within Lx */
    Int nscol,              /* # of columns in the supernode */
    Int nsrow,              /* # of rows in the supernode */
    int supernodeUsedGPU,   /* nonzero if any update was done on the GPU */
    int *iHostBuff,         /* out: pinned host buffer index used */
    int *iDevBuff,          /* out: device stream index used */
    cholmod_gpu_pointers *gpu_p
)
{
    Int iidx, i, j;
    Int iHostBuff2 ;
    Int iDevBuff2 ;
    if ( supernodeUsedGPU ) {
        /* ------------------------------------------------------------------ */
        /* Apply all of the Schur-complement updates, computed on the gpu, to */
        /* the supernode. */
        /* ------------------------------------------------------------------ */
        *iHostBuff = (Common->ibuffer)%CHOLMOD_HOST_SUPERNODE_BUFFERS;
        *iDevBuff = (Common->ibuffer)%CHOLMOD_DEVICE_STREAMS;
        if ( nscol * L_ENTRY >= CHOLMOD_POTRF_LIMIT ) {
            /* If this supernode is going to be factored using the GPU (potrf)
             * then it will need the portion of the update assembled on the
             * CPU. So copy that to a pinned buffer and H2D copy to device. */
            /* wait until a buffer is free */
            cudaEventSynchronize ( Common->updateCBuffersFree[*iHostBuff] );
            /* copy update assembled on CPU to a pinned buffer */
            #pragma omp parallel for num_threads(CHOLMOD_OMP_NUM_THREADS) \
                private(iidx) if (nscol>32)
            for ( j=0; j<nscol; j++ ) {
                for ( i=j; i<nsrow*L_ENTRY; i++ ) {
                    iidx = j*nsrow*L_ENTRY+i;
                    gpu_p->h_Lx[*iHostBuff][iidx] = Lx[psx*L_ENTRY+iidx];
                }
            }
            /* H2D transfer of update assembled on CPU */
            cudaMemcpyAsync ( gpu_p->d_A[1], gpu_p->h_Lx[*iHostBuff],
                              nscol*nsrow*L_ENTRY*sizeof(double),
                              cudaMemcpyHostToDevice,
                              Common->gpuStream[*iDevBuff] );
        }
        /* advance to the next buffer/stream pair for the D2H copy below */
        Common->ibuffer++;
        iHostBuff2 = (Common->ibuffer)%CHOLMOD_HOST_SUPERNODE_BUFFERS;
        iDevBuff2 = (Common->ibuffer)%CHOLMOD_DEVICE_STREAMS;
        /* wait for all kernels to complete */
        cudaEventSynchronize( Common->updateCKernelsComplete );
        /* copy assembled Schur-complement updates computed on GPU */
        cudaMemcpyAsync ( gpu_p->h_Lx[iHostBuff2], gpu_p->d_A[0],
                          nscol*nsrow*L_ENTRY*sizeof(double),
                          cudaMemcpyDeviceToHost,
                          Common->gpuStream[iDevBuff2] );
        if ( nscol * L_ENTRY >= CHOLMOD_POTRF_LIMIT ) {
            /* with the current implementation, potrf still uses data from the
             * CPU - so put the fully assembled supernode in a pinned buffer for
             * fastest access */
            /* need both H2D and D2H copies to be complete */
            cudaDeviceSynchronize();
            /* sum updates from cpu and device on device */
#ifdef REAL
            sumAOnDevice ( gpu_p->d_A[1], gpu_p->d_A[0], -1.0, nsrow, nscol );
#else
            sumComplexAOnDevice ( gpu_p->d_A[1], gpu_p->d_A[0],
                                  -1.0, nsrow, nscol );
#endif
            /* place final assembled supernode in pinned buffer: subtract the
             * GPU-computed update from the CPU-assembled copy on the host too */
            #pragma omp parallel for num_threads(CHOLMOD_OMP_NUM_THREADS) \
                private(iidx) if (nscol>32)
            for ( j=0; j<nscol; j++ ) {
                for ( i=j*L_ENTRY; i<nscol*L_ENTRY; i++ ) {
                    iidx = j*nsrow*L_ENTRY+i;
                    gpu_p->h_Lx[*iHostBuff][iidx] -=
                        gpu_p->h_Lx[iHostBuff2][iidx];
                }
            }
        }
        else
        {
            /* assemble with CPU updates: subtract directly into Lx */
            cudaDeviceSynchronize();
            #pragma omp parallel for num_threads(CHOLMOD_OMP_NUM_THREADS) \
                private(iidx) if (nscol>32)
            for ( j=0; j<nscol; j++ ) {
                for ( i=j*L_ENTRY; i<nsrow*L_ENTRY; i++ ) {
                    iidx = j*nsrow*L_ENTRY+i;
                    Lx[psx*L_ENTRY+iidx] -= gpu_p->h_Lx[iHostBuff2][iidx];
                }
            }
        }
    }
    return;
}
/* ========================================================================== */
/* === gpu_lower_potrf ====================================================== */
/* ========================================================================== */
/* Cholesky factorzation (dpotrf) of a matrix S, operating on the lower
* triangular part only. S is nscol2-by-nscol2 with leading dimension nsrow.
*
* S is the top part of the supernode (the lower triangular matrx).
* This function also copies the bottom rectangular part of the supernode (B)
* onto the GPU, in preparation for gpu_triangular_solve.
*/
/*
* On entry, d_A[1] contains the fully assembled supernode
*/
int TEMPLATE2 (CHOLMOD (gpu_lower_potrf))
(
Int nscol2, /* S is nscol2-by-nscol2 */
Int nsrow, /* leading dimension of S */
Int psx, /* S is located at Lx + L_ENTRY*psx */
double *Lx, /* contains S; overwritten with Cholesky factor */
Int *info, /* BLAS info return value */
cholmod_common *Common,
cholmod_gpu_pointers *gpu_p
)
{
double *devPtrA, *devPtrB, *A ;
double alpha, beta ;
cudaError_t cudaStat ;
cublasStatus_t cublasStatus ;
Int j, nsrow2, nb, n, gpu_lda, lda, gpu_ldb ;
int ilda, ijb, iinfo ;
#ifndef NTIMER
double tstart ;
#endif
if (nscol2 * L_ENTRY < CHOLMOD_POTRF_LIMIT)
{
/* too small for the CUDA BLAS; use the CPU instead */
return (0) ;
}
#ifndef NTIMER
tstart = SuiteSparse_time ( ) ;
Common->CHOLMOD_GPU_POTRF_CALLS++ ;
#endif
nsrow2 = nsrow - nscol2 ;
/* ---------------------------------------------------------------------- */
/* heuristic to get the block size depending of the problem size */
/* ---------------------------------------------------------------------- */
nb = 128 ;
if (nscol2 > 4096) nb = 256 ;
if (nscol2 > 8192) nb = 384 ;
n = nscol2 ;
gpu_lda = ((nscol2+31)/32)*32 ;
lda = nsrow ;
A = gpu_p->h_Lx[(Common->ibuffer+CHOLMOD_HOST_SUPERNODE_BUFFERS-1)%
CHOLMOD_HOST_SUPERNODE_BUFFERS];
/* ---------------------------------------------------------------------- */
/* determine the GPU leading dimension of B */
/* ---------------------------------------------------------------------- */
gpu_ldb = 0 ;
if (nsrow2 > 0)
{
gpu_ldb = ((nsrow2+31)/32)*32 ;
}
/* ---------------------------------------------------------------------- */
/* remember where device memory is, to be used by triangular solve later */
/* ---------------------------------------------------------------------- */
devPtrA = gpu_p->d_Lx[0];
devPtrB = gpu_p->d_Lx[1];
/* ---------------------------------------------------------------------- */
/* copy A from device to device */
/* ---------------------------------------------------------------------- */
cudaStat = cudaMemcpy2DAsync ( devPtrA,
gpu_lda * L_ENTRY * sizeof (devPtrA[0]),
gpu_p->d_A[1],
nsrow * L_ENTRY * sizeof (Lx[0]),
nscol2 * L_ENTRY * sizeof (devPtrA[0]),
nscol2,
cudaMemcpyDeviceToDevice,
Common->gpuStream[0] );
if ( cudaStat ) {
ERROR ( CHOLMOD_GPU_PROBLEM, "GPU memcopy device to device");
}
/* ---------------------------------------------------------------------- */
/* copy B in advance, for gpu_triangular_solve */
/* ---------------------------------------------------------------------- */
if (nsrow2 > 0)
{
cudaStat = cudaMemcpy2DAsync (devPtrB,
gpu_ldb * L_ENTRY * sizeof (devPtrB [0]),
gpu_p->d_A[1] + L_ENTRY*nscol2,
nsrow * L_ENTRY * sizeof (Lx [0]),
nsrow2 * L_ENTRY * sizeof (devPtrB [0]),
nscol2,
cudaMemcpyDeviceToDevice,
Common->gpuStream[0]) ;
if (cudaStat)
{
ERROR (CHOLMOD_GPU_PROBLEM, "GPU memcopy to device") ;
}
}
/* ------------------------------------------------------------------ */
/* define the dpotrf stream */
/* ------------------------------------------------------------------ */
cublasStatus = cublasSetStream (Common->cublasHandle,
Common->gpuStream [0]) ;
if (cublasStatus != CUBLAS_STATUS_SUCCESS) {
ERROR (CHOLMOD_GPU_PROBLEM, "GPU CUBLAS stream") ;
}
/* ---------------------------------------------------------------------- */
/* block Cholesky factorization of S */
/* ---------------------------------------------------------------------- */
for (j = 0 ; j < n ; j += nb)
{
Int jb = nb < (n-j) ? nb : (n-j) ;
/* ------------------------------------------------------------------ */
/* do the CUDA BLAS dsyrk */
/* ------------------------------------------------------------------ */
alpha = -1.0 ;
beta = 1.0 ;
#ifdef REAL
cublasStatus = cublasDsyrk (Common->cublasHandle,
CUBLAS_FILL_MODE_LOWER, CUBLAS_OP_N, jb, j,
&alpha, devPtrA + j, gpu_lda,
&beta, devPtrA + j + j*gpu_lda, gpu_lda) ;
#else
cublasStatus = cublasZherk (Common->cublasHandle,
CUBLAS_FILL_MODE_LOWER, CUBLAS_OP_N, jb, j,
&alpha, (cuDoubleComplex*)devPtrA + j,
gpu_lda,
&beta,
(cuDoubleComplex*)devPtrA + j + j*gpu_lda,
gpu_lda) ;
#endif
if (cublasStatus != CUBLAS_STATUS_SUCCESS)
{
ERROR (CHOLMOD_GPU_PROBLEM, "GPU CUBLAS routine failure") ;
}
/* ------------------------------------------------------------------ */
cudaStat = cudaEventRecord (Common->cublasEventPotrf [0],
Common->gpuStream [0]) ;
if (cudaStat)
{
ERROR (CHOLMOD_GPU_PROBLEM, "CUDA event failure") ;
}
cudaStat = cudaStreamWaitEvent (Common->gpuStream [1],
Common->cublasEventPotrf [0], 0) ;
if (cudaStat)
{
ERROR (CHOLMOD_GPU_PROBLEM, "CUDA event failure") ;
}
/* ------------------------------------------------------------------ */
/* copy back the jb columns on two different streams */
/* ------------------------------------------------------------------ */
cudaStat = cudaMemcpy2DAsync (A + L_ENTRY*(j + j*lda),
lda * L_ENTRY * sizeof (double),
devPtrA + L_ENTRY*(j + j*gpu_lda),
gpu_lda * L_ENTRY * sizeof (double),
L_ENTRY * sizeof (double)*jb,
jb,
cudaMemcpyDeviceToHost,
Common->gpuStream [1]) ;
if (cudaStat)
{
ERROR (CHOLMOD_GPU_PROBLEM, "GPU memcopy from device") ;
}
/* ------------------------------------------------------------------ */
/* do the CUDA BLAS dgemm */
/* ------------------------------------------------------------------ */
if ((j+jb) < n)
{
#ifdef REAL
alpha = -1.0 ;
beta = 1.0 ;
cublasStatus = cublasDgemm (Common->cublasHandle,
CUBLAS_OP_N, CUBLAS_OP_T,
(n-j-jb), jb, j,
&alpha,
devPtrA + (j+jb), gpu_lda,
devPtrA + (j) , gpu_lda,
&beta,
devPtrA + (j+jb + j*gpu_lda), gpu_lda) ;
#else
cuDoubleComplex calpha = {-1.0,0.0} ;
cuDoubleComplex cbeta = { 1.0,0.0} ;
cublasStatus = cublasZgemm (Common->cublasHandle,
CUBLAS_OP_N, CUBLAS_OP_C,
(n-j-jb), jb, j,
&calpha,
(cuDoubleComplex*)devPtrA + (j+jb),
gpu_lda,
(cuDoubleComplex*)devPtrA + (j),
gpu_lda,
&cbeta,
(cuDoubleComplex*)devPtrA +
(j+jb + j*gpu_lda),
gpu_lda ) ;
#endif
if (cublasStatus != CUBLAS_STATUS_SUCCESS)
{
ERROR (CHOLMOD_GPU_PROBLEM, "GPU CUBLAS routine failure") ;
}
}
cudaStat = cudaStreamSynchronize (Common->gpuStream [1]) ;
if (cudaStat)
{
ERROR (CHOLMOD_GPU_PROBLEM, "GPU memcopy to device") ;
}
/* ------------------------------------------------------------------ */
/* compute the Cholesky factorization of the jbxjb block on the CPU */
/* ------------------------------------------------------------------ */
ilda = (int) lda ;
ijb = jb ;
#ifdef REAL
LAPACK_DPOTRF ("L", &ijb, A + L_ENTRY * (j + j*lda), &ilda, &iinfo) ;
#else
LAPACK_ZPOTRF ("L", &ijb, A + L_ENTRY * (j + j*lda), &ilda, &iinfo) ;
#endif
*info = iinfo ;
if (*info != 0)
{
*info = *info + j ;
break ;
}
/* ------------------------------------------------------------------ */
/* copy the result back to the GPU */
/* ------------------------------------------------------------------ */
cudaStat = cudaMemcpy2DAsync (devPtrA + L_ENTRY*(j + j*gpu_lda),
gpu_lda * L_ENTRY * sizeof (double),
A + L_ENTRY * (j + j*lda),
lda * L_ENTRY * sizeof (double),
L_ENTRY * sizeof (double) * jb,
jb,
cudaMemcpyHostToDevice,
Common->gpuStream [0]) ;
if (cudaStat)
{
ERROR (CHOLMOD_GPU_PROBLEM, "GPU memcopy to device") ;
}
/* ------------------------------------------------------------------ */
/* do the CUDA BLAS dtrsm */
/* ------------------------------------------------------------------ */
if ((j+jb) < n)
{
#ifdef REAL
alpha = 1.0 ;
cublasStatus = cublasDtrsm (Common->cublasHandle,
CUBLAS_SIDE_RIGHT,
CUBLAS_FILL_MODE_LOWER,
CUBLAS_OP_T, CUBLAS_DIAG_NON_UNIT,
(n-j-jb), jb,
&alpha,
devPtrA + (j + j*gpu_lda), gpu_lda,
devPtrA + (j+jb + j*gpu_lda), gpu_lda) ;
#else
cuDoubleComplex calpha = {1.0,0.0};
cublasStatus = cublasZtrsm (Common->cublasHandle,
CUBLAS_SIDE_RIGHT,
CUBLAS_FILL_MODE_LOWER,
CUBLAS_OP_C, CUBLAS_DIAG_NON_UNIT,
(n-j-jb), jb,
&calpha,
(cuDoubleComplex *)devPtrA +
(j + j*gpu_lda),
gpu_lda,
(cuDoubleComplex *)devPtrA +
(j+jb + j*gpu_lda),
gpu_lda) ;
#endif
if (cublasStatus != CUBLAS_STATUS_SUCCESS)
{
ERROR (CHOLMOD_GPU_PROBLEM, "GPU CUBLAS routine failure") ;
}
/* -------------------------------------------------------------- */
/* Copy factored column back to host. */
/* -------------------------------------------------------------- */
cudaStat = cudaEventRecord (Common->cublasEventPotrf[2],
Common->gpuStream[0]) ;
if (cudaStat)
{
ERROR (CHOLMOD_GPU_PROBLEM, "CUDA event failure") ;
}
cudaStat = cudaStreamWaitEvent (Common->gpuStream[1],
Common->cublasEventPotrf[2], 0) ;
if (cudaStat)
{
ERROR (CHOLMOD_GPU_PROBLEM, "CUDA event failure") ;
}
cudaStat = cudaMemcpy2DAsync (A + L_ENTRY*(j + jb + j * lda),
lda * L_ENTRY * sizeof (double),
devPtrA + L_ENTRY*
(j + jb + j * gpu_lda),
gpu_lda * L_ENTRY * sizeof (double),
L_ENTRY * sizeof (double)*
(n - j - jb), jb,
cudaMemcpyDeviceToHost,
Common->gpuStream[1]) ;
if (cudaStat)
{
ERROR (CHOLMOD_GPU_PROBLEM, "GPU memcopy to device") ;
}
}
}
#ifndef NTIMER
Common->CHOLMOD_GPU_POTRF_TIME += SuiteSparse_time ( ) - tstart ;
#endif
return (1) ;
}
/* ========================================================================== */
/* === gpu_triangular_solve ================================================= */
/* ========================================================================== */
/* The current supernode is columns k1 to k2-1 of L. Let L1 be the diagonal
* block (factorized by dpotrf/zpotrf above; rows/cols k1:k2-1), and L2 be rows
* k2:n-1 and columns k1:k2-1 of L. The triangular system to solve is L2*L1' =
* S2, where S2 is overwritten with L2. More precisely, L2 = S2 / L1' in
* MATLAB notation.
*/
/* Version with pre-allocation in POTRF */
int TEMPLATE2 (CHOLMOD (gpu_triangular_solve))
(
Int nsrow2, /* L2 and S2 are nsrow2-by-nscol2 */
Int nscol2, /* L1 is nscol2-by-nscol2 */
Int nsrow, /* leading dimension of L1, L2, and S2 */
Int psx, /* L1 is at Lx+L_ENTRY*psx;
 * L2 at Lx+L_ENTRY*(psx+nscol2)*/
double *Lx, /* holds L1, L2, and S2 */
cholmod_common *Common,
cholmod_gpu_pointers *gpu_p
)
{
double *devPtrA, *devPtrB ;
cudaError_t cudaStat ;
cublasStatus_t cublasStatus ;
Int gpu_lda, gpu_ldb, gpu_rowstep ; /* NOTE(review): gpu_rowstep is declared but never used here */
Int gpu_row_start = 0 ;
Int gpu_row_max_chunk, gpu_row_chunk;
int ibuf = 0;
int iblock = 0;
/* host buffer index: ibuffer minus one, wrapped around the ring of
 * CHOLMOD_HOST_SUPERNODE_BUFFERS pinned buffers. Presumably this is the
 * buffer most recently associated with this supernode by the caller --
 * verify against the calling factorization routine. */
int iHostBuff = (Common->ibuffer+CHOLMOD_HOST_SUPERNODE_BUFFERS-1) %
CHOLMOD_HOST_SUPERNODE_BUFFERS;
int i, j;
Int iidx;
int iwrap;
#ifndef NTIMER
double tstart ;
#endif
/* chunk size differs between real and complex because a complex entry is
 * twice the size (L_ENTRY); alpha is the unit scalar for the TRSM */
#ifdef REAL
double alpha = 1.0 ;
gpu_row_max_chunk = 768;
#else
cuDoubleComplex calpha = {1.0,0.0} ;
gpu_row_max_chunk = 256;
#endif
/* nothing to solve if L2/S2 has no rows */
if ( nsrow2 <= 0 )
{
return (0) ;
}
#ifndef NTIMER
tstart = SuiteSparse_time ( ) ;
Common->CHOLMOD_GPU_TRSM_CALLS++ ;
#endif
/* device leading dimensions, rounded up to a multiple of 32 */
gpu_lda = ((nscol2+31)/32)*32 ;
gpu_ldb = ((nsrow2+31)/32)*32 ;
devPtrA = gpu_p->d_Lx[0]; /* L1 (the factored diagonal block) on device */
devPtrB = gpu_p->d_Lx[1]; /* S2, overwritten in place with L2, on device */
/* make sure the copy of B has completed */
cudaStreamSynchronize( Common->gpuStream[0] ); /* NOTE(review): return status ignored */
/* ---------------------------------------------------------------------- */
/* do the CUDA BLAS dtrsm */
/* ---------------------------------------------------------------------- */
/* Pipeline: solve S2 in row chunks of at most gpu_row_max_chunk rows.
 * Each chunk's TRSM and its device-to-host copy are issued on one of
 * CHOLMOD_HOST_SUPERNODE_BUFFERS streams (round-robin via ibuf), so the
 * solve of one chunk overlaps the copy-back of the previous ones. */
while ( gpu_row_start < nsrow2 )
{
gpu_row_chunk = nsrow2 - gpu_row_start;
if ( gpu_row_chunk > gpu_row_max_chunk ) {
gpu_row_chunk = gpu_row_max_chunk;
}
/* route the next cuBLAS call onto this chunk's stream */
cublasStatus = cublasSetStream ( Common->cublasHandle,
Common->gpuStream[ibuf] );
if ( cublasStatus != CUBLAS_STATUS_SUCCESS )
{
ERROR ( CHOLMOD_GPU_PROBLEM, "GPU CUBLAS stream");
}
/* chunk solve: B(rows gpu_row_start..+chunk) = B / L1' (right side,
 * lower triangular, (conjugate-)transposed, non-unit diagonal) */
#ifdef REAL
cublasStatus = cublasDtrsm (Common->cublasHandle,
CUBLAS_SIDE_RIGHT,
CUBLAS_FILL_MODE_LOWER,
CUBLAS_OP_T,
CUBLAS_DIAG_NON_UNIT,
gpu_row_chunk,
nscol2,
&alpha,
devPtrA,
gpu_lda,
devPtrB + gpu_row_start,
gpu_ldb) ;
#else
cublasStatus = cublasZtrsm (Common->cublasHandle,
CUBLAS_SIDE_RIGHT,
CUBLAS_FILL_MODE_LOWER,
CUBLAS_OP_C,
CUBLAS_DIAG_NON_UNIT,
gpu_row_chunk,
nscol2,
&calpha,
(const cuDoubleComplex *) devPtrA,
gpu_lda,
(cuDoubleComplex *)devPtrB + gpu_row_start ,
gpu_ldb) ;
#endif
if (cublasStatus != CUBLAS_STATUS_SUCCESS)
{
ERROR (CHOLMOD_GPU_PROBLEM, "GPU CUBLAS routine failure") ;
}
/* ------------------------------------------------------------------ */
/* copy result back to the CPU */
/* ------------------------------------------------------------------ */
/* async strided copy of the solved chunk into the pinned host buffer;
 * note the host rows land at offset nscol2+gpu_row_start, i.e. below
 * the diagonal block in the supernode layout */
cudaStat = cudaMemcpy2DAsync (
gpu_p->h_Lx[iHostBuff] +
L_ENTRY*(nscol2+gpu_row_start),
nsrow * L_ENTRY * sizeof (Lx [0]),
devPtrB + L_ENTRY*gpu_row_start,
gpu_ldb * L_ENTRY * sizeof (devPtrB [0]),
gpu_row_chunk * L_ENTRY *
sizeof (devPtrB [0]),
nscol2,
cudaMemcpyDeviceToHost,
Common->gpuStream[ibuf]);
if (cudaStat)
{
ERROR (CHOLMOD_GPU_PROBLEM, "GPU memcopy from device") ;
}
/* mark this buffer's work complete so the scatter loop below can wait
 * on it (NOTE(review): event-record return status ignored) */
cudaEventRecord ( Common->updateCBuffersFree[ibuf],
Common->gpuStream[ibuf] );
gpu_row_start += gpu_row_chunk;
ibuf++;
ibuf = ibuf % CHOLMOD_HOST_SUPERNODE_BUFFERS;
iblock ++;
if ( iblock >= CHOLMOD_HOST_SUPERNODE_BUFFERS )
{
Int gpu_row_start2 ;
Int gpu_row_end ;
/* then CHOLMOD_HOST_SUPERNODE_BUFFERS worth of work has been
 * scheduled, so check for completed events and copy result into
 * Lx before continuing. */
cudaEventSynchronize ( Common->updateCBuffersFree
[iblock%CHOLMOD_HOST_SUPERNODE_BUFFERS] );
/* copy into Lx */
/* row range of the chunk that finished CHOLMOD_HOST_SUPERNODE_BUFFERS
 * iterations ago (offset by nscol2 to skip the diagonal block) */
gpu_row_start2 = nscol2 +
(iblock-CHOLMOD_HOST_SUPERNODE_BUFFERS)
*gpu_row_max_chunk;
gpu_row_end = gpu_row_start2+gpu_row_max_chunk;
if ( gpu_row_end > nsrow ) gpu_row_end = nsrow;
#pragma omp parallel for num_threads(CHOLMOD_OMP_NUM_THREADS) \
private(iidx) if ( nscol2 > 32 )
for ( j=0; j<nscol2; j++ ) {
for ( i=gpu_row_start2*L_ENTRY; i<gpu_row_end*L_ENTRY; i++ ) {
iidx = j*nsrow*L_ENTRY+i;
Lx[psx*L_ENTRY+iidx] = gpu_p->h_Lx[iHostBuff][iidx];
}
}
}
}
/* Convenient to copy the L1 block here */
/* scatter the (already factored) nscol2-by-nscol2 lower-triangular
 * diagonal block from the host buffer into Lx */
#pragma omp parallel for num_threads(CHOLMOD_OMP_NUM_THREADS) \
private ( iidx ) if ( nscol2 > 32 )
for ( j=0; j<nscol2; j++ ) {
for ( i=j*L_ENTRY; i<nscol2*L_ENTRY; i++ ) {
iidx = j*nsrow*L_ENTRY + i;
Lx[psx*L_ENTRY+iidx] = gpu_p->h_Lx[iHostBuff][iidx];
}
}
/* now account for the last HSTREAMS buffers */
/* drain the pipeline: the last CHOLMOD_HOST_SUPERNODE_BUFFERS chunks were
 * scheduled but not yet scattered into Lx by the loop above */
for ( iwrap=0; iwrap<CHOLMOD_HOST_SUPERNODE_BUFFERS; iwrap++ )
{
int i, j;
Int gpu_row_start2 = nscol2 + (iblock-CHOLMOD_HOST_SUPERNODE_BUFFERS)
*gpu_row_max_chunk;
/* guard: only chunks that were actually scheduled and lie within the
 * supernode need to be copied */
if (iblock-CHOLMOD_HOST_SUPERNODE_BUFFERS >= 0 &&
gpu_row_start2 < nsrow )
{
Int iidx;
Int gpu_row_end = gpu_row_start2+gpu_row_max_chunk;
if ( gpu_row_end > nsrow ) gpu_row_end = nsrow;
cudaEventSynchronize ( Common->updateCBuffersFree
[iblock%CHOLMOD_HOST_SUPERNODE_BUFFERS] );
/* copy into Lx */
#pragma omp parallel for num_threads(CHOLMOD_OMP_NUM_THREADS) \
private(iidx) if ( nscol2 > 32 )
for ( j=0; j<nscol2; j++ ) {
for ( i=gpu_row_start2*L_ENTRY; i<gpu_row_end*L_ENTRY; i++ ) {
iidx = j*nsrow*L_ENTRY+i;
Lx[psx*L_ENTRY+iidx] = gpu_p->h_Lx[iHostBuff][iidx];
}
}
}
iblock++;
}
/* ---------------------------------------------------------------------- */
/* return */
/* ---------------------------------------------------------------------- */
#ifndef NTIMER
Common->CHOLMOD_GPU_TRSM_TIME += SuiteSparse_time ( ) - tstart ;
#endif
return (1) ;
}
/* ========================================================================== */
/* === gpu_copy_supernode =================================================== */
/* ========================================================================== */
/*
* In the event gpu_triangular_solve is not needed / called, this routine
* copies the factored diagonal block from the GPU to the CPU.
*/
void TEMPLATE2 (CHOLMOD (gpu_copy_supernode))
(
cholmod_common *Common,
double *Lx,
Int psx,
Int nscol,
Int nscol2,
Int nsrow,
int supernodeUsedGPU,
int iHostBuff,
cholmod_gpu_pointers *gpu_p
)
{
    Int idx, row, col ;

    /* nothing to do unless the GPU actually factored a block large enough
     * to have been processed on the device */
    if ( !supernodeUsedGPU || nscol2 * L_ENTRY < CHOLMOD_POTRF_LIMIT )
    {
        return ;
    }

    /* wait for all pending device work (including the async copy into the
     * pinned host buffer h_Lx) to finish before reading it */
    cudaDeviceSynchronize ( ) ;

    /* scatter the nscol-by-nscol lower-triangular diagonal block, column by
     * column, from the host buffer into the factor Lx */
    #pragma omp parallel for num_threads(CHOLMOD_OMP_NUM_THREADS) \
        private(idx,row,col) if (nscol>32)
    for ( col = 0 ; col < nscol ; col++ )
    {
        for ( row = col*L_ENTRY ; row < nscol*L_ENTRY ; row++ )
        {
            idx = col*nsrow*L_ENTRY + row ;
            Lx [psx*L_ENTRY + idx] = gpu_p->h_Lx [iHostBuff][idx] ;
        }
    }
    return ;
}
#endif
#undef REAL
#undef COMPLEX
#undef ZOMPLEX
|
fwk_cooker.h | // asset pipeline framework
// - rlyeh, public domain.
//
// all cooked assets are stored inside .cook.zip file at root folder, which acts as an asset database.
// during game boot, the database gets rebuilt as follows:
// 1. compare local disk files against file in zip database. for each mismatch do:
// 2. - invalidate its entry in database, if local file was removed from disk.
// 3. - write its *cooked* contents into database, if local file was created or modified from disk.
//
// notes: metadata from every raw asset is stored in the comment field, inside the .cook.zip archive.
// @todo: fix leaks
// @todo: symlink exact files
// @todo: parallelize list of files in N cores. get N .cook files instead. mount them all.
#ifndef COOKER_H
#define COOKER_H
enum {
COOKER_ASYNC = 1,
};
// user defined callback for asset cooking:
// must read infile, process data, and write it to outfile
// must set errno on exit if errors are found
// must return compression level if archive needs to be cooked, else return <0
typedef int (*cooker_callback_t)(char *filename, const char *ext, const char header[16], FILE *in, FILE *out, const char *info);
int cooker_progress(); // [0..100]
bool cooker( const char *masks, cooker_callback_t cb, int flags );
#endif
// -----------------------------------------------------------------------------
#ifdef COOKER_C
#pragma once
#ifdef _MSC_VER
#include <io.h>
#else
#include <unistd.h>
#endif
#ifndef PATH_MAX
#define PATH_MAX MAX_PATH
#endif
#ifndef COOKER_TMPFILE
#define COOKER_TMPFILE ".temp" // tmpnam(0) // ".temp"
#endif
/* one scanned disk file: name, change status, timestamp and size */
typedef struct fs {
char *fname, status;
uint64_t stamp; /* human-readable base10 timestamp (see file_stamp_human) */
uint64_t bytes; /* file size on disk */
} fs;
/* per-worker cooking job: the slice [from,to) of `files` is cooked into `zipfile` */
struct cooker_args {
const char **files;
cooker_callback_t callback;
char zipfile[16];
int from, to;
};
static
// scan the job's file list and return an array of fs records (name
// normalized to forward slashes and rebased to be cwd-relative).
// folders and dot-files are skipped. caller owns the returned array and
// the STRDUP'd names inside it.
array(fs) cooker__fs_scan(struct cooker_args *args) {
    array(struct fs) fs = 0;

    // get normalized current working directory (absolute). it is loop
    // invariant, so compute it once instead of once per file; the original
    // also ignored getcwd() failure -- fall back to an empty cwd so paths
    // are simply kept as-is.
    char cwd[PATH_MAX] = {0};
    if( !getcwd(cwd, sizeof(cwd)) ) cwd[0] = 0;
    for(int k = 0; cwd[k]; ++k) if(cwd[k] == '\\') cwd[k] = '/';
    int cwdlen = strlen(cwd);

    // iterate all previously scanned files
    for( int i = args->from; i < args->to; ++i ) {
        const char *fname = args->files[i];
        if( file_directory(fname) ) continue; // skip folders

        // @todo: normalize path & rebase here (absolute to local)
        // [...]
        // fi.normalized = ; tolower->to_underscore([]();:+ )->remove_extra_underscores

        // make buffer writable
        char buffer[PATH_MAX];
        snprintf(buffer, PATH_MAX, "%s", fname);

        // normalize path (use a distinct index name: the original shadowed
        // the outer file-loop variable `i` here)
        for(int k = 0; buffer[k]; ++k) if(buffer[k] == '\\') buffer[k] = '/';

        // rebase from absolute to relative
        char *buf = buffer;
        if( cwdlen && !strncmp(buf, cwd, cwdlen) ) buf += cwdlen;
        while(buf[0] == '/') ++buf;

        if( file_name(buf)[0] == '.' ) continue; // skip system files

        struct fs fi = {0};
        fi.fname = STRDUP(buf);
        fi.bytes = file_size(buf);
        fi.stamp = file_stamp_human(buf); // human-readable base10 timestamp
        array_push(fs, fi);
    }
    return fs;
}
static
// linear scan: return the first entry whose filename matches, or NULL.
fs *cooker__fs_locate(array(struct fs) fs, const char *file) {
    int count = array_count(fs);
    for( int idx = 0; idx < count; ++idx ) {
        if( 0 == strcmp(fs[idx].fname, file) ) {
            return &fs[idx];
        }
    }
    return 0;
}
static array(char*) added;
static array(char*) changed;
static array(char*) deleted;
static array(char*) uncooked;
static
// diff the current disk scan (`now`) against the zip database (`old`),
// populating the added/changed/deleted/uncooked global arrays.
// always returns 1.
int cooker__fs_diff( zip* old, array(fs) now ) {
    array_free(added);
    array_free(changed);
    array_free(deleted);
    array_free(uncooked);

    // if no zipfile is present, all files are new and must be added
    if( !old ) {
        for( int i = 0; i < array_count(now); ++i ) {
            array_push(uncooked, STRDUP(now[i].fname));
        }
        return 1;
    }

    // compare for new & changed files
    for( int i = 0; i < array_count(now); ++i ) {
        int found = zip_find(old, now[i].fname);
        if( found < 0 ) {
            array_push(added, STRDUP(now[i].fname));
            array_push(uncooked, STRDUP(now[i].fname));
        } else {
            uint64_t oldsize = _atoi64(zip_comment(old,found)); // zip_size(old, found); returns sizeof processed asset. return original size of unprocessed asset, which we store in comment section
            uint64_t oldstamp = _atoi64(zip_modt(old, found)+20);
            // BUG FIX: the original used abs(oldstamp - now[i].stamp), which
            // first wraps the unsigned 64-bit subtraction and then truncates
            // it to int -- giving wrong answers for large timestamps.
            // compute |oldstamp - stamp| explicitly in uint64_t instead.
            uint64_t drift = oldstamp > now[i].stamp ?
                oldstamp - now[i].stamp : now[i].stamp - oldstamp;
            if( oldsize != now[i].bytes || drift > 1 ) { // @fixme: should use hash instead. hashof(tool) ^ hashof(args used) ^ hashof(rawsize) ^ hashof(rawdate)
                printf("%s:\t%llu vs %llu, %llu vs %llu\n", now[i].fname, (uint64_t)oldsize,(uint64_t)now[i].bytes, (uint64_t)oldstamp,(uint64_t)now[i].stamp);
                array_push(changed, STRDUP(now[i].fname));
                array_push(uncooked, STRDUP(now[i].fname));
            }
        }
    }

    // compare for deleted files
    for( int i = 0; i < zip_count(old); ++i ) {
        char *oldname = zip_name(old, i);
        int idx = zip_find(old, oldname); // find latest versioned file in zip
        unsigned oldsize = zip_size(old, idx);
        if (!oldsize) continue; // zero-size entry == deletion tombstone
        fs *found = cooker__fs_locate(now, oldname);
        if( !found ) {
            array_push(deleted, STRDUP(oldname));
        }
    }
    return 1;
}
/* overall cooking progress, 0..100. written by the cooker thread(s),
 * polled from the main thread. NOTE(review): volatile is not a thread
 * synchronization primitive; an atomic int would be stricter -- confirm
 * whether torn/stale reads matter for a progress bar (they usually don't). */
static volatile int cooker__progress = 0;
int cooker_progress() {
return cooker__progress;
}
static
// cook the job described by userptr (a struct cooker_args*) synchronously:
// scan disk, diff against the zip database, write tombstones for deleted
// files, and run the user callback over every added/changed file, storing
// the cooked output (with the raw size in the comment field) in the zip.
// returns 1 on completion; PANICs on unrecoverable I/O errors.
int cooker_sync( void *userptr ) {
    struct cooker_args *args = userptr;
    ASSERT( args && args->callback );

    array(struct fs) now = cooker__fs_scan(args);
    //printf("Scanned: %d items found\n", array_count(now));

    // a zero-byte database is useless; discard it so it gets rebuilt
    if( file_size(args->zipfile) == 0 ) unlink(args->zipfile);

    // populate added/deleted/changed arrays by examining current disk vs last cache
    zip *z = zip_open(args->zipfile, "r+b");
    cooker__fs_diff(z, now);
    if( z ) zip_close(z);
    fflush(0);

    // reopen for appending; recreate from scratch if it cannot be updated
    z = zip_open(args->zipfile, "a+b");
    if( !z ) {
        unlink(args->zipfile);
        z = zip_open(args->zipfile, "a+b"); // try again
        if(!z) PANIC("cannot open file for updating: %s", args->zipfile);
    }

    // deleted files: append a zero-length entry (comment "0") as a tombstone.
    // BUG FIX: the original never checked either fopen(); a failure led to
    // fclose(NULL)/zip_append_file(NULL), which is undefined behavior.
    for( int i = 0, end = array_count(deleted); i < end; ++i ) {
        printf("Deleting %03d%% %s\n", (i+1) == end ? 100 : (i * 100) / end, deleted[i]);
        FILE* out = fopen(COOKER_TMPFILE, "wb");
        if( !out ) PANIC("cannot create %s file for writing", COOKER_TMPFILE);
        fclose(out);
        FILE* in = fopen(COOKER_TMPFILE, "rb");
        if( !in ) PANIC("cannot open %s file for reading", COOKER_TMPFILE);
        char *comment = "0";
        zip_append_file(z, deleted[i], comment, in, 0);
        fclose(in);
    }

    // added or changed files
    // #pragma omp parallel for
    for( int i = 0, end = array_count(uncooked); i < end; ++i ) {
        cooker__progress = (i+1) == end ? 100 : (i * 100) / end; // (i+i>0) * 100.f / end;
        char *fname = uncooked[i];

        FILE *in = fopen(fname, "rb");
        if( !in ) PANIC("cannot open file for reading: %s", fname);
        fseek(in, 0L, SEEK_END);
        long insz = ftell(in); // -1 on error: clamp to 0 instead of wrapping size_t
        size_t inlen = insz > 0 ? (size_t)insz : 0;
        fseek(in, 0L, SEEK_SET);

        unlink(COOKER_TMPFILE);
        FILE *out = fopen(COOKER_TMPFILE, "a+b");
        if( !out ) PANIC("cannot open .temp file for writing");
        fseek(out, 0L, SEEK_SET);

        char *ext = strrchr(fname, '.'); ext = ext ? ext : ""; // .jpg
        // peek the first 16 bytes so the callback can sniff the file type.
        // BUG FIX: zero-fill first and acknowledge short reads -- the original
        // ignored fread()'s result, passing uninitialized bytes to the
        // callback for files shorter than 16 bytes.
        char header[16] = {0};
        if( fread(header, 1, 16, in) < 16 ) { /* short file: header is zero-padded */ }
        fseek(in, 0L, SEEK_SET);

        const char *info = stringf("Cooking %03d%% %s\n", cooker__progress, uncooked[i]);
        int compression = (errno = 0, args->callback(fname, ext, header, in, out, info));
        int failed = errno != 0;

        if( failed ) PRINTF("importing failed: %s", fname);
        else if( compression >= 0 ) {
            fseek(out, 0L, SEEK_SET);
            char *comment = stringf("%d",(int)inlen); // raw size goes in the comment
            if( !zip_append_file(z, fname, comment, out, compression) ) {
                PANIC("failed to add processed file into %s: %s", args->zipfile, fname);
            }
        }

        fclose(in);
        fclose(out);
    }
    zip_close(z);
    unlink(COOKER_TMPFILE);
    fflush(0);
    cooker__progress = 100;
    return 1;
}
static
// thread entry point: block until the main window exists (so cooking
// overlaps the rest of boot), then run the synchronous cooker.
int cooker_async( void *userptr ) {
    for( ; !window_handle() ; ) {
        sleep_ms(100); // wait for window handle to be created
    }
    int result = cooker_sync(userptr);
    thread_exit( result );
    return result;
}
// entry point of the asset pipeline: cook every file matching `masks` with
// `callback`, either synchronously or on background thread(s) (COOKER_ASYNC).
bool cooker( const char *masks, cooker_callback_t callback, int flags ) {
    static struct cooker_args args[1] = {0};

    // enumerate matching files and count them
    const char **files = file_list(masks);
    int numfiles = 0;
    while( files[numfiles] ) ++numfiles;

    args[0].files = files;
    args[0].callback = callback;
    args[0].from = 0;
    args[0].to = numfiles;
    for( int i = 0; i < countof(args); ++i) snprintf(args[i].zipfile, 16, ".cook[%d].zip", i);
    //
    if( flags & COOKER_ASYNC ) {
        // split the file list evenly across one worker thread per args slot
        int numthreads = countof(args);
        for( int i = 0; i < numthreads; ++i ) {
            args[i] = args[0];
            args[i].from = i == 0 ? 0 : args[i-1].to;
            args[i].to = i == (numthreads-1) ? numfiles : (numfiles * (i+1.) / numthreads);
            thread_create( cooker_async, &args[i], "cooker_async()", 0/*STACK_SIZE*/ );
        }
        return true;
    }
    return !!cooker_sync( &args[0] );
}
#endif
|
Fig_6.12_piLoopCombined.c | #include <stdio.h>
#include <omp.h>
#define NTHREADS 4
static long num_steps = 100000000;
double step;
/* Estimate pi by numerically integrating 4/(1+x^2) over [0,1] with the
 * midpoint rule, parallelized with an OpenMP reduction. */
int main ()
{
    int i;
    double x, pi, sum = 0.0;
    double start_time, run_time;

    step = 1.0/(double) num_steps;

    omp_set_num_threads(NTHREADS);
    start_time = omp_get_wtime();

    /* x is private per thread; sum is combined across threads via reduction */
    #pragma omp parallel for private(x) reduction(+:sum)
    for (i = 0; i < num_steps; i++) {
        x = (i + 0.5) * step;
        sum += 4.0 / (1.0 + x * x);
    }
    pi = step * sum;
    run_time = omp_get_wtime() - start_time;

    /* BUG FIX: the format string has three conversions (%f %f %d) but only
     * two arguments were supplied, which is undefined behavior and printed
     * garbage for the thread count; pass NTHREADS as the third argument. */
    printf("pi is %f in %f seconds %d threads\n", pi, run_time, NTHREADS);
    return 0;
}
|
convolution_1x1_pack8.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void conv1x1s1_sgemm_transform_kernel_pack8_avx(const Mat& kernel, Mat& weight_data_pack8, int num_input, int num_output)
{
// src = kw-kh-inch-outch
// dst = 8b-8a-kw-kh-inch/8a-outch/8b
Mat weight_data_r2 = kernel.reshape(1, num_input, num_output);
weight_data_pack8.create(1, num_input / 8, num_output / 8, (size_t)4 * 64, 64);
for (int q = 0; q + 7 < num_output; q += 8)
{
const Mat k0 = weight_data_r2.channel(q);
const Mat k1 = weight_data_r2.channel(q + 1);
const Mat k2 = weight_data_r2.channel(q + 2);
const Mat k3 = weight_data_r2.channel(q + 3);
const Mat k4 = weight_data_r2.channel(q + 4);
const Mat k5 = weight_data_r2.channel(q + 5);
const Mat k6 = weight_data_r2.channel(q + 6);
const Mat k7 = weight_data_r2.channel(q + 7);
Mat g0 = weight_data_pack8.channel(q / 8);
for (int p = 0; p + 7 < num_input; p += 8)
{
const float* k00 = k0.row(p);
const float* k01 = k0.row(p + 1);
const float* k02 = k0.row(p + 2);
const float* k03 = k0.row(p + 3);
const float* k04 = k0.row(p + 4);
const float* k05 = k0.row(p + 5);
const float* k06 = k0.row(p + 6);
const float* k07 = k0.row(p + 7);
const float* k10 = k1.row(p);
const float* k11 = k1.row(p + 1);
const float* k12 = k1.row(p + 2);
const float* k13 = k1.row(p + 3);
const float* k14 = k1.row(p + 4);
const float* k15 = k1.row(p + 5);
const float* k16 = k1.row(p + 6);
const float* k17 = k1.row(p + 7);
const float* k20 = k2.row(p);
const float* k21 = k2.row(p + 1);
const float* k22 = k2.row(p + 2);
const float* k23 = k2.row(p + 3);
const float* k24 = k2.row(p + 4);
const float* k25 = k2.row(p + 5);
const float* k26 = k2.row(p + 6);
const float* k27 = k2.row(p + 7);
const float* k30 = k3.row(p);
const float* k31 = k3.row(p + 1);
const float* k32 = k3.row(p + 2);
const float* k33 = k3.row(p + 3);
const float* k34 = k3.row(p + 4);
const float* k35 = k3.row(p + 5);
const float* k36 = k3.row(p + 6);
const float* k37 = k3.row(p + 7);
const float* k40 = k4.row(p);
const float* k41 = k4.row(p + 1);
const float* k42 = k4.row(p + 2);
const float* k43 = k4.row(p + 3);
const float* k44 = k4.row(p + 4);
const float* k45 = k4.row(p + 5);
const float* k46 = k4.row(p + 6);
const float* k47 = k4.row(p + 7);
const float* k50 = k5.row(p);
const float* k51 = k5.row(p + 1);
const float* k52 = k5.row(p + 2);
const float* k53 = k5.row(p + 3);
const float* k54 = k5.row(p + 4);
const float* k55 = k5.row(p + 5);
const float* k56 = k5.row(p + 6);
const float* k57 = k5.row(p + 7);
const float* k60 = k6.row(p);
const float* k61 = k6.row(p + 1);
const float* k62 = k6.row(p + 2);
const float* k63 = k6.row(p + 3);
const float* k64 = k6.row(p + 4);
const float* k65 = k6.row(p + 5);
const float* k66 = k6.row(p + 6);
const float* k67 = k6.row(p + 7);
const float* k70 = k7.row(p);
const float* k71 = k7.row(p + 1);
const float* k72 = k7.row(p + 2);
const float* k73 = k7.row(p + 3);
const float* k74 = k7.row(p + 4);
const float* k75 = k7.row(p + 5);
const float* k76 = k7.row(p + 6);
const float* k77 = k7.row(p + 7);
float* g00 = g0.row(p / 8);
g00[0] = k00[0];
g00[1] = k10[0];
g00[2] = k20[0];
g00[3] = k30[0];
g00[4] = k40[0];
g00[5] = k50[0];
g00[6] = k60[0];
g00[7] = k70[0];
g00 += 8;
g00[0] = k01[0];
g00[1] = k11[0];
g00[2] = k21[0];
g00[3] = k31[0];
g00[4] = k41[0];
g00[5] = k51[0];
g00[6] = k61[0];
g00[7] = k71[0];
g00 += 8;
g00[0] = k02[0];
g00[1] = k12[0];
g00[2] = k22[0];
g00[3] = k32[0];
g00[4] = k42[0];
g00[5] = k52[0];
g00[6] = k62[0];
g00[7] = k72[0];
g00 += 8;
g00[0] = k03[0];
g00[1] = k13[0];
g00[2] = k23[0];
g00[3] = k33[0];
g00[4] = k43[0];
g00[5] = k53[0];
g00[6] = k63[0];
g00[7] = k73[0];
g00 += 8;
g00[0] = k04[0];
g00[1] = k14[0];
g00[2] = k24[0];
g00[3] = k34[0];
g00[4] = k44[0];
g00[5] = k54[0];
g00[6] = k64[0];
g00[7] = k74[0];
g00 += 8;
g00[0] = k05[0];
g00[1] = k15[0];
g00[2] = k25[0];
g00[3] = k35[0];
g00[4] = k45[0];
g00[5] = k55[0];
g00[6] = k65[0];
g00[7] = k75[0];
g00 += 8;
g00[0] = k06[0];
g00[1] = k16[0];
g00[2] = k26[0];
g00[3] = k36[0];
g00[4] = k46[0];
g00[5] = k56[0];
g00[6] = k66[0];
g00[7] = k76[0];
g00 += 8;
g00[0] = k07[0];
g00[1] = k17[0];
g00[2] = k27[0];
g00[3] = k37[0];
g00[4] = k47[0];
g00[5] = k57[0];
g00[6] = k67[0];
g00[7] = k77[0];
g00 += 8;
}
}
}
static void conv1x1s1_sgemm_pack8_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outch = top_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
const int size = w * h;
const float* bias = _bias;
// interleave
Mat tmp(12, inch, size / 12 + (size % 12) / 8 + (size % 12 % 8) / 4 + (size % 12 % 4) / 2 + size % 12 % 2, elemsize, elempack, opt.workspace_allocator);
{
int nn_size = size / 12;
int remain_size_start = nn_size * 12;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = ii * 12;
const float* img0 = bottom_blob.channel(0);
img0 += i * 8;
float* tmpptr = tmp.channel(i / 12);
for (int q = 0; q < inch; q++)
{
__m256 _r0 = _mm256_loadu_ps(img0);
__m256 _r1 = _mm256_loadu_ps(img0 + 8);
__m256 _r2 = _mm256_loadu_ps(img0 + 16);
__m256 _r3 = _mm256_loadu_ps(img0 + 24);
__m256 _r4 = _mm256_loadu_ps(img0 + 32);
__m256 _r5 = _mm256_loadu_ps(img0 + 40);
__m256 _r6 = _mm256_loadu_ps(img0 + 48);
__m256 _r7 = _mm256_loadu_ps(img0 + 56);
__m256 _r8 = _mm256_loadu_ps(img0 + 64);
__m256 _r9 = _mm256_loadu_ps(img0 + 72);
__m256 _r10 = _mm256_loadu_ps(img0 + 80);
__m256 _r11 = _mm256_loadu_ps(img0 + 88);
_mm256_storeu_ps(tmpptr, _r0);
_mm256_storeu_ps(tmpptr + 8, _r1);
_mm256_storeu_ps(tmpptr + 16, _r2);
_mm256_storeu_ps(tmpptr + 24, _r3);
_mm256_storeu_ps(tmpptr + 32, _r4);
_mm256_storeu_ps(tmpptr + 40, _r5);
_mm256_storeu_ps(tmpptr + 48, _r6);
_mm256_storeu_ps(tmpptr + 56, _r7);
_mm256_storeu_ps(tmpptr + 64, _r8);
_mm256_storeu_ps(tmpptr + 72, _r9);
_mm256_storeu_ps(tmpptr + 80, _r10);
_mm256_storeu_ps(tmpptr + 88, _r11);
tmpptr += 96;
img0 += bottom_blob.cstep * 8;
}
}
nn_size = (size - remain_size_start) >> 3;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 8;
const float* img0 = bottom_blob.channel(0);
img0 += i * 8;
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8);
for (int q = 0; q < inch; q++)
{
__m256 _r0 = _mm256_loadu_ps(img0);
__m256 _r1 = _mm256_loadu_ps(img0 + 8);
__m256 _r2 = _mm256_loadu_ps(img0 + 16);
__m256 _r3 = _mm256_loadu_ps(img0 + 24);
__m256 _r4 = _mm256_loadu_ps(img0 + 32);
__m256 _r5 = _mm256_loadu_ps(img0 + 40);
__m256 _r6 = _mm256_loadu_ps(img0 + 48);
__m256 _r7 = _mm256_loadu_ps(img0 + 56);
_mm256_storeu_ps(tmpptr, _r0);
_mm256_storeu_ps(tmpptr + 8, _r1);
_mm256_storeu_ps(tmpptr + 16, _r2);
_mm256_storeu_ps(tmpptr + 24, _r3);
_mm256_storeu_ps(tmpptr + 32, _r4);
_mm256_storeu_ps(tmpptr + 40, _r5);
_mm256_storeu_ps(tmpptr + 48, _r6);
_mm256_storeu_ps(tmpptr + 56, _r7);
tmpptr += 64;
img0 += bottom_blob.cstep * 8;
}
}
remain_size_start += nn_size << 3;
nn_size = (size - remain_size_start) >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 4;
const float* img0 = bottom_blob.channel(0);
img0 += i * 8;
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
for (int q = 0; q < inch; q++)
{
__m256 _r0 = _mm256_loadu_ps(img0);
__m256 _r1 = _mm256_loadu_ps(img0 + 8);
__m256 _r2 = _mm256_loadu_ps(img0 + 16);
__m256 _r3 = _mm256_loadu_ps(img0 + 24);
_mm256_storeu_ps(tmpptr, _r0);
_mm256_storeu_ps(tmpptr + 8, _r1);
_mm256_storeu_ps(tmpptr + 16, _r2);
_mm256_storeu_ps(tmpptr + 24, _r3);
tmpptr += 32;
img0 += bottom_blob.cstep * 8;
}
}
remain_size_start += nn_size << 2;
nn_size = (size - remain_size_start) >> 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 2;
const float* img0 = bottom_blob.channel(0);
img0 += i * 8;
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
for (int q = 0; q < inch; q++)
{
__m256 _r0 = _mm256_loadu_ps(img0);
__m256 _r1 = _mm256_loadu_ps(img0 + 8);
_mm256_storeu_ps(tmpptr, _r0);
_mm256_storeu_ps(tmpptr + 8, _r1);
tmpptr += 16;
img0 += bottom_blob.cstep * 8;
}
}
remain_size_start += nn_size << 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = remain_size_start; i < size; i++)
{
const float* img0 = bottom_blob.channel(0);
img0 += i * 8;
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
for (int q = 0; q < inch; q++)
{
__m256 _r0 = _mm256_loadu_ps(img0);
_mm256_storeu_ps(tmpptr, _r0);
tmpptr += 8;
img0 += bottom_blob.cstep * 8;
}
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
Mat out = top_blob.channel(p);
__m256 _bias0 = bias ? _mm256_loadu_ps((const float*)bias + p * 8) : _mm256_set1_ps(0.f);
float* outptr = out;
int i = 0;
for (; i + 11 < size; i += 12)
{
const float* tmpptr = tmp.channel(i / 12);
__m256 _sum0 = _bias0;
__m256 _sum1 = _bias0;
__m256 _sum2 = _bias0;
__m256 _sum3 = _bias0;
__m256 _sum4 = _bias0;
__m256 _sum5 = _bias0;
__m256 _sum6 = _bias0;
__m256 _sum7 = _bias0;
__m256 _sum8 = _bias0;
__m256 _sum9 = _bias0;
__m256 _sum10 = _bias0;
__m256 _sum11 = _bias0;
const float* kptr = (const float*)kernel + p * inch * 64;
for (int q = 0; q < inch; q++)
{
__m256 _w0 = _mm256_loadu_ps(kptr);
__m256 _w1 = _mm256_loadu_ps(kptr + 8);
__m256 _w2 = _mm256_loadu_ps(kptr + 16);
__m256 _w3 = _mm256_loadu_ps(kptr + 24);
__m256 _w4 = _mm256_loadu_ps(kptr + 32);
__m256 _w5 = _mm256_loadu_ps(kptr + 40);
__m256 _w6 = _mm256_loadu_ps(kptr + 48);
__m256 _w7 = _mm256_loadu_ps(kptr + 56);
__m256 _val00 = _mm256_broadcast_ss(tmpptr);
__m256 _val01 = _mm256_broadcast_ss(tmpptr + 1);
__m256 _val02 = _mm256_broadcast_ss(tmpptr + 2);
__m256 _val03 = _mm256_broadcast_ss(tmpptr + 3);
__m256 _val04 = _mm256_broadcast_ss(tmpptr + 4);
__m256 _val05 = _mm256_broadcast_ss(tmpptr + 5);
__m256 _val06 = _mm256_broadcast_ss(tmpptr + 6);
__m256 _val07 = _mm256_broadcast_ss(tmpptr + 7);
__m256 _val10 = _mm256_broadcast_ss(tmpptr + 8);
__m256 _val11 = _mm256_broadcast_ss(tmpptr + 9);
__m256 _val12 = _mm256_broadcast_ss(tmpptr + 10);
__m256 _val13 = _mm256_broadcast_ss(tmpptr + 11);
__m256 _val14 = _mm256_broadcast_ss(tmpptr + 12);
__m256 _val15 = _mm256_broadcast_ss(tmpptr + 13);
__m256 _val16 = _mm256_broadcast_ss(tmpptr + 14);
__m256 _val17 = _mm256_broadcast_ss(tmpptr + 15);
_sum0 = _mm256_fmadd_ps(_w0, _val00, _sum0);
_sum0 = _mm256_fmadd_ps(_w1, _val01, _sum0);
_sum0 = _mm256_fmadd_ps(_w2, _val02, _sum0);
_sum0 = _mm256_fmadd_ps(_w3, _val03, _sum0);
_sum0 = _mm256_fmadd_ps(_w4, _val04, _sum0);
_sum0 = _mm256_fmadd_ps(_w5, _val05, _sum0);
_sum0 = _mm256_fmadd_ps(_w6, _val06, _sum0);
_sum0 = _mm256_fmadd_ps(_w7, _val07, _sum0);
_sum1 = _mm256_fmadd_ps(_w0, _val10, _sum1);
_sum1 = _mm256_fmadd_ps(_w1, _val11, _sum1);
_sum1 = _mm256_fmadd_ps(_w2, _val12, _sum1);
_sum1 = _mm256_fmadd_ps(_w3, _val13, _sum1);
_sum1 = _mm256_fmadd_ps(_w4, _val14, _sum1);
_sum1 = _mm256_fmadd_ps(_w5, _val15, _sum1);
_sum1 = _mm256_fmadd_ps(_w6, _val16, _sum1);
_sum1 = _mm256_fmadd_ps(_w7, _val17, _sum1);
__m256 _val20 = _mm256_broadcast_ss(tmpptr + 16);
__m256 _val21 = _mm256_broadcast_ss(tmpptr + 17);
__m256 _val22 = _mm256_broadcast_ss(tmpptr + 18);
__m256 _val23 = _mm256_broadcast_ss(tmpptr + 19);
__m256 _val24 = _mm256_broadcast_ss(tmpptr + 20);
__m256 _val25 = _mm256_broadcast_ss(tmpptr + 21);
__m256 _val26 = _mm256_broadcast_ss(tmpptr + 22);
__m256 _val27 = _mm256_broadcast_ss(tmpptr + 23);
__m256 _val30 = _mm256_broadcast_ss(tmpptr + 24);
__m256 _val31 = _mm256_broadcast_ss(tmpptr + 25);
__m256 _val32 = _mm256_broadcast_ss(tmpptr + 26);
__m256 _val33 = _mm256_broadcast_ss(tmpptr + 27);
__m256 _val34 = _mm256_broadcast_ss(tmpptr + 28);
__m256 _val35 = _mm256_broadcast_ss(tmpptr + 29);
__m256 _val36 = _mm256_broadcast_ss(tmpptr + 30);
__m256 _val37 = _mm256_broadcast_ss(tmpptr + 31);
_sum2 = _mm256_fmadd_ps(_w0, _val20, _sum2);
_sum2 = _mm256_fmadd_ps(_w1, _val21, _sum2);
_sum2 = _mm256_fmadd_ps(_w2, _val22, _sum2);
_sum2 = _mm256_fmadd_ps(_w3, _val23, _sum2);
_sum2 = _mm256_fmadd_ps(_w4, _val24, _sum2);
_sum2 = _mm256_fmadd_ps(_w5, _val25, _sum2);
_sum2 = _mm256_fmadd_ps(_w6, _val26, _sum2);
_sum2 = _mm256_fmadd_ps(_w7, _val27, _sum2);
_sum3 = _mm256_fmadd_ps(_w0, _val30, _sum3);
_sum3 = _mm256_fmadd_ps(_w1, _val31, _sum3);
_sum3 = _mm256_fmadd_ps(_w2, _val32, _sum3);
_sum3 = _mm256_fmadd_ps(_w3, _val33, _sum3);
_sum3 = _mm256_fmadd_ps(_w4, _val34, _sum3);
_sum3 = _mm256_fmadd_ps(_w5, _val35, _sum3);
_sum3 = _mm256_fmadd_ps(_w6, _val36, _sum3);
_sum3 = _mm256_fmadd_ps(_w7, _val37, _sum3);
__m256 _val40 = _mm256_broadcast_ss(tmpptr + 32);
__m256 _val41 = _mm256_broadcast_ss(tmpptr + 33);
__m256 _val42 = _mm256_broadcast_ss(tmpptr + 34);
__m256 _val43 = _mm256_broadcast_ss(tmpptr + 35);
__m256 _val44 = _mm256_broadcast_ss(tmpptr + 36);
__m256 _val45 = _mm256_broadcast_ss(tmpptr + 37);
__m256 _val46 = _mm256_broadcast_ss(tmpptr + 38);
__m256 _val47 = _mm256_broadcast_ss(tmpptr + 39);
__m256 _val50 = _mm256_broadcast_ss(tmpptr + 40);
__m256 _val51 = _mm256_broadcast_ss(tmpptr + 41);
__m256 _val52 = _mm256_broadcast_ss(tmpptr + 42);
__m256 _val53 = _mm256_broadcast_ss(tmpptr + 43);
__m256 _val54 = _mm256_broadcast_ss(tmpptr + 44);
__m256 _val55 = _mm256_broadcast_ss(tmpptr + 45);
__m256 _val56 = _mm256_broadcast_ss(tmpptr + 46);
__m256 _val57 = _mm256_broadcast_ss(tmpptr + 47);
_sum4 = _mm256_fmadd_ps(_w0, _val40, _sum4);
_sum4 = _mm256_fmadd_ps(_w1, _val41, _sum4);
_sum4 = _mm256_fmadd_ps(_w2, _val42, _sum4);
_sum4 = _mm256_fmadd_ps(_w3, _val43, _sum4);
_sum4 = _mm256_fmadd_ps(_w4, _val44, _sum4);
_sum4 = _mm256_fmadd_ps(_w5, _val45, _sum4);
_sum4 = _mm256_fmadd_ps(_w6, _val46, _sum4);
_sum4 = _mm256_fmadd_ps(_w7, _val47, _sum4);
_sum5 = _mm256_fmadd_ps(_w0, _val50, _sum5);
_sum5 = _mm256_fmadd_ps(_w1, _val51, _sum5);
_sum5 = _mm256_fmadd_ps(_w2, _val52, _sum5);
_sum5 = _mm256_fmadd_ps(_w3, _val53, _sum5);
_sum5 = _mm256_fmadd_ps(_w4, _val54, _sum5);
_sum5 = _mm256_fmadd_ps(_w5, _val55, _sum5);
_sum5 = _mm256_fmadd_ps(_w6, _val56, _sum5);
_sum5 = _mm256_fmadd_ps(_w7, _val57, _sum5);
__m256 _val60 = _mm256_broadcast_ss(tmpptr + 48);
__m256 _val61 = _mm256_broadcast_ss(tmpptr + 49);
__m256 _val62 = _mm256_broadcast_ss(tmpptr + 50);
__m256 _val63 = _mm256_broadcast_ss(tmpptr + 51);
__m256 _val64 = _mm256_broadcast_ss(tmpptr + 52);
__m256 _val65 = _mm256_broadcast_ss(tmpptr + 53);
__m256 _val66 = _mm256_broadcast_ss(tmpptr + 54);
__m256 _val67 = _mm256_broadcast_ss(tmpptr + 55);
__m256 _val70 = _mm256_broadcast_ss(tmpptr + 56);
__m256 _val71 = _mm256_broadcast_ss(tmpptr + 57);
__m256 _val72 = _mm256_broadcast_ss(tmpptr + 58);
__m256 _val73 = _mm256_broadcast_ss(tmpptr + 59);
__m256 _val74 = _mm256_broadcast_ss(tmpptr + 60);
__m256 _val75 = _mm256_broadcast_ss(tmpptr + 61);
__m256 _val76 = _mm256_broadcast_ss(tmpptr + 62);
__m256 _val77 = _mm256_broadcast_ss(tmpptr + 63);
_sum6 = _mm256_fmadd_ps(_w0, _val60, _sum6);
_sum6 = _mm256_fmadd_ps(_w1, _val61, _sum6);
_sum6 = _mm256_fmadd_ps(_w2, _val62, _sum6);
_sum6 = _mm256_fmadd_ps(_w3, _val63, _sum6);
_sum6 = _mm256_fmadd_ps(_w4, _val64, _sum6);
_sum6 = _mm256_fmadd_ps(_w5, _val65, _sum6);
_sum6 = _mm256_fmadd_ps(_w6, _val66, _sum6);
_sum6 = _mm256_fmadd_ps(_w7, _val67, _sum6);
_sum7 = _mm256_fmadd_ps(_w0, _val70, _sum7);
_sum7 = _mm256_fmadd_ps(_w1, _val71, _sum7);
_sum7 = _mm256_fmadd_ps(_w2, _val72, _sum7);
_sum7 = _mm256_fmadd_ps(_w3, _val73, _sum7);
_sum7 = _mm256_fmadd_ps(_w4, _val74, _sum7);
_sum7 = _mm256_fmadd_ps(_w5, _val75, _sum7);
_sum7 = _mm256_fmadd_ps(_w6, _val76, _sum7);
_sum7 = _mm256_fmadd_ps(_w7, _val77, _sum7);
__m256 _val80 = _mm256_broadcast_ss(tmpptr + 64);
__m256 _val81 = _mm256_broadcast_ss(tmpptr + 65);
__m256 _val82 = _mm256_broadcast_ss(tmpptr + 66);
__m256 _val83 = _mm256_broadcast_ss(tmpptr + 67);
__m256 _val84 = _mm256_broadcast_ss(tmpptr + 68);
__m256 _val85 = _mm256_broadcast_ss(tmpptr + 69);
__m256 _val86 = _mm256_broadcast_ss(tmpptr + 70);
__m256 _val87 = _mm256_broadcast_ss(tmpptr + 71);
__m256 _val90 = _mm256_broadcast_ss(tmpptr + 72);
__m256 _val91 = _mm256_broadcast_ss(tmpptr + 73);
__m256 _val92 = _mm256_broadcast_ss(tmpptr + 74);
__m256 _val93 = _mm256_broadcast_ss(tmpptr + 75);
__m256 _val94 = _mm256_broadcast_ss(tmpptr + 76);
__m256 _val95 = _mm256_broadcast_ss(tmpptr + 77);
__m256 _val96 = _mm256_broadcast_ss(tmpptr + 78);
__m256 _val97 = _mm256_broadcast_ss(tmpptr + 79);
_sum8 = _mm256_fmadd_ps(_w0, _val80, _sum8);
_sum8 = _mm256_fmadd_ps(_w1, _val81, _sum8);
_sum8 = _mm256_fmadd_ps(_w2, _val82, _sum8);
_sum8 = _mm256_fmadd_ps(_w3, _val83, _sum8);
_sum8 = _mm256_fmadd_ps(_w4, _val84, _sum8);
_sum8 = _mm256_fmadd_ps(_w5, _val85, _sum8);
_sum8 = _mm256_fmadd_ps(_w6, _val86, _sum8);
_sum8 = _mm256_fmadd_ps(_w7, _val87, _sum8);
_sum9 = _mm256_fmadd_ps(_w0, _val90, _sum9);
_sum9 = _mm256_fmadd_ps(_w1, _val91, _sum9);
_sum9 = _mm256_fmadd_ps(_w2, _val92, _sum9);
_sum9 = _mm256_fmadd_ps(_w3, _val93, _sum9);
_sum9 = _mm256_fmadd_ps(_w4, _val94, _sum9);
_sum9 = _mm256_fmadd_ps(_w5, _val95, _sum9);
_sum9 = _mm256_fmadd_ps(_w6, _val96, _sum9);
_sum9 = _mm256_fmadd_ps(_w7, _val97, _sum9);
__m256 _val100 = _mm256_broadcast_ss(tmpptr + 80);
__m256 _val101 = _mm256_broadcast_ss(tmpptr + 81);
__m256 _val102 = _mm256_broadcast_ss(tmpptr + 82);
__m256 _val103 = _mm256_broadcast_ss(tmpptr + 83);
__m256 _val104 = _mm256_broadcast_ss(tmpptr + 84);
__m256 _val105 = _mm256_broadcast_ss(tmpptr + 85);
__m256 _val106 = _mm256_broadcast_ss(tmpptr + 86);
__m256 _val107 = _mm256_broadcast_ss(tmpptr + 87);
__m256 _val110 = _mm256_broadcast_ss(tmpptr + 88);
__m256 _val111 = _mm256_broadcast_ss(tmpptr + 89);
__m256 _val112 = _mm256_broadcast_ss(tmpptr + 90);
__m256 _val113 = _mm256_broadcast_ss(tmpptr + 91);
__m256 _val114 = _mm256_broadcast_ss(tmpptr + 92);
__m256 _val115 = _mm256_broadcast_ss(tmpptr + 93);
__m256 _val116 = _mm256_broadcast_ss(tmpptr + 94);
__m256 _val117 = _mm256_broadcast_ss(tmpptr + 95);
_sum10 = _mm256_fmadd_ps(_w0, _val100, _sum10);
_sum10 = _mm256_fmadd_ps(_w1, _val101, _sum10);
_sum10 = _mm256_fmadd_ps(_w2, _val102, _sum10);
_sum10 = _mm256_fmadd_ps(_w3, _val103, _sum10);
_sum10 = _mm256_fmadd_ps(_w4, _val104, _sum10);
_sum10 = _mm256_fmadd_ps(_w5, _val105, _sum10);
_sum10 = _mm256_fmadd_ps(_w6, _val106, _sum10);
_sum10 = _mm256_fmadd_ps(_w7, _val107, _sum10);
_sum11 = _mm256_fmadd_ps(_w0, _val110, _sum11);
_sum11 = _mm256_fmadd_ps(_w1, _val111, _sum11);
_sum11 = _mm256_fmadd_ps(_w2, _val112, _sum11);
_sum11 = _mm256_fmadd_ps(_w3, _val113, _sum11);
_sum11 = _mm256_fmadd_ps(_w4, _val114, _sum11);
_sum11 = _mm256_fmadd_ps(_w5, _val115, _sum11);
_sum11 = _mm256_fmadd_ps(_w6, _val116, _sum11);
_sum11 = _mm256_fmadd_ps(_w7, _val117, _sum11);
tmpptr += 96;
kptr += 64;
}
_mm256_storeu_ps(outptr, _sum0);
_mm256_storeu_ps(outptr + 8, _sum1);
_mm256_storeu_ps(outptr + 16, _sum2);
_mm256_storeu_ps(outptr + 24, _sum3);
_mm256_storeu_ps(outptr + 32, _sum4);
_mm256_storeu_ps(outptr + 40, _sum5);
_mm256_storeu_ps(outptr + 48, _sum6);
_mm256_storeu_ps(outptr + 56, _sum7);
_mm256_storeu_ps(outptr + 64, _sum8);
_mm256_storeu_ps(outptr + 72, _sum9);
_mm256_storeu_ps(outptr + 80, _sum10);
_mm256_storeu_ps(outptr + 88, _sum11);
outptr += 96;
}
for (; i + 7 < size; i += 8)
{
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8);
__m256 _sum0 = _bias0;
__m256 _sum1 = _bias0;
__m256 _sum2 = _bias0;
__m256 _sum3 = _bias0;
__m256 _sum4 = _bias0;
__m256 _sum5 = _bias0;
__m256 _sum6 = _bias0;
__m256 _sum7 = _bias0;
const float* kptr = (const float*)kernel + p * inch * 64;
for (int q = 0; q < inch; q++)
{
__m256 _w0 = _mm256_loadu_ps(kptr);
__m256 _w1 = _mm256_loadu_ps(kptr + 8);
__m256 _w2 = _mm256_loadu_ps(kptr + 16);
__m256 _w3 = _mm256_loadu_ps(kptr + 24);
__m256 _w4 = _mm256_loadu_ps(kptr + 32);
__m256 _w5 = _mm256_loadu_ps(kptr + 40);
__m256 _w6 = _mm256_loadu_ps(kptr + 48);
__m256 _w7 = _mm256_loadu_ps(kptr + 56);
__m256 _val00 = _mm256_broadcast_ss(tmpptr);
__m256 _val01 = _mm256_broadcast_ss(tmpptr + 1);
__m256 _val02 = _mm256_broadcast_ss(tmpptr + 2);
__m256 _val03 = _mm256_broadcast_ss(tmpptr + 3);
__m256 _val04 = _mm256_broadcast_ss(tmpptr + 4);
__m256 _val05 = _mm256_broadcast_ss(tmpptr + 5);
__m256 _val06 = _mm256_broadcast_ss(tmpptr + 6);
__m256 _val07 = _mm256_broadcast_ss(tmpptr + 7);
__m256 _val10 = _mm256_broadcast_ss(tmpptr + 8);
__m256 _val11 = _mm256_broadcast_ss(tmpptr + 9);
__m256 _val12 = _mm256_broadcast_ss(tmpptr + 10);
__m256 _val13 = _mm256_broadcast_ss(tmpptr + 11);
__m256 _val14 = _mm256_broadcast_ss(tmpptr + 12);
__m256 _val15 = _mm256_broadcast_ss(tmpptr + 13);
__m256 _val16 = _mm256_broadcast_ss(tmpptr + 14);
__m256 _val17 = _mm256_broadcast_ss(tmpptr + 15);
_sum0 = _mm256_fmadd_ps(_w0, _val00, _sum0);
_sum0 = _mm256_fmadd_ps(_w1, _val01, _sum0);
_sum0 = _mm256_fmadd_ps(_w2, _val02, _sum0);
_sum0 = _mm256_fmadd_ps(_w3, _val03, _sum0);
_sum0 = _mm256_fmadd_ps(_w4, _val04, _sum0);
_sum0 = _mm256_fmadd_ps(_w5, _val05, _sum0);
_sum0 = _mm256_fmadd_ps(_w6, _val06, _sum0);
_sum0 = _mm256_fmadd_ps(_w7, _val07, _sum0);
_sum1 = _mm256_fmadd_ps(_w0, _val10, _sum1);
_sum1 = _mm256_fmadd_ps(_w1, _val11, _sum1);
_sum1 = _mm256_fmadd_ps(_w2, _val12, _sum1);
_sum1 = _mm256_fmadd_ps(_w3, _val13, _sum1);
_sum1 = _mm256_fmadd_ps(_w4, _val14, _sum1);
_sum1 = _mm256_fmadd_ps(_w5, _val15, _sum1);
_sum1 = _mm256_fmadd_ps(_w6, _val16, _sum1);
_sum1 = _mm256_fmadd_ps(_w7, _val17, _sum1);
__m256 _val20 = _mm256_broadcast_ss(tmpptr + 16);
__m256 _val21 = _mm256_broadcast_ss(tmpptr + 17);
__m256 _val22 = _mm256_broadcast_ss(tmpptr + 18);
__m256 _val23 = _mm256_broadcast_ss(tmpptr + 19);
__m256 _val24 = _mm256_broadcast_ss(tmpptr + 20);
__m256 _val25 = _mm256_broadcast_ss(tmpptr + 21);
__m256 _val26 = _mm256_broadcast_ss(tmpptr + 22);
__m256 _val27 = _mm256_broadcast_ss(tmpptr + 23);
__m256 _val30 = _mm256_broadcast_ss(tmpptr + 24);
__m256 _val31 = _mm256_broadcast_ss(tmpptr + 25);
__m256 _val32 = _mm256_broadcast_ss(tmpptr + 26);
__m256 _val33 = _mm256_broadcast_ss(tmpptr + 27);
__m256 _val34 = _mm256_broadcast_ss(tmpptr + 28);
__m256 _val35 = _mm256_broadcast_ss(tmpptr + 29);
__m256 _val36 = _mm256_broadcast_ss(tmpptr + 30);
__m256 _val37 = _mm256_broadcast_ss(tmpptr + 31);
_sum2 = _mm256_fmadd_ps(_w0, _val20, _sum2);
_sum2 = _mm256_fmadd_ps(_w1, _val21, _sum2);
_sum2 = _mm256_fmadd_ps(_w2, _val22, _sum2);
_sum2 = _mm256_fmadd_ps(_w3, _val23, _sum2);
_sum2 = _mm256_fmadd_ps(_w4, _val24, _sum2);
_sum2 = _mm256_fmadd_ps(_w5, _val25, _sum2);
_sum2 = _mm256_fmadd_ps(_w6, _val26, _sum2);
_sum2 = _mm256_fmadd_ps(_w7, _val27, _sum2);
_sum3 = _mm256_fmadd_ps(_w0, _val30, _sum3);
_sum3 = _mm256_fmadd_ps(_w1, _val31, _sum3);
_sum3 = _mm256_fmadd_ps(_w2, _val32, _sum3);
_sum3 = _mm256_fmadd_ps(_w3, _val33, _sum3);
_sum3 = _mm256_fmadd_ps(_w4, _val34, _sum3);
_sum3 = _mm256_fmadd_ps(_w5, _val35, _sum3);
_sum3 = _mm256_fmadd_ps(_w6, _val36, _sum3);
_sum3 = _mm256_fmadd_ps(_w7, _val37, _sum3);
__m256 _val40 = _mm256_broadcast_ss(tmpptr + 32);
__m256 _val41 = _mm256_broadcast_ss(tmpptr + 33);
__m256 _val42 = _mm256_broadcast_ss(tmpptr + 34);
__m256 _val43 = _mm256_broadcast_ss(tmpptr + 35);
__m256 _val44 = _mm256_broadcast_ss(tmpptr + 36);
__m256 _val45 = _mm256_broadcast_ss(tmpptr + 37);
__m256 _val46 = _mm256_broadcast_ss(tmpptr + 38);
__m256 _val47 = _mm256_broadcast_ss(tmpptr + 39);
__m256 _val50 = _mm256_broadcast_ss(tmpptr + 40);
__m256 _val51 = _mm256_broadcast_ss(tmpptr + 41);
__m256 _val52 = _mm256_broadcast_ss(tmpptr + 42);
__m256 _val53 = _mm256_broadcast_ss(tmpptr + 43);
__m256 _val54 = _mm256_broadcast_ss(tmpptr + 44);
__m256 _val55 = _mm256_broadcast_ss(tmpptr + 45);
__m256 _val56 = _mm256_broadcast_ss(tmpptr + 46);
__m256 _val57 = _mm256_broadcast_ss(tmpptr + 47);
_sum4 = _mm256_fmadd_ps(_w0, _val40, _sum4);
_sum4 = _mm256_fmadd_ps(_w1, _val41, _sum4);
_sum4 = _mm256_fmadd_ps(_w2, _val42, _sum4);
_sum4 = _mm256_fmadd_ps(_w3, _val43, _sum4);
_sum4 = _mm256_fmadd_ps(_w4, _val44, _sum4);
_sum4 = _mm256_fmadd_ps(_w5, _val45, _sum4);
_sum4 = _mm256_fmadd_ps(_w6, _val46, _sum4);
_sum4 = _mm256_fmadd_ps(_w7, _val47, _sum4);
_sum5 = _mm256_fmadd_ps(_w0, _val50, _sum5);
_sum5 = _mm256_fmadd_ps(_w1, _val51, _sum5);
_sum5 = _mm256_fmadd_ps(_w2, _val52, _sum5);
_sum5 = _mm256_fmadd_ps(_w3, _val53, _sum5);
_sum5 = _mm256_fmadd_ps(_w4, _val54, _sum5);
_sum5 = _mm256_fmadd_ps(_w5, _val55, _sum5);
_sum5 = _mm256_fmadd_ps(_w6, _val56, _sum5);
_sum5 = _mm256_fmadd_ps(_w7, _val57, _sum5);
__m256 _val60 = _mm256_broadcast_ss(tmpptr + 48);
__m256 _val61 = _mm256_broadcast_ss(tmpptr + 49);
__m256 _val62 = _mm256_broadcast_ss(tmpptr + 50);
__m256 _val63 = _mm256_broadcast_ss(tmpptr + 51);
__m256 _val64 = _mm256_broadcast_ss(tmpptr + 52);
__m256 _val65 = _mm256_broadcast_ss(tmpptr + 53);
__m256 _val66 = _mm256_broadcast_ss(tmpptr + 54);
__m256 _val67 = _mm256_broadcast_ss(tmpptr + 55);
__m256 _val70 = _mm256_broadcast_ss(tmpptr + 56);
__m256 _val71 = _mm256_broadcast_ss(tmpptr + 57);
__m256 _val72 = _mm256_broadcast_ss(tmpptr + 58);
__m256 _val73 = _mm256_broadcast_ss(tmpptr + 59);
__m256 _val74 = _mm256_broadcast_ss(tmpptr + 60);
__m256 _val75 = _mm256_broadcast_ss(tmpptr + 61);
__m256 _val76 = _mm256_broadcast_ss(tmpptr + 62);
__m256 _val77 = _mm256_broadcast_ss(tmpptr + 63);
_sum6 = _mm256_fmadd_ps(_w0, _val60, _sum6);
_sum6 = _mm256_fmadd_ps(_w1, _val61, _sum6);
_sum6 = _mm256_fmadd_ps(_w2, _val62, _sum6);
_sum6 = _mm256_fmadd_ps(_w3, _val63, _sum6);
_sum6 = _mm256_fmadd_ps(_w4, _val64, _sum6);
_sum6 = _mm256_fmadd_ps(_w5, _val65, _sum6);
_sum6 = _mm256_fmadd_ps(_w6, _val66, _sum6);
_sum6 = _mm256_fmadd_ps(_w7, _val67, _sum6);
_sum7 = _mm256_fmadd_ps(_w0, _val70, _sum7);
_sum7 = _mm256_fmadd_ps(_w1, _val71, _sum7);
_sum7 = _mm256_fmadd_ps(_w2, _val72, _sum7);
_sum7 = _mm256_fmadd_ps(_w3, _val73, _sum7);
_sum7 = _mm256_fmadd_ps(_w4, _val74, _sum7);
_sum7 = _mm256_fmadd_ps(_w5, _val75, _sum7);
_sum7 = _mm256_fmadd_ps(_w6, _val76, _sum7);
_sum7 = _mm256_fmadd_ps(_w7, _val77, _sum7);
tmpptr += 64;
kptr += 64;
}
_mm256_storeu_ps(outptr, _sum0);
_mm256_storeu_ps(outptr + 8, _sum1);
_mm256_storeu_ps(outptr + 16, _sum2);
_mm256_storeu_ps(outptr + 24, _sum3);
_mm256_storeu_ps(outptr + 32, _sum4);
_mm256_storeu_ps(outptr + 40, _sum5);
_mm256_storeu_ps(outptr + 48, _sum6);
_mm256_storeu_ps(outptr + 56, _sum7);
outptr += 64;
}
for (; i + 3 < size; i += 4)
{
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
__m256 _sum0 = _bias0;
__m256 _sum1 = _bias0;
__m256 _sum2 = _bias0;
__m256 _sum3 = _bias0;
const float* kptr = (const float*)kernel + p * inch * 64;
for (int q = 0; q < inch; q++)
{
__m256 _w0 = _mm256_loadu_ps(kptr);
__m256 _w1 = _mm256_loadu_ps(kptr + 8);
__m256 _w2 = _mm256_loadu_ps(kptr + 16);
__m256 _w3 = _mm256_loadu_ps(kptr + 24);
__m256 _w4 = _mm256_loadu_ps(kptr + 32);
__m256 _w5 = _mm256_loadu_ps(kptr + 40);
__m256 _w6 = _mm256_loadu_ps(kptr + 48);
__m256 _w7 = _mm256_loadu_ps(kptr + 56);
__m256 _val00 = _mm256_broadcast_ss(tmpptr);
__m256 _val01 = _mm256_broadcast_ss(tmpptr + 1);
__m256 _val02 = _mm256_broadcast_ss(tmpptr + 2);
__m256 _val03 = _mm256_broadcast_ss(tmpptr + 3);
__m256 _val04 = _mm256_broadcast_ss(tmpptr + 4);
__m256 _val05 = _mm256_broadcast_ss(tmpptr + 5);
__m256 _val06 = _mm256_broadcast_ss(tmpptr + 6);
__m256 _val07 = _mm256_broadcast_ss(tmpptr + 7);
__m256 _val10 = _mm256_broadcast_ss(tmpptr + 8);
__m256 _val11 = _mm256_broadcast_ss(tmpptr + 9);
__m256 _val12 = _mm256_broadcast_ss(tmpptr + 10);
__m256 _val13 = _mm256_broadcast_ss(tmpptr + 11);
__m256 _val14 = _mm256_broadcast_ss(tmpptr + 12);
__m256 _val15 = _mm256_broadcast_ss(tmpptr + 13);
__m256 _val16 = _mm256_broadcast_ss(tmpptr + 14);
__m256 _val17 = _mm256_broadcast_ss(tmpptr + 15);
_sum0 = _mm256_fmadd_ps(_w0, _val00, _sum0);
_sum0 = _mm256_fmadd_ps(_w1, _val01, _sum0);
_sum0 = _mm256_fmadd_ps(_w2, _val02, _sum0);
_sum0 = _mm256_fmadd_ps(_w3, _val03, _sum0);
_sum0 = _mm256_fmadd_ps(_w4, _val04, _sum0);
_sum0 = _mm256_fmadd_ps(_w5, _val05, _sum0);
_sum0 = _mm256_fmadd_ps(_w6, _val06, _sum0);
_sum0 = _mm256_fmadd_ps(_w7, _val07, _sum0);
_sum1 = _mm256_fmadd_ps(_w0, _val10, _sum1);
_sum1 = _mm256_fmadd_ps(_w1, _val11, _sum1);
_sum1 = _mm256_fmadd_ps(_w2, _val12, _sum1);
_sum1 = _mm256_fmadd_ps(_w3, _val13, _sum1);
_sum1 = _mm256_fmadd_ps(_w4, _val14, _sum1);
_sum1 = _mm256_fmadd_ps(_w5, _val15, _sum1);
_sum1 = _mm256_fmadd_ps(_w6, _val16, _sum1);
_sum1 = _mm256_fmadd_ps(_w7, _val17, _sum1);
__m256 _val20 = _mm256_broadcast_ss(tmpptr + 16);
__m256 _val21 = _mm256_broadcast_ss(tmpptr + 17);
__m256 _val22 = _mm256_broadcast_ss(tmpptr + 18);
__m256 _val23 = _mm256_broadcast_ss(tmpptr + 19);
__m256 _val24 = _mm256_broadcast_ss(tmpptr + 20);
__m256 _val25 = _mm256_broadcast_ss(tmpptr + 21);
__m256 _val26 = _mm256_broadcast_ss(tmpptr + 22);
__m256 _val27 = _mm256_broadcast_ss(tmpptr + 23);
__m256 _val30 = _mm256_broadcast_ss(tmpptr + 24);
__m256 _val31 = _mm256_broadcast_ss(tmpptr + 25);
__m256 _val32 = _mm256_broadcast_ss(tmpptr + 26);
__m256 _val33 = _mm256_broadcast_ss(tmpptr + 27);
__m256 _val34 = _mm256_broadcast_ss(tmpptr + 28);
__m256 _val35 = _mm256_broadcast_ss(tmpptr + 29);
__m256 _val36 = _mm256_broadcast_ss(tmpptr + 30);
__m256 _val37 = _mm256_broadcast_ss(tmpptr + 31);
_sum2 = _mm256_fmadd_ps(_w0, _val20, _sum2);
_sum2 = _mm256_fmadd_ps(_w1, _val21, _sum2);
_sum2 = _mm256_fmadd_ps(_w2, _val22, _sum2);
_sum2 = _mm256_fmadd_ps(_w3, _val23, _sum2);
_sum2 = _mm256_fmadd_ps(_w4, _val24, _sum2);
_sum2 = _mm256_fmadd_ps(_w5, _val25, _sum2);
_sum2 = _mm256_fmadd_ps(_w6, _val26, _sum2);
_sum2 = _mm256_fmadd_ps(_w7, _val27, _sum2);
_sum3 = _mm256_fmadd_ps(_w0, _val30, _sum3);
_sum3 = _mm256_fmadd_ps(_w1, _val31, _sum3);
_sum3 = _mm256_fmadd_ps(_w2, _val32, _sum3);
_sum3 = _mm256_fmadd_ps(_w3, _val33, _sum3);
_sum3 = _mm256_fmadd_ps(_w4, _val34, _sum3);
_sum3 = _mm256_fmadd_ps(_w5, _val35, _sum3);
_sum3 = _mm256_fmadd_ps(_w6, _val36, _sum3);
_sum3 = _mm256_fmadd_ps(_w7, _val37, _sum3);
tmpptr += 32;
kptr += 64;
}
_mm256_storeu_ps(outptr, _sum0);
_mm256_storeu_ps(outptr + 8, _sum1);
_mm256_storeu_ps(outptr + 16, _sum2);
_mm256_storeu_ps(outptr + 24, _sum3);
outptr += 32;
}
for (; i + 1 < size; i += 2)
{
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
__m256 _sum0 = _bias0;
__m256 _sum1 = _bias0;
const float* kptr = (const float*)kernel + p * inch * 64;
for (int q = 0; q < inch; q++)
{
__m256 _val00 = _mm256_broadcast_ss(tmpptr);
__m256 _val01 = _mm256_broadcast_ss(tmpptr + 1);
__m256 _val02 = _mm256_broadcast_ss(tmpptr + 2);
__m256 _val03 = _mm256_broadcast_ss(tmpptr + 3);
__m256 _val04 = _mm256_broadcast_ss(tmpptr + 4);
__m256 _val05 = _mm256_broadcast_ss(tmpptr + 5);
__m256 _val06 = _mm256_broadcast_ss(tmpptr + 6);
__m256 _val07 = _mm256_broadcast_ss(tmpptr + 7);
__m256 _val10 = _mm256_broadcast_ss(tmpptr + 8);
__m256 _val11 = _mm256_broadcast_ss(tmpptr + 9);
__m256 _val12 = _mm256_broadcast_ss(tmpptr + 10);
__m256 _val13 = _mm256_broadcast_ss(tmpptr + 11);
__m256 _val14 = _mm256_broadcast_ss(tmpptr + 12);
__m256 _val15 = _mm256_broadcast_ss(tmpptr + 13);
__m256 _val16 = _mm256_broadcast_ss(tmpptr + 14);
__m256 _val17 = _mm256_broadcast_ss(tmpptr + 15);
__m256 _w0 = _mm256_loadu_ps(kptr);
__m256 _w1 = _mm256_loadu_ps(kptr + 8);
__m256 _w2 = _mm256_loadu_ps(kptr + 16);
__m256 _w3 = _mm256_loadu_ps(kptr + 24);
__m256 _w4 = _mm256_loadu_ps(kptr + 32);
__m256 _w5 = _mm256_loadu_ps(kptr + 40);
__m256 _w6 = _mm256_loadu_ps(kptr + 48);
__m256 _w7 = _mm256_loadu_ps(kptr + 56);
_sum0 = _mm256_fmadd_ps(_w0, _val00, _sum0);
_sum0 = _mm256_fmadd_ps(_w1, _val01, _sum0);
_sum0 = _mm256_fmadd_ps(_w2, _val02, _sum0);
_sum0 = _mm256_fmadd_ps(_w3, _val03, _sum0);
_sum0 = _mm256_fmadd_ps(_w4, _val04, _sum0);
_sum0 = _mm256_fmadd_ps(_w5, _val05, _sum0);
_sum0 = _mm256_fmadd_ps(_w6, _val06, _sum0);
_sum0 = _mm256_fmadd_ps(_w7, _val07, _sum0);
_sum1 = _mm256_fmadd_ps(_w0, _val10, _sum1);
_sum1 = _mm256_fmadd_ps(_w1, _val11, _sum1);
_sum1 = _mm256_fmadd_ps(_w2, _val12, _sum1);
_sum1 = _mm256_fmadd_ps(_w3, _val13, _sum1);
_sum1 = _mm256_fmadd_ps(_w4, _val14, _sum1);
_sum1 = _mm256_fmadd_ps(_w5, _val15, _sum1);
_sum1 = _mm256_fmadd_ps(_w6, _val16, _sum1);
_sum1 = _mm256_fmadd_ps(_w7, _val17, _sum1);
tmpptr += 16;
kptr += 64;
}
_mm256_storeu_ps(outptr, _sum0);
_mm256_storeu_ps(outptr + 8, _sum1);
outptr += 16;
}
for (; i < size; i++)
{
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
__m256 _sum = _bias0;
const float* kptr = (const float*)kernel + p * inch * 64;
for (int q = 0; q < inch; q++)
{
__m256 _val0 = _mm256_broadcast_ss(tmpptr);
__m256 _val1 = _mm256_broadcast_ss(tmpptr + 1);
__m256 _val2 = _mm256_broadcast_ss(tmpptr + 2);
__m256 _val3 = _mm256_broadcast_ss(tmpptr + 3);
__m256 _val4 = _mm256_broadcast_ss(tmpptr + 4);
__m256 _val5 = _mm256_broadcast_ss(tmpptr + 5);
__m256 _val6 = _mm256_broadcast_ss(tmpptr + 6);
__m256 _val7 = _mm256_broadcast_ss(tmpptr + 7);
__m256 _w0 = _mm256_loadu_ps(kptr);
__m256 _w1 = _mm256_loadu_ps(kptr + 8);
__m256 _w2 = _mm256_loadu_ps(kptr + 16);
__m256 _w3 = _mm256_loadu_ps(kptr + 24);
__m256 _w4 = _mm256_loadu_ps(kptr + 32);
__m256 _w5 = _mm256_loadu_ps(kptr + 40);
__m256 _w6 = _mm256_loadu_ps(kptr + 48);
__m256 _w7 = _mm256_loadu_ps(kptr + 56);
_sum = _mm256_fmadd_ps(_w0, _val0, _sum);
_sum = _mm256_fmadd_ps(_w1, _val1, _sum);
_sum = _mm256_fmadd_ps(_w2, _val2, _sum);
_sum = _mm256_fmadd_ps(_w3, _val3, _sum);
_sum = _mm256_fmadd_ps(_w4, _val4, _sum);
_sum = _mm256_fmadd_ps(_w5, _val5, _sum);
_sum = _mm256_fmadd_ps(_w6, _val6, _sum);
_sum = _mm256_fmadd_ps(_w7, _val7, _sum);
tmpptr += 8;
kptr += 64;
}
_mm256_storeu_ps(outptr, _sum);
outptr += 8;
}
}
}
static void conv1x1s2_pack8_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    // Stride-2 1x1 convolution: gather every second pixel (in both x and y)
    // into a contiguous pack8 blob, then reuse the stride-1 sgemm kernel.
    const int w = bottom_blob.w;
    const int channels = bottom_blob.c;
    const size_t elemsize = bottom_blob.elemsize;
    const int elempack = bottom_blob.elempack;

    const int outw = top_blob.w;
    const int outh = top_blob.h;

    // After finishing one output row, skip the remainder of the current input
    // row plus one whole input row (vertical stride 2); 8 floats per pixel.
    const int tailstep = (w - 2 * outw + w) * 8;

    Mat bottom_blob_shrinked;
    bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator);

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int q = 0; q < channels; q++)
    {
        const float* sptr = bottom_blob.channel(q);
        float* dptr = bottom_blob_shrinked.channel(q);

        for (int y = 0; y < outh; y++)
        {
            for (int x = 0; x < outw; x++)
            {
                _mm256_storeu_ps(dptr, _mm256_loadu_ps(sptr));
                sptr += 16; // two pack8 pixels = horizontal stride 2
                dptr += 8;
            }
            sptr += tailstep;
        }
    }

    conv1x1s1_sgemm_pack8_avx(bottom_blob_shrinked, top_blob, kernel, _bias, opt);
}
vector.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* Member functions for hypre_Vector class.
*
*****************************************************************************/
#include "seq_mv.h"
#include "_hypre_utilities.hpp" //RL: TODO vector_device.c, include cuda there
/*--------------------------------------------------------------------------
* hypre_SeqVectorCreate
*--------------------------------------------------------------------------*/
hypre_Vector *
hypre_SeqVectorCreate( HYPRE_Int size )
{
   /* Allocate the vector header on the host and fill in the defaults:
    * no data yet, one component vector, contiguous multivector storage,
    * and ownership of the (future) data array by this object. */
   hypre_Vector *v = hypre_CTAlloc(hypre_Vector, 1, HYPRE_MEMORY_HOST);

   hypre_VectorSize(v)                  = size;
   hypre_VectorData(v)                  = NULL;
   hypre_VectorNumVectors(v)            = 1;
   hypre_VectorMultiVecStorageMethod(v) = 0;
   hypre_VectorOwnsData(v)              = 1;

   /* Inherit the default memory location from the global hypre handle. */
   hypre_VectorMemoryLocation(v) = hypre_HandleMemoryLocation(hypre_handle());

   return v;
}
/*--------------------------------------------------------------------------
* hypre_SeqMultiVectorCreate
*--------------------------------------------------------------------------*/
hypre_Vector *
hypre_SeqMultiVectorCreate( HYPRE_Int size, HYPRE_Int num_vectors )
{
   /* A multivector is an ordinary vector header with the component
    * count overridden; storage is allocated later at initialize time. */
   hypre_Vector *v = hypre_SeqVectorCreate(size);
   hypre_VectorNumVectors(v) = num_vectors;
   return v;
}
/*--------------------------------------------------------------------------
* hypre_SeqVectorDestroy
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SeqVectorDestroy( hypre_Vector *vector )
{
   /* NULL is tolerated and treated as a no-op. */
   if (!vector)
   {
      return 0;
   }

   /* Free the data array only when this object owns it; the data may live
    * on host or device, while the header itself is always host memory. */
   if (hypre_VectorOwnsData(vector))
   {
      hypre_TFree(hypre_VectorData(vector), hypre_VectorMemoryLocation(vector));
   }
   hypre_TFree(vector, HYPRE_MEMORY_HOST);

   return 0;
}
/*--------------------------------------------------------------------------
* hypre_SeqVectorInitialize
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SeqVectorInitialize_v2( hypre_Vector *vector, HYPRE_MemoryLocation memory_location )
{
   HYPRE_Int ierr        = 0;
   HYPRE_Int size        = hypre_VectorSize(vector);
   HYPRE_Int num_vectors = hypre_VectorNumVectors(vector);

   hypre_VectorMemoryLocation(vector) = memory_location;

   /* Caveat: pre-existing data must already reside at `memory_location';
    * otherwise later accesses and frees will target the wrong space. */
   if (hypre_VectorData(vector) == NULL)
   {
      hypre_VectorData(vector) =
         hypre_CTAlloc(HYPRE_Complex, num_vectors * size, memory_location);
   }

   /* Set the strides according to the multivector storage layout. */
   switch (hypre_VectorMultiVecStorageMethod(vector))
   {
      case 0: /* component vectors stored contiguously */
         hypre_VectorVectorStride(vector) = size;
         hypre_VectorIndexStride(vector)  = 1;
         break;
      case 1: /* component vectors interleaved */
         hypre_VectorVectorStride(vector) = 1;
         hypre_VectorIndexStride(vector)  = num_vectors;
         break;
      default: /* unknown storage method */
         ++ierr;
         break;
   }

   return ierr;
}
HYPRE_Int
hypre_SeqVectorInitialize( hypre_Vector *vector )
{
   /* Initialize using the memory location already recorded in the vector. */
   return hypre_SeqVectorInitialize_v2(vector, hypre_VectorMemoryLocation(vector));
}
/*--------------------------------------------------------------------------
* hypre_SeqVectorSetDataOwner
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SeqVectorSetDataOwner( hypre_Vector *vector,
                             HYPRE_Int     owns_data )
{
   /* Record whether hypre_SeqVectorDestroy should free the data array. */
   hypre_VectorOwnsData(vector) = owns_data;
   return 0;
}
/*--------------------------------------------------------------------------
* ReadVector
*--------------------------------------------------------------------------*/
/* Read a vector from an ASCII file: first the size, then one value per line.
 * Returns a newly allocated host vector, or NULL if the file cannot be
 * opened. Caller owns the result and frees it with hypre_SeqVectorDestroy. */
hypre_Vector *
hypre_SeqVectorRead( char *file_name )
{
   hypre_Vector  *vector;
   FILE          *fp;
   HYPRE_Complex *data;
   HYPRE_Int      size;
   HYPRE_Int      j;

   /*----------------------------------------------------------
    * Read in the data
    *----------------------------------------------------------*/
   fp = fopen(file_name, "r");
   if (!fp)
   {
      /* Previously an unreadable file led to a NULL-stream dereference;
       * fail explicitly instead. */
      return NULL;
   }

   hypre_fscanf(fp, "%d", &size);

   vector = hypre_SeqVectorCreate(size);
   hypre_VectorMemoryLocation(vector) = HYPRE_MEMORY_HOST;
   hypre_SeqVectorInitialize(vector);

   data = hypre_VectorData(vector);
   for (j = 0; j < size; j++)
   {
      hypre_fscanf(fp, "%le", &data[j]);
   }

   fclose(fp);

   /* multivector code not written yet */
   hypre_assert( hypre_VectorNumVectors(vector) == 1 );

   return vector;
}
/*--------------------------------------------------------------------------
* hypre_SeqVectorPrint
*--------------------------------------------------------------------------*/
/* Write a vector (or multivector) to an ASCII file, one value per line.
 * For multivectors, each component vector is preceded by a "vector j" tag
 * and read through the stored vecstride/idxstride layout.
 * Returns 0 on success, nonzero if the file cannot be opened. */
HYPRE_Int
hypre_SeqVectorPrint( hypre_Vector *vector,
                      char         *file_name )
{
   FILE          *fp;
   HYPRE_Complex *data;
   HYPRE_Int      size, num_vectors, vecstride, idxstride;
   HYPRE_Int      i, j;
   HYPRE_Complex  value;
   HYPRE_Int      ierr = 0;

   num_vectors = hypre_VectorNumVectors(vector);
   vecstride   = hypre_VectorVectorStride(vector);
   idxstride   = hypre_VectorIndexStride(vector);

   /*----------------------------------------------------------
    * Print in the data
    *----------------------------------------------------------*/
   data = hypre_VectorData(vector);
   size = hypre_VectorSize(vector);

   fp = fopen(file_name, "w");
   if (!fp)
   {
      /* Previously an unwritable path led to a NULL-stream dereference;
       * report the failure to the caller instead. */
      return ++ierr;
   }

   if ( hypre_VectorNumVectors(vector) == 1 )
   {
      hypre_fprintf(fp, "%d\n", size);
   }
   else
   {
      hypre_fprintf(fp, "%d vectors of size %d\n", num_vectors, size );
   }

   if ( num_vectors > 1 )
   {
      for ( j = 0; j < num_vectors; ++j )
      {
         hypre_fprintf(fp, "vector %d\n", j );
         for (i = 0; i < size; i++)
         {
            value = data[ j * vecstride + i * idxstride ];
#ifdef HYPRE_COMPLEX
            hypre_fprintf(fp, "%.14e , %.14e\n",
                          hypre_creal(value), hypre_cimag(value));
#else
            hypre_fprintf(fp, "%.14e\n", value);
#endif
         }
      }
   }
   else
   {
      for (i = 0; i < size; i++)
      {
#ifdef HYPRE_COMPLEX
         hypre_fprintf(fp, "%.14e , %.14e\n",
                       hypre_creal(data[i]), hypre_cimag(data[i]));
#else
         hypre_fprintf(fp, "%.14e\n", data[i]);
#endif
      }
   }

   fclose(fp);

   return ierr;
}
/*--------------------------------------------------------------------------
* hypre_SeqVectorSetConstantValues
*--------------------------------------------------------------------------*/
/* Set every entry of v (across all component vectors) to `value'.
 * The fill runs on the device via thrust for CUDA builds, via OpenMP
 * target offload for device-OpenMP builds, and on the host (optionally
 * OpenMP-parallel) otherwise. Always returns 0. */
HYPRE_Int
hypre_SeqVectorSetConstantValues( hypre_Vector *v,
HYPRE_Complex value )
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_BLAS1] -= hypre_MPI_Wtime();
#endif
HYPRE_Complex *vector_data = hypre_VectorData(v);
HYPRE_Int size = hypre_VectorSize(v);
HYPRE_Int ierr = 0;
/* total entry count covers every component vector of a multivector */
size *= hypre_VectorNumVectors(v);
//hypre_SeqVectorPrefetch(v, HYPRE_MEMORY_DEVICE);
#if defined(HYPRE_USING_CUDA)
/* thrust rejects a zero count gracefully, but skip the launch entirely */
if (size > 0)
{
HYPRE_THRUST_CALL( fill_n, vector_data, size, value );
}
#else
HYPRE_Int i;
#if defined(HYPRE_USING_DEVICE_OPENMP)
#pragma omp target teams distribute parallel for private(i) is_device_ptr(vector_data)
#elif defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < size; i++)
{
vector_data[i] = value;
}
#endif /* defined(HYPRE_USING_CUDA) */
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_DEVICE_OPENMP)
/* ensure the device fill is complete before returning to the caller */
hypre_SyncCudaComputeStream(hypre_handle());
#endif
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_BLAS1] += hypre_MPI_Wtime();
#endif
return ierr;
}
/*--------------------------------------------------------------------------
* hypre_SeqVectorSetRandomValues
*
* returns vector of values randomly distributed between -1.0 and +1.0
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SeqVectorSetRandomValues( hypre_Vector *v,
                                HYPRE_Int     seed )
{
   /* Fill v with values uniformly distributed in (-1, +1), seeded by `seed`.
    * The generation loop stays serial because hypre_Rand() keeps global
    * state (see the original RDF remark about threading hazards). */
   HYPRE_Complex *v_data = hypre_VectorData(v);
   HYPRE_Int      total  = hypre_VectorSize(v) * hypre_VectorNumVectors(v);
   HYPRE_Int      k;

   hypre_SeedRand(seed);

   if (hypre_GetActualMemLocation(hypre_VectorMemoryLocation(v)) != hypre_MEMORY_HOST)
   {
      /* device-resident data: generate into a host scratch buffer,
       * then copy it over and release the scratch space */
      HYPRE_Complex *scratch = hypre_TAlloc(HYPRE_Complex, total, HYPRE_MEMORY_HOST);
      for (k = 0; k < total; k++)
      {
         scratch[k] = 2.0 * hypre_Rand() - 1.0;
      }
      hypre_TMemcpy(v_data, scratch, HYPRE_Complex, total,
                    hypre_VectorMemoryLocation(v), HYPRE_MEMORY_HOST);
      hypre_TFree(scratch, HYPRE_MEMORY_HOST);
   }
   else
   {
      /* host-resident data: fill in place */
      for (k = 0; k < total; k++)
      {
         v_data[k] = 2.0 * hypre_Rand() - 1.0;
      }
   }

   return 0;
}
/*--------------------------------------------------------------------------
* hypre_SeqVectorCopy
* copies data from x to y
* if size of x is larger than y only the first size_y elements of x are
* copied to y
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SeqVectorCopy( hypre_Vector *x,
                     hypre_Vector *y )
{
   /* Copy the data of x into y.  When x is longer than y, only the first
    * size(y) entries (per component vector) are copied.  The memcpy is
    * location-aware, so host/device combinations are handled by
    * hypre_TMemcpy. */
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_BLAS1] -= hypre_MPI_Wtime();
#endif
   size_t n = hypre_min( hypre_VectorSize(x), hypre_VectorSize(y) );
   n *= hypre_VectorNumVectors(x);
   hypre_TMemcpy( hypre_VectorData(y), hypre_VectorData(x), HYPRE_Complex, n,
                  hypre_VectorMemoryLocation(y), hypre_VectorMemoryLocation(x) );
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_BLAS1] += hypre_MPI_Wtime();
#endif
   return 0;
}
/*--------------------------------------------------------------------------
* hypre_SeqVectorCloneDeep
* Returns a complete copy of x - a deep copy, with its own copy of the data.
*--------------------------------------------------------------------------*/
hypre_Vector*
hypre_SeqVectorCloneDeep_v2( hypre_Vector *x, HYPRE_MemoryLocation memory_location )
{
   /* Deep copy of x: a new vector with its own data allocation in the
    * requested memory location, matching x's multivector layout. */
   hypre_Vector *clone = hypre_SeqMultiVectorCreate( hypre_VectorSize(x),
                                                     hypre_VectorNumVectors(x) );
   /* replicate the storage layout before allocating and copying data */
   hypre_VectorMultiVecStorageMethod(clone) = hypre_VectorMultiVecStorageMethod(x);
   hypre_VectorVectorStride(clone) = hypre_VectorVectorStride(x);
   hypre_VectorIndexStride(clone)  = hypre_VectorIndexStride(x);
   hypre_SeqVectorInitialize_v2(clone, memory_location);
   hypre_SeqVectorCopy(x, clone);
   return clone;
}
/* Deep copy of x, allocated in x's own memory location. */
hypre_Vector*
hypre_SeqVectorCloneDeep( hypre_Vector *x )
{
return hypre_SeqVectorCloneDeep_v2(x, hypre_VectorMemoryLocation(x));
}
/*--------------------------------------------------------------------------
* hypre_SeqVectorCloneShallow
* Returns a complete copy of x - a shallow copy, pointing the data of x
*--------------------------------------------------------------------------*/
hypre_Vector *
hypre_SeqVectorCloneShallow( hypre_Vector *x )
{
   /* Shallow copy of x: the returned vector aliases x's data array and is
    * marked non-owning, so destroying it never frees x's storage. */
   hypre_Vector *alias = hypre_SeqMultiVectorCreate( hypre_VectorSize(x),
                                                     hypre_VectorNumVectors(x) );
   hypre_VectorMultiVecStorageMethod(alias) = hypre_VectorMultiVecStorageMethod(x);
   hypre_VectorVectorStride(alias) = hypre_VectorVectorStride(x);
   hypre_VectorIndexStride(alias)  = hypre_VectorIndexStride(x);
   hypre_VectorMemoryLocation(alias) = hypre_VectorMemoryLocation(x);
   /* point at x's data and drop ownership BEFORE initializing, so
    * hypre_SeqVectorInitialize does not allocate a fresh buffer */
   hypre_VectorData(alias) = hypre_VectorData(x);
   hypre_SeqVectorSetDataOwner( alias, 0 );
   hypre_SeqVectorInitialize(alias);
   return alias;
}
/*--------------------------------------------------------------------------
* hypre_SeqVectorScale
*--------------------------------------------------------------------------*/
/* y := alpha * y.  Short-circuits alpha == 1 (no-op) and alpha == 0
 * (delegates to SetConstantValues).  Uses cuBLAS/thrust under CUDA,
 * OpenMP device/host loops otherwise.  Returns 0. */
HYPRE_Int
hypre_SeqVectorScale( HYPRE_Complex alpha,
hypre_Vector *y )
{
/* special cases */
if (alpha == 1.0)
{
return 0;
}
if (alpha == 0.0)
{
return hypre_SeqVectorSetConstantValues(y, 0.0);
}
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_BLAS1] -= hypre_MPI_Wtime();
#endif
HYPRE_Complex *y_data = hypre_VectorData(y);
HYPRE_Int size = hypre_VectorSize(y);
HYPRE_Int ierr = 0;
/* total entry count across all component vectors */
size *= hypre_VectorNumVectors(y);
//hypre_SeqVectorPrefetch(y, HYPRE_MEMORY_DEVICE);
#if defined(HYPRE_USING_CUDA)
#if defined(HYPRE_USING_CUBLAS)
/* NOTE(review): cublasDscal assumes real double data -- presumably
 * HYPRE_COMPLEX builds exclude this path; confirm build constraints */
HYPRE_CUBLAS_CALL( cublasDscal(hypre_HandleCublasHandle(hypre_handle()), size, &alpha, y_data, 1) );
#else
HYPRE_THRUST_CALL( transform, y_data, y_data + size, y_data, alpha * _1 );
#endif
#else
HYPRE_Int i;
#if defined(HYPRE_USING_DEVICE_OPENMP)
#pragma omp target teams distribute parallel for private(i) is_device_ptr(y_data)
#elif defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < size; i++)
{
y_data[i] *= alpha;
}
#endif /* defined(HYPRE_USING_CUDA) */
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_DEVICE_OPENMP)
hypre_SyncCudaComputeStream(hypre_handle());
#endif
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_BLAS1] += hypre_MPI_Wtime();
#endif
return ierr;
}
/*--------------------------------------------------------------------------
* hypre_SeqVectorAxpy
*--------------------------------------------------------------------------*/
/* y := alpha * x + y.  Size is taken from x (times its number of
 * component vectors); callers must ensure y is at least as long.
 * Uses cuBLAS/thrust under CUDA, OpenMP device/host loops otherwise.
 * Returns 0. */
HYPRE_Int
hypre_SeqVectorAxpy( HYPRE_Complex alpha,
hypre_Vector *x,
hypre_Vector *y )
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_BLAS1] -= hypre_MPI_Wtime();
#endif
HYPRE_Complex *x_data = hypre_VectorData(x);
HYPRE_Complex *y_data = hypre_VectorData(y);
HYPRE_Int size = hypre_VectorSize(x);
HYPRE_Int ierr = 0;
size *= hypre_VectorNumVectors(x);
//hypre_SeqVectorPrefetch(x, HYPRE_MEMORY_DEVICE);
//hypre_SeqVectorPrefetch(y, HYPRE_MEMORY_DEVICE);
#if defined(HYPRE_USING_CUDA)
#if defined(HYPRE_USING_CUBLAS)
/* NOTE(review): cublasDaxpy assumes real double data -- presumably
 * HYPRE_COMPLEX builds exclude this path; confirm build constraints */
HYPRE_CUBLAS_CALL( cublasDaxpy(hypre_HandleCublasHandle(hypre_handle()), size, &alpha, x_data, 1, y_data, 1) );
#else
HYPRE_THRUST_CALL( transform, x_data, x_data + size, y_data, y_data, alpha * _1 + _2 );
#endif
#else
HYPRE_Int i;
#if defined(HYPRE_USING_DEVICE_OPENMP)
#pragma omp target teams distribute parallel for private(i) is_device_ptr(y_data, x_data)
#elif defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < size; i++)
{
y_data[i] += alpha * x_data[i];
}
#endif /* defined(HYPRE_USING_CUDA) */
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_DEVICE_OPENMP)
hypre_SyncCudaComputeStream(hypre_handle());
#endif
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_BLAS1] += hypre_MPI_Wtime();
#endif
return ierr;
}
/* y = y + x ./ b */
HYPRE_Int
hypre_SeqVectorElmdivpy( hypre_Vector *x,
hypre_Vector *b,
hypre_Vector *y )
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_BLAS1] -= hypre_MPI_Wtime();
#endif
HYPRE_Complex *x_data = hypre_VectorData(x);
HYPRE_Complex *b_data = hypre_VectorData(b);
HYPRE_Complex *y_data = hypre_VectorData(y);
HYPRE_Int size = hypre_VectorSize(b);
#if defined(HYPRE_USING_CUDA)
//HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy2( hypre_VectorMemoryLocation(x), hypre_VectorMemoryLocation(b) );
//RL: TODO back to hypre_GetExecPolicy2 later
HYPRE_ExecutionPolicy exec = HYPRE_EXEC_DEVICE;
if (exec == HYPRE_EXEC_DEVICE)
{
//TODO
//hypre_SeqVectorElmdivpyDevice(x, b, y);
/*
#if defined(HYPRE_USING_DEVICE_OPENMP)
#pragma omp target teams distribute parallel for private(i) is_device_ptr(u_data,v_data,l1_norms)
#endif
*/
hypreDevice_IVAXPY(size, b_data, x_data, y_data);
}
else
#endif
{
HYPRE_Int i;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < size; i++)
{
y_data[i] += x_data[i] / b_data[i];
}
}
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_DEVICE_OPENMP)
hypre_SyncCudaComputeStream(hypre_handle());
#endif
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_BLAS1] += hypre_MPI_Wtime();
#endif
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_SeqVectorInnerProd
*--------------------------------------------------------------------------*/
/* Inner product of x and y.  Size is taken from x (times its number of
 * component vectors).  Note the host/OpenMP path conjugates y (not x):
 * result = sum conj(y[i]) * x[i]; the real-valued device paths are
 * equivalent since conjugation is a no-op there. */
HYPRE_Real
hypre_SeqVectorInnerProd( hypre_Vector *x,
hypre_Vector *y )
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_BLAS1] -= hypre_MPI_Wtime();
#endif
HYPRE_Complex *x_data = hypre_VectorData(x);
HYPRE_Complex *y_data = hypre_VectorData(y);
HYPRE_Int size = hypre_VectorSize(x);
HYPRE_Real result = 0.0;
size *= hypre_VectorNumVectors(x);
//hypre_SeqVectorPrefetch(x, HYPRE_MEMORY_DEVICE);
//hypre_SeqVectorPrefetch(y, HYPRE_MEMORY_DEVICE);
#if defined(HYPRE_USING_CUDA)
#ifndef HYPRE_COMPLEX
#if defined(HYPRE_USING_CUBLAS)
HYPRE_CUBLAS_CALL( cublasDdot(hypre_HandleCublasHandle(hypre_handle()), size, x_data, 1, y_data, 1, &result) );
#else
result = HYPRE_THRUST_CALL( inner_product, x_data, x_data + size, y_data, 0.0 );
#endif
#else
/* TODO */
#error "Complex inner product"
#endif
#else /* #if defined(HYPRE_USING_CUDA) */
HYPRE_Int i;
#if defined(HYPRE_USING_DEVICE_OPENMP)
#pragma omp target teams distribute parallel for private(i) reduction(+:result) is_device_ptr(y_data,x_data) map(result)
#elif defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) reduction(+:result) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < size; i++)
{
result += hypre_conj(y_data[i]) * x_data[i];
}
#endif /* defined(HYPRE_USING_CUDA) */
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_DEVICE_OPENMP)
hypre_SyncCudaComputeStream(hypre_handle());
#endif
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_BLAS1] += hypre_MPI_Wtime();
#endif
return result;
}
//TODO
/*--------------------------------------------------------------------------
* hypre_VectorSumElts:
* Returns the sum of all vector elements.
*--------------------------------------------------------------------------*/
/* Return the sum of all vector elements. */
HYPRE_Complex hypre_SeqVectorSumElts( hypre_Vector *vector )
{
   HYPRE_Complex *vals  = hypre_VectorData( vector );
   HYPRE_Int      n     = hypre_VectorSize( vector );
   HYPRE_Complex  total = 0;
   HYPRE_Int      k;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(k) reduction(+:total) HYPRE_SMP_SCHEDULE
#endif
   for (k = 0; k < n; k++)
   {
      total += vals[k];
   }
   return total;
}
/* Prefetch x's data toward the given memory location (unified-memory
 * builds only; a no-op otherwise).  Returns 0 on success, 1 when the
 * vector is not device/unified-resident. */
HYPRE_Int
hypre_SeqVectorPrefetch( hypre_Vector *x, HYPRE_MemoryLocation memory_location)
{
HYPRE_Int ierr = 0;
#ifdef HYPRE_USING_UNIFIED_MEMORY
/* prefetch only makes sense for unified (device-visible) allocations */
if (hypre_VectorMemoryLocation(x) != HYPRE_MEMORY_DEVICE)
{
/* hypre_error_w_msg(HYPRE_ERROR_GENERIC," Error! CUDA Prefetch with non-unified momory\n");*/
return 1;
}
HYPRE_Complex *x_data = hypre_VectorData(x);
HYPRE_Int size = hypre_VectorSize(x) * hypre_VectorNumVectors(x);
if (size == 0)
{
return ierr;
}
hypre_MemPrefetch(x_data, sizeof(HYPRE_Complex)*size, memory_location);
#endif
return ierr;
}
#if 0
/* y[i] = max(alpha*x[i], beta*y[i])
 * (Currently compiled out via the enclosing #if 0.)
 * Returns 0. */
HYPRE_Int
hypre_SeqVectorMax( HYPRE_Complex alpha,
                    hypre_Vector *x,
                    HYPRE_Complex beta,
                    hypre_Vector *y )
{
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_BLAS1] -= hypre_MPI_Wtime();
#endif
   HYPRE_Complex *x_data = hypre_VectorData(x);
   HYPRE_Complex *y_data = hypre_VectorData(y);
   HYPRE_Int      size   = hypre_VectorSize(x);
   HYPRE_Int      ierr   = 0;
   size *= hypre_VectorNumVectors(x);
   //hypre_SeqVectorPrefetch(x, HYPRE_MEMORY_DEVICE);
   //hypre_SeqVectorPrefetch(y, HYPRE_MEMORY_DEVICE);
#if defined(HYPRE_USING_CUDA)
   /* thrust functor moved inside the CUDA guard: thrust::maximum is not
    * available (and was previously referenced) in non-CUDA builds */
   thrust::maximum<HYPRE_Complex> mx;
   HYPRE_THRUST_CALL( transform,
                      thrust::make_transform_iterator(x_data,        alpha * _1),
                      thrust::make_transform_iterator(x_data + size, alpha * _1),
                      thrust::make_transform_iterator(y_data,        beta  * _1),
                      y_data,
                      mx );
#else
   HYPRE_Int i;
#if defined(HYPRE_USING_DEVICE_OPENMP)
#pragma omp target teams distribute parallel for private(i) is_device_ptr(y_data, x_data)
#elif defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < size; i++)
   {
      /* BUG FIX: was "y_data[i] += ...", which ADDED the max to y[i];
       * the contract above and the CUDA path both assign it. */
      y_data[i] = hypre_max(alpha * x_data[i], beta * y_data[i]);
   }
#endif /* defined(HYPRE_USING_CUDA) */
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_DEVICE_OPENMP)
   /* guard the sync like every sibling BLAS1 routine in this file */
   hypre_SyncCudaComputeStream(hypre_handle());
#endif
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_BLAS1] += hypre_MPI_Wtime();
#endif
   return ierr;
}
#endif
|
GB_binop__min_fp32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__min_fp32)
// A.*B function (eWiseMult): GB (_AemultB_08__min_fp32)
// A.*B function (eWiseMult): GB (_AemultB_02__min_fp32)
// A.*B function (eWiseMult): GB (_AemultB_04__min_fp32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__min_fp32)
// A*D function (colscale): GB (_AxD__min_fp32)
// D*A function (rowscale): GB (_DxB__min_fp32)
// C+=B function (dense accum): GB (_Cdense_accumB__min_fp32)
// C+=b function (dense accum): GB (_Cdense_accumb__min_fp32)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__min_fp32)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__min_fp32)
// C=scalar+B GB (_bind1st__min_fp32)
// C=scalar+B' GB (_bind1st_tran__min_fp32)
// C=A+scalar GB (_bind2nd__min_fp32)
// C=A'+scalar GB (_bind2nd_tran__min_fp32)
// C type: float
// A type: float
// A pattern? 0
// B type: float
// B pattern? 0
// BinaryOp: cij = fminf (aij, bij)
#define GB_ATYPE \
float
#define GB_BTYPE \
float
#define GB_CTYPE \
float
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
float aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
float bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
float t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = fminf (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MIN || GxB_NO_FP32 || GxB_NO_MIN_FP32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where all three matrices are dense; op fixed to fminf.
void GB (_Cdense_ewise3_accum__min_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; op fixed to fminf.
void GB (_Cdense_ewise3_noaccum__min_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix (min accum).
// Returns GrB_NO_VALUE when the operator is disabled at compile time.
GrB_Info GB (_Cdense_accumB__min_fp32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix (min accum).
// Returns GrB_NO_VALUE when the operator is disabled at compile time.
GrB_Info GB (_Cdense_accumb__min_fp32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type float
float bwork = (*((float *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// unreachable: the inner block above always returns (generated code)
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: column scale by a diagonal matrix D, using the min operator.
GrB_Info GB (_AxD__min_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *restrict Cx = (float *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: row scale by a diagonal matrix D, using the min operator.
GrB_Info GB (_DxB__min_fp32)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *restrict Cx = (float *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B with the min operator.
// When is_eWiseUnion is true, alpha/beta scalars substitute for entries
// present in only one of A or B.
GrB_Info GB (_AaddB__min_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
float alpha_scalar ;
float beta_scalar ;
if (is_eWiseUnion)
{
// unpack the typed union scalars only when they will be read
alpha_scalar = (*((float *) alpha_scalar_in)) ;
beta_scalar = (*((float *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult (method 08): C=A.*B with optional mask, C sparse/hypersparse.
GrB_Info GB (_AemultB_08__min_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B, A sparse/hyper, B bitmap/full.
// GB_BINOP_FLIP is 0 for min (commutative), so flipxy needs no handling.
GrB_Info GB (_AemultB_02__min_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 04): C<M> = A.*B, M sparse/hyper, A and B bitmap/full.
GrB_Info GB (_AemultB_04__min_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B (optionally masked) where C is bitmap.
GrB_Info GB (_AemultB_bitmap__min_fp32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = min(x, Bx): apply the operator with the scalar bound as 1st arg.
// Entries absent from the bitmap Bb are skipped; Cx may alias Bx.
GrB_Info GB (_bind1st__min_fp32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *Cx = (float *) Cx_output ;
float x = (*((float *) x_input)) ;
float *Bx = (float *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
float bij = GBX (Bx, p, false) ;
Cx [p] = fminf (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = min(Ax, y): apply the operator with the scalar bound as 2nd arg.
// Entries absent from the bitmap Ab are skipped; Cx may alias Ax.
GrB_Info GB (_bind2nd__min_fp32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
float *Cx = (float *) Cx_output ;
float *Ax = (float *) Ax_input ;
float y = (*((float *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
float aij = GBX (Ax, p, false) ;
Cx [p] = fminf (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = GBX (Ax, pA, false) ; \
Cx [pC] = fminf (x, aij) ; \
}
// C = min(x, A'): transpose A and apply the operator with x bound first.
// GB_CAST_OP (defined just above) supplies the per-entry computation.
GrB_Info GB (_bind1st_tran__min_fp32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
float
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float x = (*((const float *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code following this function
#undef GB_ATYPE
#define GB_ATYPE \
float
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = GBX (Ax, pA, false) ; \
Cx [pC] = fminf (aij, y) ; \
}
// C = min(A', y): transpose A and apply the operator with y bound second.
// GB_CAST_OP (defined just above) supplies the per-entry computation.
GrB_Info GB (_bind2nd_tran__min_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float y = (*((const float *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
table_based.h | #ifndef SMTBX_STRUCTURE_FACTORS_DIRECT_TABLE_BASED_H
#define SMTBX_STRUCTURE_FACTORS_DIRECT_TABLE_BASED_H
#include <smtbx/error.h>
#include <boost/algorithm/string.hpp>
#include <boost/lexical_cast.hpp>
#include <boost/shared_ptr.hpp>
#include <smtbx/structure_factors/direct/standard_xray.h>
#include <cctbx/miller/lookup_utils.h>
#include <fstream>
namespace smtbx { namespace structure_factors { namespace table_based {
/// Holds tabulated scatterer contributions read from a table file:
/// per-reflection rows of per-scatterer complex contributions, plus the
/// symmetry rotations they were expanded with.
template <typename FloatType>
class table_data {
public:
  typedef FloatType float_type;
  typedef std::complex<float_type> complex_type;
protected:
  // rows in order of original hkl index -> scatterer contribution
  af::shared<std::vector<complex_type> > data_;
  af::shared<cctbx::miller::index<> > miller_indices_;
  bool use_ad;
  af::shared<sgtbx::rot_mx> rot_mxs_;
  bool expanded;
public:
  // FIX: initializer list now follows the member declaration order
  // (use_ad before expanded).  Members are always initialized in
  // declaration order regardless of list order, so the old reversed
  // list only triggered -Wreorder, but keeping them consistent avoids
  // the warning and future confusion.
  table_data()
    : use_ad(false),
      expanded(false)
  {}
  /// Per-reflection rows of per-scatterer contributions.
  af::shared<std::vector<complex_type> > const &data() const {
    return data_;
  }
  /// Rotation matrices the table was generated for (empty if expanded).
  af::shared<sgtbx::rot_mx> const &rot_mxs() const {
    return rot_mxs_;
  }
  af::shared<cctbx::miller::index<> > const &miller_indices() const {
    return miller_indices_;
  }
  /// Whether automatic differentiation data is in use (set by readers).
  bool use_AD() const {
    return use_ad;
  }
  /// Whether the table already covers symmetry-expanded reflections.
  bool is_expanded() const {
    return expanded;
  }
};
/* Parses a scatterer-contribution table file into table_data.
 * File layout: "key: value" header lines (scatterers, AD, Symm, data),
 * then one line per reflection: "h k l" followed by one complex (or
 * real) contribution per scatterer.  A blank line terminates input. */
template <typename FloatType>
class table_reader : public table_data<FloatType> {
public:
typedef FloatType float_type;
typedef std::complex<float_type> complex_type;
private:
typedef table_data<FloatType> parent_t;
// Read and parse file_name; scatterer columns are matched to the given
// scatterers by (case-insensitive) label.
void read(af::shared<xray::scatterer<float_type> > const &scatterers,
const std::string &file_name)
{
using namespace std;
ifstream in_file(file_name.c_str());
string line;
vector<std::string> toks;
size_t lc = 0;
// maps file column order -> index into `scatterers`
vector<size_t> sc_indices(scatterers.size());
bool header_read = false;
while (std::getline(in_file, line)) {
lc++;
boost::trim(line);
// a blank line ends the table
if (line.empty()) {
break;
}
toks.clear();
// is header?
if (!header_read) {
boost::split(toks, line, boost::is_any_of(":"));
if (toks.size() < 2) {
continue;
}
if (boost::iequals(toks[0], "scatterers")) {
// space-separated scatterer labels, one per data column
std::vector<std::string> stoks;
boost::trim(toks[1]);
boost::split(stoks, toks[1], boost::is_any_of(" "));
SMTBX_ASSERT(stoks.size() == scatterers.size());
map<string, size_t> sc_map;
for (size_t sci = 0; sci < scatterers.size(); sci++) {
sc_map[boost::to_upper_copy(scatterers[sci].label)] = sci;
}
for (size_t sci = 0; sci < scatterers.size(); sci++) {
boost::to_upper(stoks[sci]);
map<string, size_t>::iterator fsci = sc_map.find(stoks[sci]);
SMTBX_ASSERT(fsci != sc_map.end());
sc_indices[sci] = fsci->second;
}
}
else if (boost::iequals(toks[0], "AD")) {
boost::trim(toks[1]);
// NOTE(review): this sets use_ad to true when the file says
// "AD: false" -- the comparison looks inverted; confirm against
// the program that writes this file format.
parent_t::use_ad = boost::iequals(toks[1], "false");
}
else if (boost::iequals(toks[0], "Symm")) {
boost::trim(toks[1]);
if (boost::iequals(toks[1], "expanded")) {
parent_t::expanded = true;
}
else {
// ';'-separated list of 9-integer rotation matrices
vector<std::string> symms_toks;
boost::split(symms_toks, toks[1], boost::is_any_of(";"));
for (size_t sti = 0; sti < symms_toks.size(); sti++) {
boost::trim(symms_toks[sti]);
if (symms_toks[sti].empty()) {
break;
}
vector<std::string> symm_toks;
boost::split(symm_toks, symms_toks[sti], boost::is_any_of(" "));
SMTBX_ASSERT(symm_toks.size() == 9);
sgtbx::rot_mx rmx;
for (size_t mei = 0; mei < 9; mei++) {
rmx[mei] = boost::lexical_cast<int>(symm_toks[mei]);
}
parent_t::rot_mxs_.push_back(rmx);
}
}
}
else if (boost::iequals(toks[0], "data")) {
// everything after this marker is reflection data
header_read = true;
}
}
// data
else {
boost::split(toks, line, boost::is_any_of(" "));
SMTBX_ASSERT(toks.size() == 3 + scatterers.size());
cctbx::miller::index<> mi(
atoi(toks[0].c_str()),
atoi(toks[1].c_str()),
atoi(toks[2].c_str()));
parent_t::miller_indices_.push_back(mi);
vector<complex_type> row;
row.resize(scatterers.size());
// NOTE(review): lexical_cast below can throw inside this parallel
// region (throwing across an OpenMP region is undefined), and the
// signed `sci` vs unsigned toks.size() comparison draws warnings;
// both are worth revisiting.
#pragma omp parallel for
for (int sci = 3; sci < toks.size(); sci++) {
// "re,im" -> complex; a bare number -> real-valued contribution
size_t ci = toks[sci].find(',');
if (ci != string::npos) {
complex_type v(
atof(toks[sci].substr(0, ci).c_str()),
atof(toks[sci].substr(ci + 1).c_str()));
row[sc_indices[sci - 3]] = v;
}
else {
row[sc_indices[sci - 3]] = complex_type(
boost::lexical_cast<float_type>(toks[sci]));
}
}
parent_t::data_.push_back(row);
}
}
}
public:
table_reader(af::shared<xray::scatterer<float_type> > const &scatterers,
const std::string &file_name)
{
read(scatterers, file_name);
}
};
/* Scatterer-contribution source backed by a table with no (or identity)
 * symmetry expansion: one contribution per (reflection, scatterer).
 * Reflections are resolved through a symmetry-aware Miller-index lookup. */
template <typename FloatType>
class table_based_isotropic
: public direct::one_scatterer_one_h::scatterer_contribution<FloatType>
{
typedef direct::one_scatterer_one_h::scatterer_contribution<FloatType>
base_type;
public:
typedef FloatType float_type;
typedef std::complex<float_type> complex_type;
private:
miller::lookup_utils::lookup_tensor<float_type> mi_lookup;
// hkl x scatterer x contribution
af::shared <std::vector<complex_type> > data;
public:
// Copy constructor
table_based_isotropic(const table_based_isotropic &tbsc)
:
mi_lookup(tbsc.mi_lookup),
data(tbsc.data)
{}
// Builds the lookup and copies the table rows; at most one rotation
// matrix is allowed (isotropic case, no symmetry expansion).
table_based_isotropic(
af::shared< xray::scatterer<float_type> > const &scatterers,
table_reader<FloatType> const &data_,
sgtbx::space_group const &space_group,
bool anomalous_flag)
:
data(data_.miller_indices().size())
{
SMTBX_ASSERT(data_.rot_mxs().size() <= 1);
for (size_t i = 0; i < data.size(); i++) {
data[i].resize(scatterers.size());
for (size_t j = 0; j < scatterers.size(); j++) {
data[i][j] = data_.data()[i][j];
}
}
mi_lookup = miller::lookup_utils::lookup_tensor<float_type>(
data_.miller_indices().const_ref(),
space_group,
anomalous_flag);
}
// Contribution of one scatterer to reflection h; asserts h is tabulated.
virtual complex_type get(std::size_t scatterer_idx,
miller::index<> const &h) const
{
long h_idx = mi_lookup.find_hkl(h);
SMTBX_ASSERT(h_idx >= 0);
return data[static_cast<size_t>(h_idx)][scatterer_idx];
}
// Not supported in the isotropic case (no per-symmetry rows).
virtual std::vector<complex_type> const &get_full(std::size_t scatterer_idx,
miller::index<> const &h) const
{
SMTBX_NOT_IMPLEMENTED();
throw 1;
}
// Table values do not depend on resolution; returns self unchanged.
virtual base_type &at_d_star_sq(
float_type d_star_sq)
{
return *this;
}
virtual bool is_spherical() const {
return true;
}
virtual base_type *raw_fork() const {
return new table_based_isotropic(*this);
}
};
// Table-backed scatterer contribution, anisotropic case: the file supplies a
// separate contribution for every symmetry operator, stored as n_smx blocks
// of hkl rows. The file's rotation-matrix order is remapped onto the space
// group's operator order (r_map) at construction time.
template <typename FloatType>
class table_based_anisotropic
: public direct::one_scatterer_one_h::scatterer_contribution<FloatType>
{
typedef direct::one_scatterer_one_h::scatterer_contribution<FloatType>
base_type;
public:
typedef FloatType float_type;
typedef std::complex<float_type> complex_type;
private:
miller::lookup_utils::lookup_tensor<float_type> mi_lookup;
// hkl x scatterer x hkl*r contribution
af::shared <af::shared<std::vector<complex_type> > > data;
public:
// Copy constructor
table_based_anisotropic(const table_based_anisotropic &tbsc)
:
mi_lookup(tbsc.mi_lookup),
data(tbsc.data)
{}
table_based_anisotropic(
af::shared< xray::scatterer<float_type> > const &scatterers,
table_reader<FloatType> const &data_,
sgtbx::space_group const &space_group,
bool anomalous_flag)
{
// The file must carry exactly one rotation matrix per space-group
// operator, and a whole block of rows for each of them.
SMTBX_ASSERT(data_.rot_mxs().size() == space_group.n_smx());
SMTBX_ASSERT((data_.data().size() % space_group.n_smx()) == 0);
// r_map[i] = index of the space-group operator matching the file's
// i-th rotation matrix; asserts if any matrix has no match.
std::vector<size_t> r_map;
r_map.resize(space_group.n_smx());
for (std::size_t i = 0; i < space_group.n_smx(); i++) {
sgtbx::rot_mx const& r = data_.rot_mxs()[i];
bool found = false;
for (size_t mi = 0; mi < space_group.n_smx(); mi++) {
if (r == space_group.smx(mi).r()) {
r_map[i] = mi;
found = true;
break;
}
}
SMTBX_ASSERT(found);
}
data.resize(data_.data().size() / space_group.n_smx());
af::shared<cctbx::miller::index<> > lookup_indices(data.size());
for (size_t hi = 0; hi < data.size(); hi++) {
af::shared<std::vector<complex_type> > row(scatterers.size());
for (size_t sci = 0; sci < scatterers.size(); sci++) {
std::vector<complex_type> h_row(space_group.n_smx());
for (size_t mi = 0; mi < space_group.n_smx(); mi++) {
// r_off: start of the file block holding operator mi's rows.
const size_t r_off = data.size() * mi;
miller::index<> h =
data_.miller_indices()[hi] * space_group.smx(r_map[mi]).r();
// Sanity check: the file's block layout matches h * R.
SMTBX_ASSERT(h == data_.miller_indices()[r_off + hi]);
h_row[r_map[mi]] = data_.data()[r_off + hi][sci];
}
row[sci] = h_row;
}
data[hi] = row;
lookup_indices[hi] = data_.miller_indices()[hi];
}
mi_lookup = miller::lookup_utils::lookup_tensor<float_type>(
lookup_indices.const_ref(),
space_group,
anomalous_flag);
}
// Single-value access is not meaningful for per-operator data.
virtual complex_type get(std::size_t scatterer_idx,
miller::index<> const &h) const
{
SMTBX_NOT_IMPLEMENTED();
throw 1;
}
// Contributions of one scatterer to h, one entry per symmetry operator
// (in space-group operator order). Asserts if h is not in the table.
virtual std::vector<complex_type> const &get_full(std::size_t scatterer_idx,
miller::index<> const &h) const
{
long h_idx = mi_lookup.find_hkl(h);
SMTBX_ASSERT(h_idx >= 0);
return data[static_cast<size_t>(h_idx)][scatterer_idx];
}
// Resolution-independent; return self.
virtual base_type &at_d_star_sq(
float_type d_star_sq)
{
return *this;
}
virtual bool is_spherical() const {
return false;
}
// Deep-copy for thread-local use; caller owns the result.
virtual base_type *raw_fork() const {
return new table_based_anisotropic(*this);
}
};
// Map-backed scatterer contribution, anisotropic case. The input table is
// already expanded over all symmetry operators, so every h * R_j is inserted
// directly into an std::map; get_full() assembles the per-operator row on
// the fly into the mutable scratch vector `tmp`.
template <typename FloatType>
class lookup_based_anisotropic
: public direct::one_scatterer_one_h::scatterer_contribution<FloatType>
{
typedef direct::one_scatterer_one_h::scatterer_contribution<FloatType>
base_type;
public:
typedef FloatType float_type;
typedef std::complex<float_type> complex_type;
private:
typedef std::map<cctbx::miller::index<>, std::size_t,
cctbx::miller::fast_less_than<> > lookup_t;
lookup_t mi_lookup;      // hkl -> row index into `data`
sgtbx::space_group const &space_group;
af::shared<std::vector<complex_type> > data; // row x scatterer
bool anomalous_flag;     // if false, -h falls back to conj(data[h])
// Scratch row returned by get_full(); sized n_smx, reused per call.
mutable std::vector<complex_type> tmp;
public:
// Copy constructor (tmp is only scratch, so just size it).
lookup_based_anisotropic(const lookup_based_anisotropic &lbsc)
:
mi_lookup(lbsc.mi_lookup),
space_group(lbsc.space_group),
data(lbsc.data),
anomalous_flag(lbsc.anomalous_flag),
tmp(lbsc.tmp.size())
{}
// Builds the lookup from a reader whose table is already expanded over
// symmetry; requires at most one rotation matrix in the file.
lookup_based_anisotropic(
af::shared< xray::scatterer<float_type> > const &scatterers,
table_reader<FloatType> const &data_,
sgtbx::space_group const &space_group,
bool anomalous_flag)
:
space_group(space_group),
data(data_.miller_indices().size()),
anomalous_flag(anomalous_flag),
tmp(space_group.n_smx())
{
SMTBX_ASSERT(data_.rot_mxs().size() <= 1);
SMTBX_ASSERT(data_.is_expanded());
for (size_t i = 0; i < data.size(); i++) {
mi_lookup[data_.miller_indices()[i]] = i;
data[i].resize(scatterers.size());
for (size_t j = 0; j < scatterers.size(); j++) {
data[i][j] = data_.data()[i][j];
}
}
}
// for testing: populate the table by evaluating an isotropic
// contribution at every h * R_j.
lookup_based_anisotropic(
uctbx::unit_cell const &unit_cell,
sgtbx::space_group const &space_group,
af::shared<xray::scatterer<float_type> > const &scatterers,
direct::one_scatterer_one_h::isotropic_scatterer_contribution<FloatType> &isc,
af::shared<cctbx::miller::index<> > const &indices)
:
space_group(space_group),
data(indices.size()*space_group.n_smx()),
// BUG FIX: anomalous_flag was missing from this initializer list, so
// get_full() read an uninitialized member (undefined behavior). false
// preserves the Friedel-mate (conjugate) fallback for missed lookups.
anomalous_flag(false),
tmp(space_group.n_smx())
{
for (size_t i = 0; i < indices.size(); i++) {
float_type d_star_sq = unit_cell.d_star_sq(indices[i]);
direct::one_scatterer_one_h::scatterer_contribution<FloatType> const& sc =
isc.at_d_star_sq(d_star_sq);
for (size_t j = 0; j < space_group.n_smx(); j++) {
size_t d_off = j * indices.size() + i;
miller::index<> h = indices[i] * space_group.smx(j).r();
mi_lookup[h] = d_off;
data[d_off].resize(scatterers.size());
for (size_t k = 0; k < scatterers.size(); k++) {
complex_type v = sc.get(k, h);
if (scatterers[k].flags.use_fp_fdp()) { // revert of applied...
v = complex_type(v.real() - scatterers[k].fp, 0);
}
data[d_off][k] = v;
}
}
}
}
// Single-value access is not meaningful for per-operator data.
virtual complex_type get(std::size_t scatterer_idx,
miller::index<> const &h) const
{
SMTBX_NOT_IMPLEMENTED();
throw 1;
}
// Contributions of one scatterer to h_, one entry per symmetry operator.
// If h*R is absent and the data are non-anomalous, uses conj() of the
// Friedel mate -h. Returns a reference to internal scratch storage that
// is overwritten by the next call (not thread-safe across callers).
virtual std::vector<complex_type> const &get_full(std::size_t scatterer_idx,
miller::index<> const &h_) const
{
for (std::size_t i = 0; i < space_group.n_smx(); i++) {
miller::index<> h = h_ * space_group.smx(i).r();
lookup_t::const_iterator l = mi_lookup.find(h);
if (l == mi_lookup.end() && !anomalous_flag) {
miller::index<> h_bar = -h;
l = mi_lookup.find(h_bar);
SMTBX_ASSERT(l != mi_lookup.end())(h_bar.as_string());
tmp[i] = std::conj(data[l->second][scatterer_idx]);
}
else{
SMTBX_ASSERT(l != mi_lookup.end())(h.as_string());
tmp[i] = data[l->second][scatterer_idx];
}
}
return tmp;
}
// Resolution-independent; return self.
virtual base_type &at_d_star_sq(
float_type d_star_sq)
{
return *this;
}
virtual bool is_spherical() const {
return false;
}
// Deep-copy for thread-local use; caller owns the result.
virtual base_type *raw_fork() const {
return new lookup_based_anisotropic(*this);
}
};
// Factory helpers choosing a scatterer_contribution implementation from the
// shape of the parsed table file.
template <typename FloatType>
struct builder {
// Parse `file_name` and pick the implementation:
//   - several rotation matrices      -> table_based_anisotropic
//   - one matrix, expanded table     -> lookup_based_anisotropic
//   - one matrix, compact table      -> table_based_isotropic
// Caller owns the returned object.
static direct::one_scatterer_one_h::scatterer_contribution<FloatType> *
build(
af::shared< xray::scatterer<FloatType> > const &scatterers,
std::string const &file_name,
sgtbx::space_group const &space_group,
bool anomalous_flag)
{
table_reader<FloatType> table(scatterers, file_name);
// Anomalous dispersion can only be honoured if the table provides it.
const bool use_ad = anomalous_flag && table.use_AD();
if (table.rot_mxs().size() > 1) {
return new table_based_anisotropic<FloatType>(
scatterers,
table,
space_group,
use_ad);
}
if (table.is_expanded()) {
return new lookup_based_anisotropic<FloatType>(
scatterers,
table,
space_group,
use_ad);
}
return new table_based_isotropic<FloatType>(
scatterers,
table,
space_group,
use_ad);
}
// Test-only factory: synthesize the lookup table from an isotropic
// contribution instead of reading a file. Caller owns the result.
static direct::one_scatterer_one_h::scatterer_contribution<FloatType> *
build_lookup_based_for_tests(
uctbx::unit_cell const &unit_cell,
sgtbx::space_group const &space_group,
af::shared<xray::scatterer<FloatType> > const &scatterers,
xray::scattering_type_registry const &scattering_type_registry,
af::shared<cctbx::miller::index<> > const &indices)
{
direct::one_scatterer_one_h::isotropic_scatterer_contribution<FloatType>
contribution(scatterers, scattering_type_registry);
return new lookup_based_anisotropic<FloatType>(
unit_cell,
space_group,
scatterers,
contribution,
indices);
}
};
}}} // smtbx::structure_factors::table_based
#endif // GUARD
|
16_omp_reduction.c | // clang-format off
// RUN: %c-to-llvm -fno-discard-value-names %omp_c_flags %s | %apply-typeart -typeart-alloca -call-filter -S 2>&1 | FileCheck %s
// RUN: %c-to-llvm -fno-discard-value-names %omp_c_flags %s | opt -O2 -S | %apply-typeart -typeart-alloca -call-filter -S 2>&1 | FileCheck %s
// RUN: %c-to-llvm -fno-discard-value-names %omp_c_flags %s | %apply-typeart -typeart-alloca -call-filter -S | FileCheck %s --check-prefix=check-inst
// RUN: %c-to-llvm -fno-discard-value-names %omp_c_flags %s | opt -O2 -S | %apply-typeart -typeart-alloca -call-filter -S | FileCheck %s --check-prefix=check-inst
// REQUIRES: openmp
// clang-format on
extern void MPI_send(void*);
/* Sum of the n floats in a[]; the OpenMP reduction gives every thread a
 * private partial sum that is combined when the parallel loop ends. */
float sum(const float* a, int n) {
float total = 0.;
#pragma omp parallel for reduction(+ : total)
for (int i = 0; i < n; i++) {
total += a[i];
}
return total;
}
/* Driver verified by the check-inst lines below: TypeArt must instrument
 * the stack slot of `loc` (its address escapes via MPI_send) and, per the
 * check-inst-not line, must not emit the OpenMP stack-alloc variant here. */
void foo() {
const int n = 10;
float array[n] = {0};
// check-inst: define {{.*}} @foo
// check-inst: %loc = alloca
// check-inst: %0 = bitcast float* %loc to i8*
// check-inst: call void @__typeart_alloc_stack(i8* %0, i32 5, i64 1)
// check-inst-not: __typeart_alloc_stack_omp
float loc = sum(array, n);
MPI_send((void*)&loc);
}
// CHECK: TypeArtPass [Heap & Stack]
// CHECK: Malloc : 0
// CHECK: Free : 0
// CHECK: Alloca : 1
// CHECK: Global : 0
|
simdtruedep-orig-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
// This one has data races due to true dependence.
// But data races happen at instruction level, not thread level.
#include <stdio.h>
int main(int argc, char* argv[])
{
int i;
int len=100;
int a[100], b[100];
/* Initialize a[i] = i and b[i] = i+1. */
for (i=0;i<len;i++)
{
a[i]=i;
b[i]=i+1;
}
/* Loop-carried true dependence: iteration i writes a[i+1], which
 * iteration i+1 reads. With `omp simd` the compiler may vectorize this,
 * so (per the file header) the race manifests at the SIMD-instruction
 * level. The dependence is INTENTIONAL -- this is a DataRaceBench
 * positive case; do not "fix" it. */
#pragma omp simd
for (i=0;i<len-1;i++)
a[i+1]=a[i]+b[i];
for (i=0;i<len;i++)
printf("i=%d a[%d]=%d\n",i,i,a[i]);
return 0;
}
|
pr29965-5.c | /* PR middle-end/29965 */
/* Test that OpenMP construct bodies which never return don't cause ICEs. */
/* { dg-do compile } */
/* { dg-options "-O2 -fopenmp" } */
extern void baz (void) __attribute__ ((noreturn));
/* Worksharing loop whose body never terminates (infinite inner loop);
   the test only checks that the compiler does not ICE (PR 29965). */
void
foo1 (void)
{
int i;
#pragma omp for schedule (static)
for (i = 0; i < 2834; i++)
for (;;)
;
}
/* Worksharing loop whose body calls a noreturn function; compile-only. */
void
bar1 (void)
{
int i;
#pragma omp for schedule (static)
for (i = 0; i < 2834; i++)
baz ();
}
/* Same as foo1 but with a combined parallel-for construct. */
void
foo2 (void)
{
int i;
#pragma omp parallel for schedule (static)
for (i = 0; i < 2834; i++)
for (;;)
;
}
/* Same as bar1 but with a combined parallel-for construct. */
void
bar2 (void)
{
int i;
#pragma omp parallel for schedule (static)
for (i = 0; i < 2834; i++)
baz ();
}
|
util.h | #pragma once
#include "datatypes.h"
#include "timing.h"
// Device-side agent/location bookkeeping helpers.
class Util {
public:
// Rebuilds the per-location agent lists from locationOfAgents:
// locationAgentList grouped by location with locationListOffsets as
// CSR-style offsets into it.
// NOTE(review): only the declaration is visible here -- behavior is
// inferred from parameter names; confirm against the implementation.
static void updatePerLocationAgentLists(const thrust::device_vector<unsigned>& locationOfAgents,
thrust::device_vector<unsigned>& locationIdsOfAgents,
thrust::device_vector<unsigned>& locationAgentList,
thrust::device_vector<unsigned>& locationListOffsets);
};
#if THRUST_DEVICE_SYSTEM == THRUST_DEVICE_SYSTEM_CUDA
// CUDA kernel, one thread per location l: accumulates lam(agent state) over
// the agents in the CSR range [offsets[l], offsets[l+1]) into
// fullInfectedCountsPtr[l]. No atomics needed -- each l has one owner.
template<typename UnaryFunction, typename Count_t, typename PPState_t>
__global__ void reduce_by_location_kernel(unsigned* locationListOffsetsPtr,
unsigned* locationAgentListPtr,
Count_t* fullInfectedCountsPtr,
PPState_t* PPValuesPtr,
unsigned numLocations,
UnaryFunction lam) {
unsigned l = threadIdx.x + blockIdx.x * blockDim.x;
if (l < numLocations) {
for (unsigned agent = locationListOffsetsPtr[l]; agent < locationListOffsetsPtr[l + 1]; agent++) {
fullInfectedCountsPtr[l] += lam(PPValuesPtr[locationAgentListPtr[agent]]);
}
}
}
// CUDA kernel, one thread per agent: atomically adds lam(agent state) into
// the counter of that agent's location. Needs no precomputed offsets, but
// requires atomicAdd support for Count_t.
template<typename UnaryFunction, typename Count_t, typename PPState_t>
__global__ void reduce_by_location_kernel_atomics(const unsigned* agentLocationsPtr,
Count_t* fullInfectedCountsPtr,
PPState_t* PPValuesPtr,
unsigned numAgents,
UnaryFunction lam) {
unsigned agent = threadIdx.x + blockIdx.x * blockDim.x;
if (agent < numAgents) { atomicAdd(&fullInfectedCountsPtr[agentLocationsPtr[agent]], lam(PPValuesPtr[agent])); }
}
#endif
// Per-location reduction of lam(agent state) into fullInfectedCounts.
// Fast path: a single location reduces the whole PPValues range at once.
// Otherwise dispatches on the thrust device system: an OpenMP loop over
// locations (race-free -- each l is written by one iteration), or the
// atomics CUDA kernel above.
template<typename UnaryFunction, typename Count_t, typename PPState_t>
void reduce_by_location(thrust::device_vector<unsigned>& locationListOffsets,
thrust::device_vector<unsigned>& locationAgentList,
thrust::device_vector<Count_t>& fullInfectedCounts,
thrust::device_vector<PPState_t>& PPValues,
const thrust::device_vector<unsigned>& agentLocations,
UnaryFunction lam) {
unsigned numLocations = locationListOffsets.size() - 1;
unsigned* locationListOffsetsPtr = thrust::raw_pointer_cast(locationListOffsets.data());
Count_t* fullInfectedCountsPtr = thrust::raw_pointer_cast(fullInfectedCounts.data());
PPState_t* PPValuesPtr = thrust::raw_pointer_cast(PPValues.data());
const unsigned* agentLocationsPtr = thrust::raw_pointer_cast(agentLocations.data());
unsigned* locationAgentListPtr = thrust::raw_pointer_cast(locationAgentList.data());
unsigned numAgents = PPValues.size();
// PROFILE_FUNCTION();
if (numLocations == 1) {
fullInfectedCounts[0] = thrust::reduce(thrust::make_transform_iterator(PPValues.begin(), lam),
thrust::make_transform_iterator(PPValues.end(), lam),
(Count_t)0.0f);
} else {
#if THRUST_DEVICE_SYSTEM == THRUST_DEVICE_SYSTEM_OMP
#pragma omp parallel for
for (unsigned l = 0; l < numLocations; l++) {
for (unsigned agent = locationListOffsetsPtr[l]; agent < locationListOffsetsPtr[l + 1]; agent++) {
fullInfectedCountsPtr[l] += lam(PPValuesPtr[locationAgentListPtr[agent]]);
}
}
#elif THRUST_DEVICE_SYSTEM == THRUST_DEVICE_SYSTEM_CUDA
#define ATOMICS
#ifdef ATOMICS
reduce_by_location_kernel_atomics<<<(numAgents - 1) / 256 + 1, 256>>>(
agentLocationsPtr, fullInfectedCountsPtr, PPValuesPtr, numAgents, lam);
#else
#error \
"util.cpp's locationListOffsets computation CUDA pathway relies on atomics version, as this one needs locationListOffsets to already exist"
reduce_by_location_kernel<<<(numLocations - 1) / 256 + 1, 256>>>(
locationListOffsetsPtr, locationAgentListPtr, fullInfectedCountsPtr, PPValuesPtr, numLocations, lam);
#endif
cudaDeviceSynchronize();
#endif
}
} |
GB_to_nonhyper.c | //------------------------------------------------------------------------------
// GB_to_nonhyper: convert a matrix to non-hypersparse form
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// On input, the matrix may have shallow A->p and A->h content; it is safely
// removed. On output, the matrix is always non-hypersparse (even if out of
// memory). If the input matrix is hypersparse, it is given a new A->p that is
// not shallow. If the input matrix is already non-hypersparse, nothing is
// changed (and in that case A->p remains shallow on output if shallow on
// input). The A->x and A->i content is not changed; it remains in whatever
// shallow/non-shallow state that it had on input).
// If an out-of-memory condition occurs, all content of the matrix is cleared.
// The input matrix may be jumbled; this is not an error condition.
#include "GB.h"
GrB_Info GB_to_nonhyper // convert a matrix to non-hypersparse
(
GrB_Matrix A, // matrix to convert to non-hypersparse
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
ASSERT_OK_OR_JUMBLED (GB_check (A, "A being converted to nonhyper", GB0)) ;
ASSERT (GB_ZOMBIES_OK (A)) ;
//--------------------------------------------------------------------------
// convert A to non-hypersparse form
//--------------------------------------------------------------------------
if (A->is_hyper)
{
//----------------------------------------------------------------------
// determine the number of threads to use
//----------------------------------------------------------------------
int64_t n = A->vdim ;
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
int nthreads = GB_nthreads (n, chunk, nthreads_max) ;
// oversubscribe tasks (8 per thread) for load balance with the
// dynamic schedule below, clamped to [1, n].
int ntasks = (nthreads == 1) ? 1 : (8 * nthreads) ;
ntasks = GB_IMIN (ntasks, n) ;
ntasks = GB_IMAX (ntasks, 1) ;
//----------------------------------------------------------------------
// allocate the new Ap array, of size n+1
//----------------------------------------------------------------------
int64_t *restrict Ap_new ;
GB_MALLOC_MEMORY (Ap_new, n+1, sizeof (int64_t)) ;
if (Ap_new == NULL)
{
// out of memory
A->is_hyper = false ; // A is non-hypersparse, but invalid
GB_PHIX_FREE (A) ;
return (GB_OUT_OF_MEMORY) ;
}
#ifdef GB_DEBUG
// to ensure all values of Ap_new are assigned below.
for (int64_t j = 0 ; j <= n ; j++) Ap_new [j] = -99999 ;
#endif
//----------------------------------------------------------------------
// get the old hyperlist
//----------------------------------------------------------------------
int64_t nvec = A->nvec ; // # of vectors in Ah_old
int64_t *restrict Ap_old = A->p ; // size nvec+1
int64_t *restrict Ah_old = A->h ; // size nvec
int64_t nvec_nonempty = 0 ; // recompute A->nvec_nonempty
int64_t anz = GB_NNZ (A) ;
//----------------------------------------------------------------------
// construct the new vector pointers
//----------------------------------------------------------------------
// each task fills a disjoint slice Ap_new [jstart:jend-1]; the only
// shared update is the nvec_nonempty count, handled by the reduction.
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
reduction(+:nvec_nonempty)
for (int tid = 0 ; tid < ntasks ; tid++)
{
int64_t jstart, jend, my_nvec_nonempty = 0 ;
GB_PARTITION (jstart, jend, n, tid, ntasks) ;
ASSERT (0 <= jstart && jstart <= jend && jend <= n) ;
// task tid computes Ap_new [jstart:jend-1] from Ap_old, Ah_old.
// GB_BINARY_SPLIT_SEARCH of Ah_old [0..nvec-1] for jstart:
// If found is true then Ah_old [k] == jstart.
// If found is false, and nvec > 0 then
// Ah_old [0 ... k-1] < jstart < Ah_old [k ... nvec-1]
// Whether or not i is found, if nvec > 0
// Ah_old [0 ... k-1] < jstart <= Ah_old [k ... nvec-1]
// If nvec == 0, then k == 0 and found will be false. In this
// case, jstart cannot be compared with any content of Ah_old,
// since Ah_old is completely empty (Ah_old [0] is invalid).
int64_t k = 0, pright = nvec-1 ;
bool found ;
GB_BINARY_SPLIT_SEARCH (jstart, Ah_old, k, pright, found) ;
ASSERT (k >= 0 && k <= nvec) ;
ASSERT (GB_IMPLIES (nvec == 0, !found && k == 0)) ;
ASSERT (GB_IMPLIES (found, jstart == Ah_old [k])) ;
ASSERT (GB_IMPLIES (!found && k < nvec, jstart < Ah_old [k])) ;
// Let jk = Ah_old [k], jlast = Ah_old [k-1], and pk = Ap_old [k].
// Then Ap_new [jlast+1:jk] must be set to pk. This must be done
// for all k = 0:nvec-1. In addition, the last vector k=nvec-1
// must be terminated by setting Ap_new [jk+1:n-1] to Ap_old [nvec].
// A task owns the kth vector if jk is in jstart:jend-1, inclusive.
// It counts all non-empty vectors that it owns. However, the task
// must also set Ap_new [...] = pk for any jlast+1:jk that overlaps
// jstart:jend-1, even if it does not own that particular vector k.
// This happens only at the tail end of jstart:jend-1.
int64_t jlast = (k == 0) ? (-1) : Ah_old [k-1] ;
jlast = GB_IMAX (jstart-1, jlast) ;
bool done = false ;
for ( ; k <= nvec && !done ; k++)
{
//--------------------------------------------------------------
// get the kth vector in Ah_old, which is vector index jk.
//--------------------------------------------------------------
int64_t jk = (k < nvec) ? Ah_old [k] : n ;
int64_t pk = (k < nvec) ? Ap_old [k] : anz ;
//--------------------------------------------------------------
// determine if this task owns jk
//--------------------------------------------------------------
int64_t jfin ;
if (jk >= jend)
{
// This is the last iteration for this task. This task
// does not own the kth vector. However, it does own the
// vector indices jlast+1:jend-1, and these vectors must
// be handled by this task.
jfin = jend - 1 ;
done = true ;
}
else
{
// This task owns the kth vector, which is vector index jk.
// Ap must be set to pk for all vector indices jlast+1:jk.
jfin = jk ;
ASSERT (k >= 0 && k < nvec && nvec > 0) ;
if (pk < Ap_old [k+1]) my_nvec_nonempty++ ;
}
//--------------------------------------------------------------
// set Ap_new for this vector
//--------------------------------------------------------------
// Ap_new [jlast+1:jk] must be set to pk. This tasks handles
// the intersection of jlast+1:jk with jstart:jend-1.
for (int64_t j = jlast+1 ; j <= jfin ; j++)
{
Ap_new [j] = pk ;
}
//--------------------------------------------------------------
// keep track of the prior vector index
//--------------------------------------------------------------
jlast = jk ;
}
nvec_nonempty += my_nvec_nonempty ;
//------------------------------------------------------------------
// no task owns Ap_new [n] so it is set by the last task
//------------------------------------------------------------------
if (tid == ntasks-1)
{
ASSERT (jend == n) ;
Ap_new [n] = anz ;
}
}
// free the old A->p and A->h hyperlist content.
// this clears A->nvec_nonempty so it must be restored below.
GB_ph_free (A) ;
// transplant the new vector pointers; matrix is no longer hypersparse
A->p = Ap_new ;
A->h = NULL ;
A->is_hyper = false ;
A->nvec = n ;
A->nvec_nonempty = nvec_nonempty ;
A->plen = n ;
A->p_shallow = false ;
A->h_shallow = false ;
A->magic = GB_MAGIC ;
ASSERT (anz == GB_NNZ (A)) ;
ASSERT (A->nvec_nonempty == GB_nvec_nonempty (A, Context)) ;
}
//--------------------------------------------------------------------------
// A is now in non-hypersparse form
//--------------------------------------------------------------------------
ASSERT_OK_OR_JUMBLED (GB_check (A, "A converted to nonhypersparse", GB0)) ;
ASSERT (!(A->is_hyper)) ;
return (GrB_SUCCESS) ;
}
|
sum_int.c | //sum.c
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/timeb.h>
#include <malloc.h>
#define N_RUNS 1000
#define N 120000
// Wall-clock time in seconds, with millisecond resolution (via ftime).
double read_timer() {
    struct timeb now;
    ftime(&now);
    return (double) now.time + (double) now.millitm / 1000.0;
}
// Fill the N-element array X with pseudo-random integers in roughly [0, 10].
void init(int *X) {
    for (int idx = 0; idx < N; ++idx) {
        X[idx] = (int)rand()/(int)(RAND_MAX/10.0);
    }
}
// Sum the N ints in X using an OpenMP SIMD reduction.
int sum(int *X) {
    int acc = 0;
#pragma omp simd reduction(+:acc)
    for (int idx = 0; idx < N; ++idx) {
        acc += X[idx];
    }
    return acc;
}
// Debug helper: plain scalar reference implementation of sum().
int sum_serial(int *X) {
    int acc = 0;
    for (int idx = 0; idx < N; ++idx) {
        acc += X[idx];
    }
    return acc;
}
// Debug helper: print the first 8 entries of vector as "[a b c ... ]".
void print_vector(int *vector) {
    printf("[");
    for (int k = 0; k < 8; ++k) {
        printf("%d ", vector[k]);
    }
    puts("]");
}
int main(int argc, char **argv) {
    // Allocate and randomly initialize the input vector.
    int *X = malloc(sizeof(int)*N);
    if (X == NULL) {
        fprintf(stderr, "out of memory allocating %zu bytes\n", sizeof(int)*(size_t)N);
        return 1;
    }
    int result = 0, result_serial = 0;
    srand(time(NULL));
    init(X);
    // Time N_RUNS repetitions of the SIMD sum.
    double start = read_timer();
    for (int i = 0; i<N_RUNS; i++)
        result = sum(X);
    double t = (read_timer() - start);
    // Time N_RUNS repetitions of the serial reference.
    double start_serial = read_timer();
    for (int i = 0; i<N_RUNS; i++)
        result_serial = sum_serial(X);
    double t_serial = (read_timer() - start_serial);
    print_vector(X);
    puts("=\n");
    printf("SIMD: %d\n", result);
    puts("---------------------------------");
    printf("Serial: %d\n", result_serial);
    // BUG FIX: each run of sum() performs N additions, so the total op count
    // is N * N_RUNS. The previous (2.0*N)*N*N_RUNS was a matrix-multiply
    // op count and inflated the reported GFLOPS by a factor of 2*N.
    double gflops = ((double) N * N_RUNS) / (1.0e9 * t);
    double gflops_serial = ((double) N * N_RUNS) / (1.0e9 * t_serial);
    printf("==================================================================\n");
    printf("Performance:\t\t\tRuntime (s)\t GFLOPS\n");
    printf("------------------------------------------------------------------\n");
    printf("Sum (SIMD):\t\t%4f\t%4f\n", t, gflops);
    printf("Sum (Serial):\t\t%4f\t%4f\n", t_serial, gflops_serial);
    printf("Correctness check: %d\n", result_serial - result);
    free(X);
    return 0;
}
|
clip.h | /* Copyright 2021 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef __NUMPY_CLIP_H__
#define __NUMPY_CLIP_H__
#include "point_task.h"
namespace legate {
namespace numpy {
// Clamp functor: returns min if a < min, max if a > max, otherwise a.
// Marked __CUDA_HD__ so it is callable from both host and device code.
template <class T>
struct ClipOperation {
using argument_type = T;
constexpr static auto op_code = NumPyOpCode::NUMPY_CLIP;
__CUDA_HD__ constexpr T operator()(const T& a, const T min, const T max) const
{
return (a < min) ? min : (a > max) ? max : a;
}
};
#if defined(LEGATE_USE_CUDA) && defined(__CUDACC__)
// CUDA kernel: one thread per element; clips in[point] into [min, max] and
// writes the result to out[point]. Threads past args.volume exit early.
template <int DIM, typename T, typename Args>
__global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM) gpu_clip(const Args args)
{
const size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= args.volume) return;
const Legion::Point<DIM> point = args.pitches.unflatten(idx, args.rect.lo);
ClipOperation<T> func;
args.out[point] = func(args.in[point], args.min, args.max);
}
// CUDA kernel: in-place variant of gpu_clip; clips args.inout[point]
// into [min, max] directly.
template <int DIM, typename T, typename Args>
__global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
gpu_clip_inplace(const Args args)
{
const size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= args.volume) return;
const Legion::Point<DIM> point = args.pitches.unflatten(idx, args.rect.lo);
ClipOperation<T> func;
args.inout[point] = func(args.inout[point], args.min, args.max);
}
#endif
// Clip is like a unary operation but with some state for its operator:
// the min/max bounds arrive as futures. Out-of-place variant: writes
// clip(in) to a separate output region.
template <class T>
class ClipTask : public PointTask<ClipTask<T>> {
private:
using argument_type = typename ClipOperation<T>::argument_type;
using result_type = typename ClipOperation<T>::argument_type;
public:
static const int TASK_ID =
task_id<ClipOperation<T>::op_code, NUMPY_NORMAL_VARIANT_OFFSET, argument_type, result_type>;
// out_region = op in_region;
static const int REGIONS = 2;
// Unpacked task arguments; deserialize() returns true when both regions
// are dense row-major so the loop can run over raw pointers.
template <int N>
struct DeserializedArgs {
Legion::Rect<N> rect;
AccessorWO<result_type, N> out;
AccessorRO<argument_type, N> in;
Pitches<N - 1> pitches;
size_t volume;
argument_type min;
argument_type max;
result_type* outptr;
const argument_type* inptr;
bool deserialize(LegateDeserializer& derez,
const Legion::Task* task,
const std::vector<Legion::PhysicalRegion>& regions)
{
rect = NumPyProjectionFunctor::unpack_shape<N>(task, derez);
out = derez.unpack_accessor_WO<result_type, N>(regions[0], rect);
in = derez.unpack_accessor_RO<argument_type, N>(regions[1], rect);
min = task->futures[0].get_result<argument_type>(true /*silence warnings*/);
max = task->futures[1].get_result<argument_type>(true /*silence warnings*/);
volume = pitches.flatten(rect);
#ifndef LEGION_BOUNDS_CHECKS
// Check to see if this is dense or not
return out.accessor.is_dense_row_major(rect) && in.accessor.is_dense_row_major(rect) &&
(outptr = out.ptr(rect)) && (inptr = in.ptr(rect));
#else
// No dense execution if we're doing bounds checks
return false;
#endif
}
};
// CPU variant: raw-pointer loop when dense, generic accessor loop otherwise.
template <int DIM>
static void dispatch_cpu(const Legion::Task* task,
const std::vector<Legion::PhysicalRegion>& regions,
LegateDeserializer& derez)
{
DeserializedArgs<DIM> args;
const bool dense = args.deserialize(derez, task, regions);
if (args.volume == 0) return;
ClipOperation<T> func;
if (dense) {
for (size_t idx = 0; idx < args.volume; ++idx)
args.outptr[idx] = func(args.inptr[idx], args.min, args.max);
} else {
CPULoop<DIM>::unary_loop(func, args.out, args.in, args.rect, args.min, args.max);
}
}
#ifdef LEGATE_USE_OPENMP
// OpenMP variant: same structure as the CPU variant with a parallel loop.
template <int DIM>
static void dispatch_omp(const Legion::Task* task,
const std::vector<Legion::PhysicalRegion>& regions,
LegateDeserializer& derez)
{
DeserializedArgs<DIM> args;
const bool dense = args.deserialize(derez, task, regions);
if (args.volume == 0) return;
ClipOperation<T> func;
if (dense) {
#pragma omp parallel for schedule(static)
for (size_t idx = 0; idx < args.volume; ++idx) {
args.outptr[idx] = func(args.inptr[idx], args.min, args.max);
}
} else {
OMPLoop<DIM>::unary_loop(func, args.out, args.in, args.rect, args.min, args.max);
}
}
#endif
#if defined(LEGATE_USE_CUDA) && defined(__CUDACC__)
// GPU variant: launches gpu_clip with one thread per element.
template <int DIM>
static void dispatch_gpu(const Legion::Task* task,
const std::vector<Legion::PhysicalRegion>& regions,
LegateDeserializer& derez)
{
DeserializedArgs<DIM> args;
args.deserialize(derez, task, regions);
if (args.volume == 0) return;
const size_t blocks = (args.volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
gpu_clip<DIM, T, DeserializedArgs<DIM>><<<blocks, THREADS_PER_BLOCK>>>(args);
}
#elif defined(LEGATE_USE_CUDA)
// Declaration only: defined in the .cu translation unit.
template <int DIM>
static void dispatch_gpu(const Legion::Task* task,
const std::vector<Legion::PhysicalRegion>& regions,
LegateDeserializer& derez);
#endif
};
template <typename T>
class ClipInplace : public PointTask<ClipInplace<T>> {
private:
using argument_type = typename ClipOperation<T>::argument_type;
using result_type = typename ClipOperation<T>::argument_type;
public:
static const int TASK_ID =
task_id<ClipOperation<T>::op_code, NUMPY_INPLACE_VARIANT_OFFSET, result_type, argument_type>;
// inout_region = op(inout_region)
static const int REGIONS = 1;
template <int N>
struct DeserializedArgs {
Legion::Rect<N> rect;
AccessorRW<result_type, N> inout;
Pitches<N - 1> pitches;
size_t volume;
argument_type min;
argument_type max;
argument_type* inoutptr;
bool deserialize(LegateDeserializer& derez,
const Legion::Task* task,
const std::vector<Legion::PhysicalRegion>& regions)
{
rect = NumPyProjectionFunctor::unpack_shape<N>(task, derez);
inout = derez.unpack_accessor_RW<result_type, N>(regions[0], rect);
min = task->futures[0].get_result<argument_type>(true /*silence warnings*/);
max = task->futures[1].get_result<argument_type>(true /*silence warnings*/);
volume = pitches.flatten(rect);
#ifndef LEGION_BOUNDS_CHECKS
// Check to see if this is dense or not
return inout.accessor.is_dense_row_major(rect) && (inoutptr = inout.ptr(rect));
#else
// No dense execution if we're doing bounds checks
return false;
#endif
}
};
template <int DIM>
static void dispatch_cpu(const Legion::Task* task,
const std::vector<Legion::PhysicalRegion>& regions,
LegateDeserializer& derez)
{
DeserializedArgs<DIM> args;
const bool dense = args.deserialize(derez, task, regions);
if (args.volume == 0) return;
ClipOperation<T> func;
if (dense) {
for (size_t idx = 0; idx < args.volume; ++idx)
args.inoutptr[idx] = func(args.inoutptr[idx], args.min, args.max);
} else {
CPULoop<DIM>::unary_inplace(func, args.inout, args.rect, args.min, args.max);
}
}
#ifdef LEGATE_USE_OPENMP
template <int DIM>
// OpenMP variant: same in-place clip as dispatch_cpu, with the dense flat
// loop parallelized across threads.
static void dispatch_omp(const Legion::Task* task,
const std::vector<Legion::PhysicalRegion>& regions,
LegateDeserializer& derez)
{
DeserializedArgs<DIM> args;
const bool is_dense = args.deserialize(derez, task, regions);
// Nothing to do for an empty rectangle.
if (args.volume == 0) return;
ClipOperation<T> op;
if (!is_dense) {
// Strided/sparse layout: delegate to the OpenMP accessor loop.
OMPLoop<DIM>::unary_inplace(op, args.inout, args.rect, args.min, args.max);
return;
}
// Dense row-major layout: parallel flat loop over the raw pointer.
argument_type* ptr = args.inoutptr;
#pragma omp parallel for schedule(static)
for (size_t i = 0; i < args.volume; ++i) {
ptr[i] = op(ptr[i], args.min, args.max);
}
}
#endif
#if defined(LEGATE_USE_CUDA) && defined(__CUDACC__)
template <int DIM>
// GPU variant: launches one thread per element to clip the region in place.
static void dispatch_gpu(const Legion::Task* task,
const std::vector<Legion::PhysicalRegion>& regions,
LegateDeserializer& derez)
{
DeserializedArgs<DIM> args;
// NOTE(review): the dense/sparse return value is ignored here, unlike the
// CPU/OMP variants — presumably gpu_clip_inplace handles both layouts via
// the accessor; confirm against the kernel's implementation.
args.deserialize(derez, task, regions);
if (args.volume == 0) return;
// Round up so every element gets a thread.
const size_t blocks = (args.volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
gpu_clip_inplace<DIM, T, DeserializedArgs<DIM>><<<blocks, THREADS_PER_BLOCK>>>(args);
}
#elif defined(LEGATE_USE_CUDA)
template <int DIM>
static void dispatch_gpu(const Legion::Task* task,
const std::vector<Legion::PhysicalRegion>& regions,
LegateDeserializer& derez);
#endif
};
// Future-based scalar variant of clip: clips a single value held in futures
// rather than a region (REGIONS = 0), returning the clipped result.
template <typename T>
class ClipScalar : public NumPyTask<ClipScalar<T>> {
private:
using argument_type = typename ClipOperation<T>::argument_type;
// For clip the result has the same type as the argument.
using result_type = typename ClipOperation<T>::argument_type;
public:
// XXX figure out how to hoist this into PointTask
static const int TASK_ID =
task_id<ClipOperation<T>::op_code, NUMPY_SCALAR_VARIANT_OFFSET, result_type, argument_type>;
static const int REGIONS = 0;
// Reads value (future 0) and the clip bounds (futures 1 and 2) and returns
// the value clamped to [min, max] via ClipOperation.
static result_type cpu_variant(const Legion::Task* task,
const std::vector<Legion::PhysicalRegion>& regions,
Legion::Context ctx,
Legion::Runtime* runtime)
{
argument_type rhs = task->futures[0].get_result<argument_type>(true /*silence warnings*/);
argument_type min = task->futures[1].get_result<argument_type>(true /*silence warnings*/);
argument_type max = task->futures[2].get_result<argument_type>(true /*silence warnings*/);
ClipOperation<T> func;
return func(rhs, min, max);
}
private:
// Constructing the registrar performs task-variant registration as a
// side effect of static initialization.
struct StaticRegistrar {
StaticRegistrar()
{
ClipScalar::template register_variants_with_return<result_type, argument_type>();
}
};
// Virtual reference keeps the static member (and thus its registration
// side effect) from being optimized away for unused instantiations.
virtual void force_instantiation_of_static_registrar() { (void)&static_registrar; }
// this static member registers this task's variants during static initialization
static const StaticRegistrar static_registrar;
};
// this is the definition of ClipScalar::static_registrar (its constructor
// registers the task variants during static initialization)
template <class T>
const typename ClipScalar<T>::StaticRegistrar ClipScalar<T>::static_registrar{};
} // namespace numpy
} // namespace legate
#endif // __NUMPY_CLIP_H__
|
openmp.c | #include <stdio.h>
#include <assert.h>
#include <time.h>
#include <omp.h>
#include <math.h>
#include <sys/time.h>
const double PI = 3.1415926535897932;
const long STEP_NUM = 1070596096;
const double STEP_LENGTH = 1.0 / 1070596096;
int main()
{
struct timeval startTime;
gettimeofday(&startTime, NULL);
double sum = 0.0;
double pi, x;
printf("\nStart calculating...\n");
// computational steps
#pragma omp parallel for reduction(+:sum) private(x) num_threads(16)
for(int i = 0;i < STEP_NUM; i++)
{
x = (i + 0.5) * STEP_LENGTH;
sum += 1.0 / (1.0 + x * x);
}
pi = STEP_LENGTH * sum * 4;
struct timeval endTime;
gettimeofday(&endTime, NULL);
printf("PI = %.16lf with error %.16lf\nTime elapsed : %lf seconds.\n\n", pi, fabs(pi - PI), (endTime.tv_sec - startTime.tv_sec) + ((double)(endTime.tv_usec - startTime.tv_usec) / 10E6 ));
assert(fabs(PI - pi) <= 0.001);
return 0;
}
|
npb_cg.c | # 1 "main.c"
# 0 "main.c"
# 1 "/opt/pgi/linux86-64/17.10/include/_c_macros.h" <System_Header>
# 10 "/opt/pgi/linux86-64/17.10/include/_c_macros.h" <System_Header>
# 16 "/opt/pgi/linux86-64/17.10/include/_c_macros.h" <System_Header>
# 21 "/opt/pgi/linux86-64/17.10/include/_c_macros.h" <System_Header>
# 1 "main.c"
# 4 "main.c"
# 1 "/opt/pgi/linux86-64/17.10/include/omp.h" <System_Header>
# 10 "/opt/pgi/linux86-64/17.10/include/omp.h" <System_Header>
# 14 "/opt/pgi/linux86-64/17.10/include/omp.h" <System_Header>
# 27 "/opt/pgi/linux86-64/17.10/include/omp.h" <System_Header>
typedef int omp_lock_t ;
# 29 "/opt/pgi/linux86-64/17.10/include/omp.h" <System_Header>
# 31 "/opt/pgi/linux86-64/17.10/include/omp.h" <System_Header>
struct omp_nest_lock {
omp_lock_t act ;
short cnt ;
short tid ;
} ;
# 37 "/opt/pgi/linux86-64/17.10/include/omp.h" <System_Header>
typedef struct omp_nest_lock omp_nest_lock_t ;
# 41 "/opt/pgi/linux86-64/17.10/include/omp.h" <System_Header>
typedef enum omp_sched_t {
omp_sched_static = 1 ,
omp_sched_dynamic = 2 ,
omp_sched_guided = 3 ,
omp_sched_auto = 4
} omp_sched_t ;
# 52 "/opt/pgi/linux86-64/17.10/include/omp.h" <System_Header>
extern void omp_set_num_threads ( int n ) ;
extern int omp_get_thread_num ( void ) ;
extern int omp_get_num_procs ( void ) ;
extern int omp_get_num_threads ( void ) ;
extern int omp_get_max_threads ( void ) ;
extern int omp_in_parallel ( void ) ;
extern int omp_in_final ( void ) ;
extern void omp_set_dynamic ( int n ) ;
extern int omp_get_dynamic ( void ) ;
extern void omp_set_nested ( int n ) ;
extern int omp_get_nested ( void ) ;
extern void omp_init_lock ( omp_lock_t * s ) ;
extern void omp_destroy_lock ( omp_lock_t * s ) ;
extern void omp_set_lock ( omp_lock_t * s ) ;
extern void omp_unset_lock ( omp_lock_t * s ) ;
extern int omp_test_lock ( omp_lock_t * s ) ;
extern void omp_init_nest_lock ( omp_nest_lock_t * s ) ;
extern void omp_destroy_nest_lock ( omp_nest_lock_t * s ) ;
extern void omp_set_nest_lock ( omp_nest_lock_t * s ) ;
extern void omp_unset_nest_lock ( omp_nest_lock_t * s ) ;
extern int omp_test_nest_lock ( omp_nest_lock_t * s ) ;
extern double omp_get_wtime ( void ) ;
extern double omp_get_wtick ( void ) ;
extern long omp_get_stack_size ( void ) ;
extern void omp_set_stack_size ( long l ) ;
extern int omp_get_thread_limit ( void ) ;
extern void omp_set_max_active_levels ( int ) ;
extern int omp_get_max_active_levels ( void ) ;
extern int omp_get_level ( void ) ;
extern int omp_get_ancestor_thread_num ( int ) ;
extern int omp_get_team_size ( int ) ;
extern int omp_get_active_level ( void ) ;
extern void omp_set_schedule ( omp_sched_t , int ) ;
extern void omp_get_schedule ( omp_sched_t * , int * ) ;
extern int omp_get_initial_device ( ) ;
extern int omp_get_default_device ( ) ;
extern void omp_set_default_device ( int ) ;
# 89 "/opt/pgi/linux86-64/17.10/include/omp.h" <System_Header>
# 1 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header>
# 10 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header>
# 17 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header>
# 19 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header>
# 1 "/opt/pgi/linux86-64/17.10/include/stdlib.h" <System_Header>
# 3 "/opt/pgi/linux86-64/17.10/include/stdlib.h" <System_Header>
# 16 "/opt/pgi/linux86-64/17.10/include/stdlib.h" <System_Header>
# 1 "/usr/include/stdlib.h" <System_Header>
# 16 "/usr/include/stdlib.h" <System_Header>
# 20 "/usr/include/stdlib.h" <System_Header>
# 24 "/usr/include/stdlib.h" <System_Header>
# 1 "/usr/include/features.h" <System_Header>
# 16 "/usr/include/features.h" <System_Header>
# 93 "/usr/include/features.h" <System_Header>
# 96 "/usr/include/features.h" <System_Header>
# 123 "/usr/include/features.h" <System_Header>
# 134 "/usr/include/features.h" <System_Header>
# 145 "/usr/include/features.h" <System_Header>
# 156 "/usr/include/features.h" <System_Header>
# 181 "/usr/include/features.h" <System_Header>
# 191 "/usr/include/features.h" <System_Header>
# 197 "/usr/include/features.h" <System_Header>
# 203 "/usr/include/features.h" <System_Header>
# 212 "/usr/include/features.h" <System_Header>
# 220 "/usr/include/features.h" <System_Header>
# 344 "/usr/include/features.h" <System_Header>
# 345 "/usr/include/features.h" <System_Header>
# 1 "/usr/include/stdc-predef.h" <System_Header>
# 16 "/usr/include/stdc-predef.h" <System_Header>
# 27 "/usr/include/stdc-predef.h" <System_Header>
# 34 "/usr/include/stdc-predef.h" <System_Header>
# 54 "/usr/include/stdc-predef.h" <System_Header>
# 57 "/usr/include/stdc-predef.h" <System_Header>
# 346 "/usr/include/features.h" <System_Header>
# 352 "/usr/include/features.h" <System_Header>
# 357 "/usr/include/features.h" <System_Header>
# 364 "/usr/include/features.h" <System_Header>
# 367 "/usr/include/features.h" <System_Header>
# 1 "/opt/pgi/linux86-64/17.10/include-gcc50/sys/cdefs.h" <System_Header>
# 10 "/opt/pgi/linux86-64/17.10/include-gcc50/sys/cdefs.h" <System_Header>
# 14 "/opt/pgi/linux86-64/17.10/include-gcc50/sys/cdefs.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header>
# 16 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header>
# 21 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header>
# 28 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header>
# 33 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header>
# 81 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header>
# 86 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header>
# 91 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header>
# 96 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header>
# 110 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header>
# 121 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header>
# 131 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header>
# 147 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header>
# 173 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header>
# 202 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header>
# 209 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header>
# 217 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header>
# 227 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header>
# 234 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header>
# 243 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header>
# 252 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header>
# 264 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header>
# 274 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header>
# 283 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header>
# 291 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header>
# 305 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header>
# 313 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header>
# 328 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header>
# 347 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header>
# 356 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header>
# 361 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header>
# 368 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header>
# 410 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/wordsize.h" <System_Header>
# 11 "/usr/include/x86_64-linux-gnu/bits/wordsize.h" <System_Header>
# 411 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header>
# 15 "/opt/pgi/linux86-64/17.10/include-gcc50/sys/cdefs.h" <System_Header>
# 18 "/opt/pgi/linux86-64/17.10/include-gcc50/sys/cdefs.h" <System_Header>
# 31 "/opt/pgi/linux86-64/17.10/include-gcc50/sys/cdefs.h" <System_Header>
# 45 "/opt/pgi/linux86-64/17.10/include-gcc50/sys/cdefs.h" <System_Header>
# 368 "/usr/include/features.h" <System_Header>
# 371 "/usr/include/features.h" <System_Header>
# 379 "/usr/include/features.h" <System_Header>
# 390 "/usr/include/features.h" <System_Header>
# 391 "/usr/include/features.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/gnu/stubs.h" <System_Header>
# 3 "/usr/include/x86_64-linux-gnu/gnu/stubs.h" <System_Header>
# 10 "/usr/include/x86_64-linux-gnu/gnu/stubs.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/gnu/stubs-64.h" <System_Header>
# 4 "/usr/include/x86_64-linux-gnu/gnu/stubs-64.h" <System_Header>
# 11 "/usr/include/x86_64-linux-gnu/gnu/stubs.h" <System_Header>
# 392 "/usr/include/features.h" <System_Header>
# 25 "/usr/include/stdlib.h" <System_Header>
# 26 "/usr/include/stdlib.h" <System_Header>
# 32 "/usr/include/stdlib.h" <System_Header>
# 1 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header>
# 10 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header>
# 14 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header>
# 16 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header>
# 1 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 22 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 26 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 35 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 47 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 50 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 54 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 62 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 93 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 103 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 121 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 123 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 126 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 165 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 168 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 216 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
typedef unsigned long int size_t ;
# 245 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 248 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 292 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 312 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 328 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
typedef int wchar_t ;
# 363 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 398 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 17 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header>
# 33 "/usr/include/stdlib.h" <System_Header>
# 40 "/usr/include/stdlib.h" <System_Header>
# 41 "/usr/include/stdlib.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/waitflags.h" <System_Header>
# 17 "/usr/include/x86_64-linux-gnu/bits/waitflags.h" <System_Header>
# 24 "/usr/include/x86_64-linux-gnu/bits/waitflags.h" <System_Header>
# 28 "/usr/include/x86_64-linux-gnu/bits/waitflags.h" <System_Header>
# 39 "/usr/include/x86_64-linux-gnu/bits/waitflags.h" <System_Header>
# 45 "/usr/include/x86_64-linux-gnu/bits/waitflags.h" <System_Header>
# 50 "/usr/include/x86_64-linux-gnu/bits/waitflags.h" <System_Header>
typedef enum
{
P_ALL ,
P_PID ,
P_PGID
} idtype_t ;
# 42 "/usr/include/stdlib.h" <System_Header>
# 42 "/usr/include/stdlib.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/waitstatus.h" <System_Header>
# 17 "/usr/include/x86_64-linux-gnu/bits/waitstatus.h" <System_Header>
# 24 "/usr/include/x86_64-linux-gnu/bits/waitstatus.h" <System_Header>
# 27 "/usr/include/x86_64-linux-gnu/bits/waitstatus.h" <System_Header>
# 30 "/usr/include/x86_64-linux-gnu/bits/waitstatus.h" <System_Header>
# 33 "/usr/include/x86_64-linux-gnu/bits/waitstatus.h" <System_Header>
# 36 "/usr/include/x86_64-linux-gnu/bits/waitstatus.h" <System_Header>
# 39 "/usr/include/x86_64-linux-gnu/bits/waitstatus.h" <System_Header>
# 43 "/usr/include/x86_64-linux-gnu/bits/waitstatus.h" <System_Header>
# 47 "/usr/include/x86_64-linux-gnu/bits/waitstatus.h" <System_Header>
# 52 "/usr/include/x86_64-linux-gnu/bits/waitstatus.h" <System_Header>
# 55 "/usr/include/x86_64-linux-gnu/bits/waitstatus.h" <System_Header>
# 64 "/usr/include/x86_64-linux-gnu/bits/waitstatus.h" <System_Header>
# 1 "/usr/include/endian.h" <System_Header>
# 16 "/usr/include/endian.h" <System_Header>
# 21 "/usr/include/endian.h" <System_Header>
# 1 "/usr/include/features.h" <System_Header>
# 16 "/usr/include/features.h" <System_Header>
# 22 "/usr/include/endian.h" <System_Header>
# 29 "/usr/include/endian.h" <System_Header>
# 35 "/usr/include/endian.h" <System_Header>
# 36 "/usr/include/endian.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/endian.h" <System_Header>
# 37 "/usr/include/endian.h" <System_Header>
# 39 "/usr/include/endian.h" <System_Header>
# 59 "/usr/include/endian.h" <System_Header>
# 60 "/usr/include/endian.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/byteswap.h" <System_Header>
# 17 "/usr/include/x86_64-linux-gnu/bits/byteswap.h" <System_Header>
# 26 "/usr/include/x86_64-linux-gnu/bits/byteswap.h" <System_Header>
# 1 "/usr/include/features.h" <System_Header>
# 16 "/usr/include/features.h" <System_Header>
# 27 "/usr/include/x86_64-linux-gnu/bits/byteswap.h" <System_Header>
# 27 "/usr/include/x86_64-linux-gnu/bits/byteswap.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 17 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 21 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 26 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 1 "/usr/include/features.h" <System_Header>
# 16 "/usr/include/features.h" <System_Header>
# 27 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 27 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/wordsize.h" <System_Header>
# 11 "/usr/include/x86_64-linux-gnu/bits/wordsize.h" <System_Header>
# 28 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 29 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
typedef unsigned char __u_char ;
typedef unsigned short int __u_short ;
typedef unsigned int __u_int ;
typedef unsigned long int __u_long ;
# 35 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
typedef signed char __int8_t ;
typedef unsigned char __uint8_t ;
typedef signed short int __int16_t ;
typedef unsigned short int __uint16_t ;
typedef signed int __int32_t ;
typedef unsigned int __uint32_t ;
# 43 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
typedef signed long int __int64_t ;
typedef unsigned long int __uint64_t ;
# 50 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 52 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
typedef long int __quad_t ;
typedef unsigned long int __u_quad_t ;
# 87 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 116 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 121 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/typesizes.h" <System_Header>
# 17 "/usr/include/x86_64-linux-gnu/bits/typesizes.h" <System_Header>
# 27 "/usr/include/x86_64-linux-gnu/bits/typesizes.h" <System_Header>
# 29 "/usr/include/x86_64-linux-gnu/bits/typesizes.h" <System_Header>
# 79 "/usr/include/x86_64-linux-gnu/bits/typesizes.h" <System_Header>
# 82 "/usr/include/x86_64-linux-gnu/bits/typesizes.h" <System_Header>
# 86 "/usr/include/x86_64-linux-gnu/bits/typesizes.h" <System_Header>
# 122 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 124 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
typedef unsigned long int __dev_t ;
typedef unsigned int __uid_t ;
typedef unsigned int __gid_t ;
typedef unsigned long int __ino_t ;
typedef unsigned long int __ino64_t ;
typedef unsigned int __mode_t ;
typedef unsigned long int __nlink_t ;
typedef long int __off_t ;
typedef long int __off64_t ;
typedef int __pid_t ;
typedef struct { int __val [ 2 ] ; } __fsid_t ;
typedef long int __clock_t ;
typedef unsigned long int __rlim_t ;
typedef unsigned long int __rlim64_t ;
typedef unsigned int __id_t ;
typedef long int __time_t ;
typedef unsigned int __useconds_t ;
typedef long int __suseconds_t ;
# 143 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
typedef int __daddr_t ;
typedef int __key_t ;
# 146 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
typedef int __clockid_t ;
# 149 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
typedef void * __timer_t ;
# 152 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
typedef long int __blksize_t ;
# 155 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 157 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
typedef long int __blkcnt_t ;
typedef long int __blkcnt64_t ;
# 161 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
typedef unsigned long int __fsblkcnt_t ;
typedef unsigned long int __fsblkcnt64_t ;
# 165 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
typedef unsigned long int __fsfilcnt_t ;
typedef unsigned long int __fsfilcnt64_t ;
# 169 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
typedef long int __fsword_t ;
# 172 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
typedef long int __ssize_t ;
# 174 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
typedef long int __syscall_slong_t ;
typedef unsigned long int __syscall_ulong_t ;
# 180 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
typedef __off64_t __loff_t ;
typedef __quad_t * __qaddr_t ;
typedef char * __caddr_t ;
# 185 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
typedef long int __intptr_t ;
# 188 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
typedef unsigned int __socklen_t ;
# 28 "/usr/include/x86_64-linux-gnu/bits/byteswap.h" <System_Header>
# 28 "/usr/include/x86_64-linux-gnu/bits/byteswap.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/wordsize.h" <System_Header>
# 11 "/usr/include/x86_64-linux-gnu/bits/wordsize.h" <System_Header>
# 29 "/usr/include/x86_64-linux-gnu/bits/byteswap.h" <System_Header>
# 30 "/usr/include/x86_64-linux-gnu/bits/byteswap.h" <System_Header>
# 34 "/usr/include/x86_64-linux-gnu/bits/byteswap.h" <System_Header>
# 35 "/usr/include/x86_64-linux-gnu/bits/byteswap.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/byteswap-16.h" <System_Header>
# 17 "/usr/include/x86_64-linux-gnu/bits/byteswap-16.h" <System_Header>
# 44 "/usr/include/x86_64-linux-gnu/bits/byteswap-16.h" <System_Header>
static unsigned short int
__bswap_16 ( unsigned short int __bsx )
{
  /* Reverse the byte order of a 16-bit value: swap the two bytes. */
  unsigned short int hi = (unsigned short int) ((__bsx & 0xff) << 8);
  unsigned short int lo = (unsigned short int) ((__bsx >> 8) & 0xff);
  return (unsigned short int) (hi | lo);
}
# 36 "/usr/include/x86_64-linux-gnu/bits/byteswap.h" <System_Header>
# 37 "/usr/include/x86_64-linux-gnu/bits/byteswap.h" <System_Header>
# 87 "/usr/include/x86_64-linux-gnu/bits/byteswap.h" <System_Header>
static unsigned int
__bswap_32 ( unsigned int __bsx )
{
  /* Reverse the byte order of a 32-bit value, one byte lane at a time. */
  unsigned int b0 = (__bsx >> 24) & 0x000000ffu;
  unsigned int b1 = (__bsx >> 8)  & 0x0000ff00u;
  unsigned int b2 = (__bsx << 8)  & 0x00ff0000u;
  unsigned int b3 = (__bsx << 24) & 0xff000000u;
  return b0 | b1 | b2 | b3;
}
# 148 "/usr/include/x86_64-linux-gnu/bits/byteswap.h" <System_Header>
static __uint64_t
__bswap_64 ( __uint64_t __bsx )
{
  /* Reverse the byte order of a 64-bit value by peeling bytes off the
     low end and pushing them onto the result from the high end. */
  __uint64_t out = 0;
  int i;
  for (i = 0; i < 8; i++) {
    out = (out << 8) | ((__bsx >> (8 * i)) & 0xffull);
  }
  return out;
}
# 61 "/usr/include/endian.h" <System_Header>
# 65 "/usr/include/x86_64-linux-gnu/bits/waitstatus.h" <System_Header>
# 66 "/usr/include/x86_64-linux-gnu/bits/waitstatus.h" <System_Header>
union wait
{
int w_status ;
struct
{
# 72 "/usr/include/x86_64-linux-gnu/bits/waitstatus.h" <System_Header>
unsigned int __w_termsig : 7 ;
unsigned int __w_coredump : 1 ;
unsigned int __w_retcode : 8 ;
unsigned int : 16 ;
# 83 "/usr/include/x86_64-linux-gnu/bits/waitstatus.h" <System_Header>
} __wait_terminated ;
struct
{
# 87 "/usr/include/x86_64-linux-gnu/bits/waitstatus.h" <System_Header>
unsigned int __w_stopval : 8 ;
unsigned int __w_stopsig : 8 ;
unsigned int : 16 ;
# 96 "/usr/include/x86_64-linux-gnu/bits/waitstatus.h" <System_Header>
} __wait_stopped ;
} ;
# 43 "/usr/include/stdlib.h" <System_Header>
# 47 "/usr/include/stdlib.h" <System_Header>
# 60 "/usr/include/stdlib.h" <System_Header>
# 83 "/usr/include/stdlib.h" <System_Header>
# 96 "/usr/include/stdlib.h" <System_Header>
typedef struct
{
int quot ;
int rem ;
} div_t ;
# 103 "/usr/include/stdlib.h" <System_Header>
# 105 "/usr/include/stdlib.h" <System_Header>
typedef struct
{
long int quot ;
long int rem ;
} ldiv_t ;
# 116 "/usr/include/stdlib.h" <System_Header>
typedef struct
{
long long int quot ;
long long int rem ;
} lldiv_t ;
# 127 "/usr/include/stdlib.h" <System_Header>
# 132 "/usr/include/stdlib.h" <System_Header>
# 137 "/usr/include/stdlib.h" <System_Header>
# 139 "/usr/include/stdlib.h" <System_Header>
extern size_t __ctype_get_mb_cur_max ( void ) ;
# 143 "/usr/include/stdlib.h" <System_Header>
extern double atof ( const char * __nptr )
;
extern int atoi ( const char * __nptr )
;
extern long int atol ( const char * __nptr )
;
# 156 "/usr/include/stdlib.h" <System_Header>
extern long long int atoll ( const char * __nptr )
;
# 163 "/usr/include/stdlib.h" <System_Header>
extern double strtod ( const char * __restrict __nptr ,
char * * __restrict __endptr )
;
# 171 "/usr/include/stdlib.h" <System_Header>
extern float strtof ( const char * __restrict __nptr ,
char * * __restrict __endptr ) ;
# 175 "/usr/include/stdlib.h" <System_Header>
extern long double strtold ( const char * __restrict __nptr ,
char * * __restrict __endptr )
;
# 182 "/usr/include/stdlib.h" <System_Header>
extern long int strtol ( const char * __restrict __nptr ,
char * * __restrict __endptr , int __base )
;
extern unsigned long int strtoul ( const char * __restrict __nptr ,
char * * __restrict __endptr , int __base )
;
# 193 "/usr/include/stdlib.h" <System_Header>
# 195 "/usr/include/stdlib.h" <System_Header>
extern long long int strtoq ( const char * __restrict __nptr ,
char * * __restrict __endptr , int __base )
;
# 200 "/usr/include/stdlib.h" <System_Header>
extern unsigned long long int strtouq ( const char * __restrict __nptr ,
char * * __restrict __endptr , int __base )
;
# 207 "/usr/include/stdlib.h" <System_Header>
# 209 "/usr/include/stdlib.h" <System_Header>
extern long long int strtoll ( const char * __restrict __nptr ,
char * * __restrict __endptr , int __base )
;
# 214 "/usr/include/stdlib.h" <System_Header>
extern unsigned long long int strtoull ( const char * __restrict __nptr ,
char * * __restrict __endptr , int __base )
;
# 304 "/usr/include/stdlib.h" <System_Header>
extern char * l64a ( long int __n ) ;
# 307 "/usr/include/stdlib.h" <System_Header>
extern long int a64l ( const char * __s )
;
# 314 "/usr/include/stdlib.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
# 16 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
# 20 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
# 25 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
# 1 "/usr/include/features.h" <System_Header>
# 16 "/usr/include/features.h" <System_Header>
# 26 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
# 29 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 17 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 21 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 30 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
# 33 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
typedef __u_char u_char ;
typedef __u_short u_short ;
typedef __u_int u_int ;
typedef __u_long u_long ;
typedef __quad_t quad_t ;
typedef __u_quad_t u_quad_t ;
typedef __fsid_t fsid_t ;
# 44 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
typedef __loff_t loff_t ;
# 48 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
typedef __ino_t ino_t ;
# 60 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
typedef __dev_t dev_t ;
# 65 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
typedef __gid_t gid_t ;
# 70 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
typedef __mode_t mode_t ;
# 75 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
typedef __nlink_t nlink_t ;
# 80 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
typedef __uid_t uid_t ;
# 86 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
typedef __off_t off_t ;
# 98 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
typedef __pid_t pid_t ;
# 104 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
typedef __id_t id_t ;
# 109 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
typedef __ssize_t ssize_t ;
# 115 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
typedef __daddr_t daddr_t ;
typedef __caddr_t caddr_t ;
# 122 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
typedef __key_t key_t ;
# 132 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
# 1 "/usr/include/time.h" <System_Header>
# 16 "/usr/include/time.h" <System_Header>
# 20 "/usr/include/time.h" <System_Header>
# 55 "/usr/include/time.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 17 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 21 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 56 "/usr/include/time.h" <System_Header>
# 58 "/usr/include/time.h" <System_Header>
typedef __clock_t clock_t ;
# 71 "/usr/include/time.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 17 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 21 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 72 "/usr/include/time.h" <System_Header>
# 74 "/usr/include/time.h" <System_Header>
typedef __time_t time_t ;
# 88 "/usr/include/time.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 17 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 21 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 89 "/usr/include/time.h" <System_Header>
# 90 "/usr/include/time.h" <System_Header>
typedef __clockid_t clockid_t ;
# 100 "/usr/include/time.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 17 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 21 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 101 "/usr/include/time.h" <System_Header>
# 102 "/usr/include/time.h" <System_Header>
typedef __timer_t timer_t ;
# 133 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
# 146 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
# 1 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header>
# 10 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header>
# 14 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header>
# 16 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header>
# 1 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 22 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 26 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 35 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 47 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 50 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 54 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 62 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 93 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 103 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 121 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 123 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 126 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 165 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 168 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 245 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 248 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 363 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 398 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 17 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header>
# 147 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
# 149 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
typedef unsigned long int ulong ;
typedef unsigned short int ushort ;
typedef unsigned int uint ;
# 155 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
# 159 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
# 162 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
typedef char int8_t ;
typedef short int int16_t ;
typedef int int32_t ;
# 166 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
typedef long int int64_t ;
# 172 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
typedef unsigned char u_int8_t ;
typedef unsigned short int u_int16_t ;
typedef unsigned int u_int32_t ;
# 177 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
typedef unsigned long int u_int64_t ;
# 182 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
typedef int register_t ;
# 215 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
# 216 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
# 1 "/usr/include/endian.h" <System_Header>
# 16 "/usr/include/endian.h" <System_Header>
# 217 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
# 218 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
# 219 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header>
# 17 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header>
# 19 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header>
# 24 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header>
# 1 "/usr/include/features.h" <System_Header>
# 16 "/usr/include/features.h" <System_Header>
# 25 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header>
# 26 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header>
# 27 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 17 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 21 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 28 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header>
# 29 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header>
# 30 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/select.h" <System_Header>
# 16 "/usr/include/x86_64-linux-gnu/bits/select.h" <System_Header>
# 22 "/usr/include/x86_64-linux-gnu/bits/select.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/wordsize.h" <System_Header>
# 11 "/usr/include/x86_64-linux-gnu/bits/wordsize.h" <System_Header>
# 23 "/usr/include/x86_64-linux-gnu/bits/select.h" <System_Header>
# 47 "/usr/include/x86_64-linux-gnu/bits/select.h" <System_Header>
# 31 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header>
# 32 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header>
# 33 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/sigset.h" <System_Header>
# 17 "/usr/include/x86_64-linux-gnu/bits/sigset.h" <System_Header>
# 22 "/usr/include/x86_64-linux-gnu/bits/sigset.h" <System_Header>
typedef int __sig_atomic_t ;
# 24 "/usr/include/x86_64-linux-gnu/bits/sigset.h" <System_Header>
# 27 "/usr/include/x86_64-linux-gnu/bits/sigset.h" <System_Header>
typedef struct
{
unsigned long int __val [ ( 1024 / ( 8 * sizeof ( unsigned long int ) ) ) ] ;
} __sigset_t ;
# 39 "/usr/include/x86_64-linux-gnu/bits/sigset.h" <System_Header>
# 34 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header>
# 37 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header>
typedef __sigset_t sigset_t ;
# 40 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header>
# 43 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header>
# 1 "/usr/include/time.h" <System_Header>
# 16 "/usr/include/time.h" <System_Header>
# 20 "/usr/include/time.h" <System_Header>
# 116 "/usr/include/time.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 17 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 21 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 117 "/usr/include/time.h" <System_Header>
# 119 "/usr/include/time.h" <System_Header>
struct timespec
{
__time_t tv_sec ;
__syscall_slong_t tv_nsec ;
} ;
# 44 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header>
# 45 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/time.h" <System_Header>
# 17 "/usr/include/x86_64-linux-gnu/bits/time.h" <System_Header>
# 21 "/usr/include/x86_64-linux-gnu/bits/time.h" <System_Header>
# 26 "/usr/include/x86_64-linux-gnu/bits/time.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 17 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 21 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 27 "/usr/include/x86_64-linux-gnu/bits/time.h" <System_Header>
# 29 "/usr/include/x86_64-linux-gnu/bits/time.h" <System_Header>
struct timeval
{
__time_t tv_sec ;
__suseconds_t tv_usec ;
} ;
# 46 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header>
# 48 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header>
typedef __suseconds_t suseconds_t ;
# 53 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header>
typedef long int __fd_mask ;
# 56 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header>
# 58 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header>
# 63 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header>
typedef struct
{
# 72 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header>
__fd_mask __fds_bits [ 1024 / ( 8 * ( int ) sizeof ( __fd_mask ) ) ] ;
# 75 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header>
} fd_set ;
# 77 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header>
# 81 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header>
typedef __fd_mask fd_mask ;
# 84 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header>
# 89 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header>
# 105 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header>
extern int select ( int __nfds , fd_set * __restrict __readfds ,
fd_set * __restrict __writefds ,
fd_set * __restrict __exceptfds ,
struct timeval * __restrict __timeout ) ;
# 117 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header>
extern int pselect ( int __nfds , fd_set * __restrict __readfds ,
fd_set * __restrict __writefds ,
fd_set * __restrict __exceptfds ,
const struct timespec * __restrict __timeout ,
const __sigset_t * __restrict __sigmask ) ;
# 126 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header>
# 220 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
# 221 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
# 222 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/sys/sysmacros.h" <System_Header>
# 17 "/usr/include/x86_64-linux-gnu/sys/sysmacros.h" <System_Header>
# 22 "/usr/include/x86_64-linux-gnu/sys/sysmacros.h" <System_Header>
# 1 "/usr/include/features.h" <System_Header>
# 16 "/usr/include/features.h" <System_Header>
# 23 "/usr/include/x86_64-linux-gnu/sys/sysmacros.h" <System_Header>
# 27 "/usr/include/x86_64-linux-gnu/sys/sysmacros.h" <System_Header>
extern unsigned int gnu_dev_major ( unsigned long long int __dev )
;
# 30 "/usr/include/x86_64-linux-gnu/sys/sysmacros.h" <System_Header>
extern unsigned int gnu_dev_minor ( unsigned long long int __dev )
;
# 33 "/usr/include/x86_64-linux-gnu/sys/sysmacros.h" <System_Header>
extern unsigned long long int gnu_dev_makedev ( unsigned int __major ,
unsigned int __minor )
;
# 60 "/usr/include/x86_64-linux-gnu/sys/sysmacros.h" <System_Header>
# 223 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
# 228 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
typedef __blksize_t blksize_t ;
# 232 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
# 235 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
typedef __blkcnt_t blkcnt_t ;
# 239 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
typedef __fsblkcnt_t fsblkcnt_t ;
# 243 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
typedef __fsfilcnt_t fsfilcnt_t ;
# 268 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
# 270 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header>
# 16 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header>
# 21 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/wordsize.h" <System_Header>
# 11 "/usr/include/x86_64-linux-gnu/bits/wordsize.h" <System_Header>
# 22 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header>
# 59 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header>
typedef unsigned long int pthread_t ;
# 63 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header>
union pthread_attr_t
{
char __size [ 56 ] ;
long int __align ;
} ;
# 69 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header>
typedef union pthread_attr_t pthread_attr_t ;
# 75 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header>
typedef struct __pthread_internal_list
{
struct __pthread_internal_list * __prev ;
struct __pthread_internal_list * __next ;
} __pthread_list_t ;
# 89 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header>
typedef union
{
struct __pthread_mutex_s
{
int __lock ;
unsigned int __count ;
int __owner ;
# 98 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header>
unsigned int __nusers ;
# 100 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header>
int __kind ;
# 104 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header>
short __spins ;
short __elision ;
__pthread_list_t __list ;
# 108 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header>
# 125 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header>
} __data ;
char __size [ 40 ] ;
long int __align ;
} pthread_mutex_t ;
# 130 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header>
typedef union
{
char __size [ 4 ] ;
int __align ;
} pthread_mutexattr_t ;
# 138 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header>
typedef union
{
struct
{
int __lock ;
unsigned int __futex ;
unsigned long long int __total_seq ;
unsigned long long int __wakeup_seq ;
unsigned long long int __woken_seq ;
void * __mutex ;
unsigned int __nwaiters ;
unsigned int __broadcast_seq ;
} __data ;
char __size [ 48 ] ;
long long int __align ;
} pthread_cond_t ;
# 156 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header>
typedef union
{
char __size [ 4 ] ;
int __align ;
} pthread_condattr_t ;
# 163 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header>
typedef unsigned int pthread_key_t ;
# 167 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header>
typedef int pthread_once_t ;
# 173 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header>
typedef union
{
# 177 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header>
struct
{
int __lock ;
unsigned int __nr_readers ;
unsigned int __readers_wakeup ;
unsigned int __writer_wakeup ;
unsigned int __nr_readers_queued ;
unsigned int __nr_writers_queued ;
int __writer ;
int __shared ;
signed char __rwelision ;
# 192 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header>
unsigned char __pad1 [ 7 ] ;
# 195 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header>
unsigned long int __pad2 ;
unsigned int __flags ;
# 200 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header>
} __data ;
# 220 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header>
char __size [ 56 ] ;
long int __align ;
} pthread_rwlock_t ;
# 224 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header>
typedef union
{
char __size [ 8 ] ;
long int __align ;
} pthread_rwlockattr_t ;
# 233 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header>
typedef volatile int pthread_spinlock_t ;
# 238 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header>
typedef union
{
char __size [ 32 ] ;
long int __align ;
} pthread_barrier_t ;
# 245 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header>
typedef union
{
char __size [ 4 ] ;
int __align ;
} pthread_barrierattr_t ;
# 271 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
# 315 "/usr/include/stdlib.h" <System_Header>
# 319 "/usr/include/stdlib.h" <System_Header>
extern long int random ( void ) ;
# 323 "/usr/include/stdlib.h" <System_Header>
extern void srandom ( unsigned int __seed ) ;
# 329 "/usr/include/stdlib.h" <System_Header>
extern char * initstate ( unsigned int __seed , char * __statebuf ,
size_t __statelen ) ;
# 334 "/usr/include/stdlib.h" <System_Header>
extern char * setstate ( char * __statebuf ) ;
# 341 "/usr/include/stdlib.h" <System_Header>
# 343 "/usr/include/stdlib.h" <System_Header>
struct random_data
{
int32_t * fptr ;
int32_t * rptr ;
int32_t * state ;
int rand_type ;
int rand_deg ;
int rand_sep ;
int32_t * end_ptr ;
} ;
# 354 "/usr/include/stdlib.h" <System_Header>
extern int random_r ( struct random_data * __restrict __buf ,
int32_t * __restrict __result ) ;
# 357 "/usr/include/stdlib.h" <System_Header>
extern int srandom_r ( unsigned int __seed , struct random_data * __buf )
;
# 360 "/usr/include/stdlib.h" <System_Header>
extern int initstate_r ( unsigned int __seed , char * __restrict __statebuf ,
size_t __statelen ,
struct random_data * __restrict __buf )
;
# 365 "/usr/include/stdlib.h" <System_Header>
extern int setstate_r ( char * __restrict __statebuf ,
struct random_data * __restrict __buf )
;
# 373 "/usr/include/stdlib.h" <System_Header>
extern int rand ( void ) ;
extern void srand ( unsigned int __seed ) ;
# 380 "/usr/include/stdlib.h" <System_Header>
extern int rand_r ( unsigned int * __seed ) ;
# 386 "/usr/include/stdlib.h" <System_Header>
# 388 "/usr/include/stdlib.h" <System_Header>
extern double drand48 ( void ) ;
extern double erand48 ( unsigned short int __xsubi [ 3 ] ) ;
# 392 "/usr/include/stdlib.h" <System_Header>
extern long int lrand48 ( void ) ;
extern long int nrand48 ( unsigned short int __xsubi [ 3 ] )
;
# 397 "/usr/include/stdlib.h" <System_Header>
extern long int mrand48 ( void ) ;
extern long int jrand48 ( unsigned short int __xsubi [ 3 ] )
;
# 402 "/usr/include/stdlib.h" <System_Header>
extern void srand48 ( long int __seedval ) ;
extern unsigned short int * seed48 ( unsigned short int __seed16v [ 3 ] )
;
extern void lcong48 ( unsigned short int __param [ 7 ] ) ;
# 411 "/usr/include/stdlib.h" <System_Header>
struct drand48_data
{
unsigned short int __x [ 3 ] ;
unsigned short int __old_x [ 3 ] ;
unsigned short int __c ;
unsigned short int __init ;
unsigned long long int __a ;
} ;
# 422 "/usr/include/stdlib.h" <System_Header>
extern int drand48_r ( struct drand48_data * __restrict __buffer ,
double * __restrict __result ) ;
extern int erand48_r ( unsigned short int __xsubi [ 3 ] ,
struct drand48_data * __restrict __buffer ,
double * __restrict __result ) ;
# 429 "/usr/include/stdlib.h" <System_Header>
extern int lrand48_r ( struct drand48_data * __restrict __buffer ,
long int * __restrict __result )
;
extern int nrand48_r ( unsigned short int __xsubi [ 3 ] ,
struct drand48_data * __restrict __buffer ,
long int * __restrict __result )
;
# 438 "/usr/include/stdlib.h" <System_Header>
extern int mrand48_r ( struct drand48_data * __restrict __buffer ,
long int * __restrict __result )
;
extern int jrand48_r ( unsigned short int __xsubi [ 3 ] ,
struct drand48_data * __restrict __buffer ,
long int * __restrict __result )
;
# 447 "/usr/include/stdlib.h" <System_Header>
extern int srand48_r ( long int __seedval , struct drand48_data * __buffer )
;
# 451 "/usr/include/stdlib.h" <System_Header>
extern int seed48_r ( unsigned short int __seed16v [ 3 ] ,
struct drand48_data * __buffer ) ;
# 454 "/usr/include/stdlib.h" <System_Header>
extern int lcong48_r ( unsigned short int __param [ 7 ] ,
struct drand48_data * __buffer )
;
# 465 "/usr/include/stdlib.h" <System_Header>
extern void * malloc ( size_t __size ) ;
extern void * calloc ( size_t __nmemb , size_t __size )
;
# 476 "/usr/include/stdlib.h" <System_Header>
# 479 "/usr/include/stdlib.h" <System_Header>
extern void * realloc ( void * __ptr , size_t __size )
;
extern void free ( void * __ptr ) ;
# 487 "/usr/include/stdlib.h" <System_Header>
extern void cfree ( void * __ptr ) ;
# 492 "/usr/include/stdlib.h" <System_Header>
# 1 "/opt/pgi/linux86-64/17.10/include/alloca.h" <System_Header>
# 17 "/opt/pgi/linux86-64/17.10/include/alloca.h" <System_Header>
# 22 "/opt/pgi/linux86-64/17.10/include/alloca.h" <System_Header>
# 1 "/usr/include/features.h" <System_Header>
# 16 "/usr/include/features.h" <System_Header>
# 23 "/opt/pgi/linux86-64/17.10/include/alloca.h" <System_Header>
# 25 "/opt/pgi/linux86-64/17.10/include/alloca.h" <System_Header>
# 1 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header>
# 10 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header>
# 14 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header>
# 16 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header>
# 1 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 22 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 26 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 35 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 47 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 50 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 54 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 62 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 93 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 103 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 121 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 123 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 126 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 165 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 168 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 245 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 248 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 363 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 398 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 17 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header>
# 26 "/opt/pgi/linux86-64/17.10/include/alloca.h" <System_Header>
# 29 "/opt/pgi/linux86-64/17.10/include/alloca.h" <System_Header>
# 33 "/opt/pgi/linux86-64/17.10/include/alloca.h" <System_Header>
# 39 "/opt/pgi/linux86-64/17.10/include/alloca.h" <System_Header>
extern void * __alloca ( size_t __size ) ;
extern void * alloca ( size_t __size ) ;
extern void * __builtin_alloca ( size_t __size ) ;
# 493 "/usr/include/stdlib.h" <System_Header>
# 497 "/usr/include/stdlib.h" <System_Header>
extern void * valloc ( size_t __size ) ;
# 502 "/usr/include/stdlib.h" <System_Header>
extern int posix_memalign ( void * * __memptr , size_t __alignment , size_t __size )
;
# 514 "/usr/include/stdlib.h" <System_Header>
extern void abort ( void ) __attribute__ ( ( __noreturn__ ) ) ;
# 518 "/usr/include/stdlib.h" <System_Header>
extern int atexit ( void ( * __func ) ( void ) ) ;
# 534 "/usr/include/stdlib.h" <System_Header>
extern int on_exit ( void ( * __func ) ( int __status , void * __arg ) , void * __arg )
;
# 542 "/usr/include/stdlib.h" <System_Header>
extern void exit ( int __status ) __attribute__ ( ( __noreturn__ ) ) ;
# 556 "/usr/include/stdlib.h" <System_Header>
extern void _Exit ( int __status ) __attribute__ ( ( __noreturn__ ) ) ;
# 563 "/usr/include/stdlib.h" <System_Header>
extern char * getenv ( const char * __name ) ;
# 575 "/usr/include/stdlib.h" <System_Header>
# 577 "/usr/include/stdlib.h" <System_Header>
extern int putenv ( char * __string ) ;
# 583 "/usr/include/stdlib.h" <System_Header>
extern int setenv ( const char * __name , const char * __value , int __replace )
;
# 587 "/usr/include/stdlib.h" <System_Header>
extern int unsetenv ( const char * __name ) ;
# 594 "/usr/include/stdlib.h" <System_Header>
extern int clearenv ( void ) ;
# 605 "/usr/include/stdlib.h" <System_Header>
extern char * mktemp ( char * __template ) ;
# 617 "/usr/include/stdlib.h" <System_Header>
# 619 "/usr/include/stdlib.h" <System_Header>
extern int mkstemp ( char * __template ) ;
# 639 "/usr/include/stdlib.h" <System_Header>
# 641 "/usr/include/stdlib.h" <System_Header>
extern int mkstemps ( char * __template , int __suffixlen ) ;
# 661 "/usr/include/stdlib.h" <System_Header>
extern char * mkdtemp ( char * __template ) ;
# 715 "/usr/include/stdlib.h" <System_Header>
extern int system ( const char * __command ) ;
# 732 "/usr/include/stdlib.h" <System_Header>
extern char * realpath ( const char * __restrict __name ,
char * __restrict __resolved ) ;
# 738 "/usr/include/stdlib.h" <System_Header>
# 741 "/usr/include/stdlib.h" <System_Header>
typedef int ( * __compar_fn_t ) ( const void * , const void * ) ;
# 753 "/usr/include/stdlib.h" <System_Header>
extern void * bsearch ( const void * __key , const void * __base ,
size_t __nmemb , size_t __size , __compar_fn_t __compar )
;
# 763 "/usr/include/stdlib.h" <System_Header>
extern void qsort ( void * __base , size_t __nmemb , size_t __size ,
__compar_fn_t __compar ) ;
# 773 "/usr/include/stdlib.h" <System_Header>
extern int abs ( int __x ) __attribute__ ( ( __const__ ) ) ;
extern long int labs ( long int __x ) __attribute__ ( ( __const__ ) ) ;
# 779 "/usr/include/stdlib.h" <System_Header>
extern long long int llabs ( long long int __x )
__attribute__ ( ( __const__ ) ) ;
# 786 "/usr/include/stdlib.h" <System_Header>
extern div_t div ( int __numer , int __denom )
__attribute__ ( ( __const__ ) ) ;
extern ldiv_t ldiv ( long int __numer , long int __denom )
__attribute__ ( ( __const__ ) ) ;
# 796 "/usr/include/stdlib.h" <System_Header>
extern lldiv_t lldiv ( long long int __numer ,
long long int __denom )
__attribute__ ( ( __const__ ) ) ;
# 806 "/usr/include/stdlib.h" <System_Header>
# 810 "/usr/include/stdlib.h" <System_Header>
extern char * ecvt ( double __value , int __ndigit , int * __restrict __decpt ,
int * __restrict __sign ) ;
# 816 "/usr/include/stdlib.h" <System_Header>
extern char * fcvt ( double __value , int __ndigit , int * __restrict __decpt ,
int * __restrict __sign ) ;
# 822 "/usr/include/stdlib.h" <System_Header>
extern char * gcvt ( double __value , int __ndigit , char * __buf )
;
# 828 "/usr/include/stdlib.h" <System_Header>
extern char * qecvt ( long double __value , int __ndigit ,
int * __restrict __decpt , int * __restrict __sign )
;
extern char * qfcvt ( long double __value , int __ndigit ,
int * __restrict __decpt , int * __restrict __sign )
;
extern char * qgcvt ( long double __value , int __ndigit , char * __buf )
;
# 840 "/usr/include/stdlib.h" <System_Header>
extern int ecvt_r ( double __value , int __ndigit , int * __restrict __decpt ,
int * __restrict __sign , char * __restrict __buf ,
size_t __len ) ;
extern int fcvt_r ( double __value , int __ndigit , int * __restrict __decpt ,
int * __restrict __sign , char * __restrict __buf ,
size_t __len ) ;
# 848 "/usr/include/stdlib.h" <System_Header>
extern int qecvt_r ( long double __value , int __ndigit ,
int * __restrict __decpt , int * __restrict __sign ,
char * __restrict __buf , size_t __len )
;
extern int qfcvt_r ( long double __value , int __ndigit ,
int * __restrict __decpt , int * __restrict __sign ,
char * __restrict __buf , size_t __len )
;
# 861 "/usr/include/stdlib.h" <System_Header>
extern int mblen ( const char * __s , size_t __n ) ;
# 864 "/usr/include/stdlib.h" <System_Header>
extern int mbtowc ( wchar_t * __restrict __pwc ,
const char * __restrict __s , size_t __n ) ;
# 868 "/usr/include/stdlib.h" <System_Header>
extern int wctomb ( char * __s , wchar_t __wchar ) ;
# 872 "/usr/include/stdlib.h" <System_Header>
extern size_t mbstowcs ( wchar_t * __restrict __pwcs ,
const char * __restrict __s , size_t __n ) ;
extern size_t wcstombs ( char * __restrict __s ,
const wchar_t * __restrict __pwcs , size_t __n )
;
# 886 "/usr/include/stdlib.h" <System_Header>
extern int rpmatch ( const char * __response ) ;
# 897 "/usr/include/stdlib.h" <System_Header>
extern int getsubopt ( char * * __restrict __optionp ,
char * const * __restrict __tokens ,
char * * __restrict __valuep )
;
# 911 "/usr/include/stdlib.h" <System_Header>
# 949 "/usr/include/stdlib.h" <System_Header>
extern int getloadavg ( double __loadavg [ ] , int __nelem )
;
# 954 "/usr/include/stdlib.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/stdlib-float.h" <System_Header>
# 17 "/usr/include/x86_64-linux-gnu/bits/stdlib-float.h" <System_Header>
# 955 "/usr/include/stdlib.h" <System_Header>
# 956 "/usr/include/stdlib.h" <System_Header>
# 17 "/opt/pgi/linux86-64/17.10/include/stdlib.h" <System_Header>
# 442 "/opt/pgi/linux86-64/17.10/include/stdlib.h" <System_Header>
# 456 "/opt/pgi/linux86-64/17.10/include/stdlib.h" <System_Header>
# 459 "/opt/pgi/linux86-64/17.10/include/stdlib.h" <System_Header>
int __builtin_abs ( int ) ;
# 20 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header>
# 25 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header>
extern void * malloc_managed ( size_t ) ;
extern void * calloc_managed ( size_t , size_t ) ;
extern void free_managed ( void * ) ;
extern void cfree_managed ( void * ) ;
# 31 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header>
extern void * realloc_managed ( void * , size_t ) ;
extern void * valloc_managed ( size_t ) ;
extern void * pvalloc_managed ( size_t ) ;
extern void * memalign_managed ( size_t , size_t ) ;
extern int posix_memalign_managed ( void * * , size_t , size_t ) ;
extern char * tmpnam_managed ( char * ) ;
# 43 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header>
# 58 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header>
# 90 "/opt/pgi/linux86-64/17.10/include/omp.h" <System_Header>
# 90 "/opt/pgi/linux86-64/17.10/include/omp.h" <System_Header>
extern void * omp_target_alloc ( size_t , int ) ;
extern void omp_target_free ( void * , int ) ;
extern int omp_target_memcpy ( void * , void * , size_t , size_t , size_t , int , int ) ;
# 94 "/opt/pgi/linux86-64/17.10/include/omp.h" <System_Header>
typedef int _Atomic_word ;
extern void _mp_atomic_add ( int * , int ) ;
extern void _mp_exchange_and_add ( int * , int ) ;
# 5 "main.c"
# 5 "main.c"
# 1 "/opt/pgi/linux86-64/17.10/include/openacc.h" <System_Header>
# 10 "/opt/pgi/linux86-64/17.10/include/openacc.h" <System_Header>
# 27 "/opt/pgi/linux86-64/17.10/include/openacc.h" <System_Header>
typedef enum {
acc_device_none = 0 ,
acc_device_default = 1 ,
acc_device_host = 2 ,
acc_device_not_host = 3 ,
acc_device_nvidia = 4 ,
acc_device_radeon = 5 ,
acc_device_xeonphi = 6 ,
acc_device_pgi_opencl = 7 ,
acc_device_nvidia_opencl = 8 ,
acc_device_opencl = 9
} acc_device_t ;
# 45 "/opt/pgi/linux86-64/17.10/include/openacc.h" <System_Header>
void acc_set_default_async ( int async ) ;
int acc_get_default_async ( void ) ;
extern int acc_get_num_devices ( acc_device_t devtype ) ;
extern acc_device_t acc_get_device ( void ) ;
extern void acc_set_device_num ( int devnum , acc_device_t devtype ) ;
extern int acc_get_device_num ( acc_device_t devtype ) ;
extern void acc_init ( acc_device_t devtype ) ;
extern void acc_shutdown ( acc_device_t devtype ) ;
extern void acc_set_deviceid ( int devid ) ;
extern int acc_get_deviceid ( int devnum , acc_device_t devtype ) ;
extern int acc_async_test ( long async ) ;
extern int acc_async_test_all ( void ) ;
extern void acc_async_wait ( long async ) ;
extern void acc_async_wait_all ( void ) ;
extern void acc_wait ( long async ) ;
extern void acc_wait_async ( long arg , long async ) ;
extern void acc_wait_all ( void ) ;
extern void acc_wait_all_async ( long async ) ;
extern int acc_on_device ( acc_device_t devtype ) ;
extern void acc_free ( void * ) ;
# 66 "/opt/pgi/linux86-64/17.10/include/openacc.h" <System_Header>
extern void * acc_memcpy ( void * targetptr , void * srcptr , unsigned long bytes ) ;
extern void * acc_memcpy_async ( void * targetptr , void * srcptr , unsigned long bytes , long async ) ;
extern void * acc_copyin ( void * hostptr , unsigned long bytes ) ;
extern void * acc_copyin_async ( void * hostptr , unsigned long bytes , long async ) ;
extern void * acc_pcopyin ( void * hostptr , unsigned long bytes ) ;
extern void * acc_pcopyin_async ( void * hostptr , unsigned long bytes , long async ) ;
extern void * acc_present_or_copyin ( void * hostptr , unsigned long bytes ) ;
extern void * acc_present_or_copyin_async ( void * hostptr , unsigned long bytes , long async ) ;
extern void * acc_create ( void * hostptr , unsigned long bytes ) ;
extern void * acc_create_async ( void * hostptr , unsigned long bytes , long async ) ;
extern void * acc_pcreate ( void * hostptr , unsigned long bytes ) ;
extern void * acc_pcreate_async ( void * hostptr , unsigned long bytes , long async ) ;
extern void * acc_present_or_create ( void * hostptr , unsigned long bytes ) ;
extern void * acc_present_or_create_async ( void * hostptr , unsigned long bytes , long async ) ;
extern void acc_copyout ( void * hostptr , unsigned long bytes ) ;
extern void acc_copyout_async ( void * hostptr , unsigned long bytes , long async ) ;
extern void acc_delete ( void * hostptr , unsigned long bytes ) ;
extern void acc_delete_async ( void * hostptr , unsigned long bytes , long async ) ;
extern void acc_update_device ( void * hostptr , unsigned long bytes ) ;
extern void acc_update_device_async ( void * hostptr , unsigned long bytes , long async ) ;
extern void acc_update_self ( void * hostptr , unsigned long bytes ) ;
extern void acc_update_self_async ( void * hostptr , unsigned long bytes , long async ) ;
extern void acc_update_host ( void * hostptr , unsigned long bytes ) ;
extern void acc_update_host_async ( void * hostptr , unsigned long bytes , long async ) ;
extern void acc_memcpy_to_device ( void * devptr , void * hostptr , unsigned long bytes ) ;
extern void acc_memcpy_to_device_async ( void * devptr , void * hostptr , unsigned long bytes , long async ) ;
extern void acc_memcpy_from_device ( void * hostptr , void * devptr , unsigned long bytes ) ;
extern void acc_memcpy_from_device_async ( void * hostptr , void * devptr , unsigned long bytes , long async ) ;
extern void * acc_memcpy_device ( void * targetdevptr , void * srcdevptr , unsigned long bytes ) ;
extern void * acc_memcpy_device_async ( void * targetdevptr , void * srcdevptr , unsigned long bytes , long async ) ;
extern void acc_attach ( void * * hostptrptr ) ;
extern void acc_attach_async ( void * * hostptrptr , long async ) ;
extern void acc_detach ( void * * hostptrptr ) ;
extern void acc_detach_async ( void * * hostptrptr , long async ) ;
# 101 "/opt/pgi/linux86-64/17.10/include/openacc.h" <System_Header>
extern void acc_set_device_type ( acc_device_t devtype ) ;
extern acc_device_t acc_get_device_type ( void ) ;
extern void * acc_malloc ( unsigned long ) ;
extern void * acc_deviceptr ( void * hostptr ) ;
extern void * acc_hostptr ( void * devptr ) ;
extern void acc_map_data ( void * hostptr , void * devptr , unsigned long bytes ) ;
extern void acc_unmap_data ( void * hostptr ) ;
extern int acc_is_present ( void * hostptr , unsigned long bytes ) ;
extern int acc_present_count ( void * hostptr ) ;
extern void acc_updatein ( void * hostptr , unsigned long bytes ) ;
extern void acc_updatein_async ( void * hostptr , unsigned long bytes , long async ) ;
extern void acc_updateout ( void * hostptr , unsigned long bytes ) ;
extern void acc_updateout_async ( void * hostptr , unsigned long bytes , long async ) ;
# 115 "/opt/pgi/linux86-64/17.10/include/openacc.h" <System_Header>
extern void * acc_get_current_cuda_context ( void ) ;
extern int acc_get_current_cuda_device ( void ) ;
extern void * acc_get_cuda_stream ( long ) ;
extern void acc_set_cuda_stream ( long , void * ) ;
extern void * acc_cuda_get_context ( int ) ;
extern int acc_cuda_get_device ( int ) ;
# 122 "/opt/pgi/linux86-64/17.10/include/openacc.h" <System_Header>
extern void * acc_get_current_opencl_context ( void ) ;
extern void * acc_get_current_opencl_device ( void ) ;
extern void * acc_get_opencl_queue ( long ) ;
# 126 "/opt/pgi/linux86-64/17.10/include/openacc.h" <System_Header>
extern int atomicaddi ( void * address , int val ) ;
extern unsigned int atomicaddu ( void * address , unsigned int val ) ;
extern unsigned long long atomicaddul ( void * address , unsigned long long val ) ;
extern float atomicaddf ( void * address , float val ) ;
extern double atomicaddd ( void * address , double val ) ;
# 133 "/opt/pgi/linux86-64/17.10/include/openacc.h" <System_Header>
extern int atomicsubi ( void * address , int val ) ;
extern unsigned int atomicsubu ( void * address , unsigned int val ) ;
extern unsigned long long atomicsubul ( void * address , unsigned long long val ) ;
extern float atomicsubf ( void * address , float val ) ;
extern double atomicsubd ( void * address , double val ) ;
# 139 "/opt/pgi/linux86-64/17.10/include/openacc.h" <System_Header>
extern int atomicmaxi ( void * address , int val ) ;
extern unsigned int atomicmaxu ( void * address , unsigned int val ) ;
extern unsigned long long atomicmaxul ( void * address , unsigned long long val ) ;
extern float atomicmaxf ( void * address , float val ) ;
extern double atomicmaxd ( void * address , double val ) ;
# 145 "/opt/pgi/linux86-64/17.10/include/openacc.h" <System_Header>
extern int atomicmini ( void * address , int val ) ;
extern unsigned int atomicminu ( void * address , unsigned int val ) ;
extern unsigned long long atomicminul ( void * address , unsigned long long val ) ;
extern float atomicminf ( void * address , float val ) ;
extern double atomicmind ( void * address , double val ) ;
# 151 "/opt/pgi/linux86-64/17.10/include/openacc.h" <System_Header>
extern int atomicandi ( void * address , int val ) ;
extern unsigned int atomicandu ( void * address , unsigned int val ) ;
extern unsigned long long atomicandul ( void * address , unsigned long long val ) ;
# 155 "/opt/pgi/linux86-64/17.10/include/openacc.h" <System_Header>
extern int atomicori ( void * address , int val ) ;
extern unsigned int atomicoru ( void * address , unsigned int val ) ;
extern unsigned long long atomicorul ( void * address , unsigned long long val ) ;
# 159 "/opt/pgi/linux86-64/17.10/include/openacc.h" <System_Header>
extern int atomicxori ( void * address , int val ) ;
extern unsigned int atomicxoru ( void * address , unsigned int val ) ;
extern unsigned long long atomicxorul ( void * address , unsigned long long val ) ;
# 163 "/opt/pgi/linux86-64/17.10/include/openacc.h" <System_Header>
extern int atomicexchi ( void * address , int val ) ;
extern unsigned int atomicexchu ( void * address , unsigned int val ) ;
extern unsigned long long atomicexchul ( void * address , unsigned long long val ) ;
extern float atomicexchf ( void * address , float val ) ;
extern double atomicexchd ( void * address , double val ) ;
# 169 "/opt/pgi/linux86-64/17.10/include/openacc.h" <System_Header>
extern unsigned int atomicincu ( void * address , unsigned int val ) ;
# 171 "/opt/pgi/linux86-64/17.10/include/openacc.h" <System_Header>
extern unsigned int atomicdecu ( void * address , unsigned int val ) ;
# 173 "/opt/pgi/linux86-64/17.10/include/openacc.h" <System_Header>
extern int atomiccasi ( void * address , int val , int val2 ) ;
extern unsigned int atomiccasu ( void * address , unsigned int val , unsigned int val2 ) ;
extern unsigned long long atomiccasul ( void * address , unsigned long long val , unsigned long long val2 ) ;
extern float atomiccasf ( void * address , float val , float val2 ) ;
extern double atomiccasd ( void * address , double val , double val2 ) ;
# 179 "/opt/pgi/linux86-64/17.10/include/openacc.h" <System_Header>
extern int __pgi_gangidx ( void ) ;
extern int __pgi_workeridx ( void ) ;
extern int __pgi_vectoridx ( void ) ;
extern int __pgi_blockidx ( int ) ;
extern int __pgi_threadidx ( int ) ;
# 6 "main.c"
# 6 "main.c"
# 1 "/usr/include/stdio.h" <System_Header>
# 17 "/usr/include/stdio.h" <System_Header>
# 21 "/usr/include/stdio.h" <System_Header>
# 27 "/usr/include/stdio.h" <System_Header>
# 1 "/usr/include/features.h" <System_Header>
# 16 "/usr/include/features.h" <System_Header>
# 28 "/usr/include/stdio.h" <System_Header>
# 33 "/usr/include/stdio.h" <System_Header>
# 1 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header>
# 10 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header>
# 14 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header>
# 16 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header>
# 1 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 22 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 26 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 35 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 47 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 50 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 54 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 62 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 93 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 103 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 121 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 123 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 126 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 165 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 168 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 245 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 248 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 363 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 398 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 17 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header>
# 34 "/usr/include/stdio.h" <System_Header>
# 35 "/usr/include/stdio.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 17 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 21 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 36 "/usr/include/stdio.h" <System_Header>
# 43 "/usr/include/stdio.h" <System_Header>
struct _IO_FILE ;
# 47 "/usr/include/stdio.h" <System_Header>
typedef struct _IO_FILE FILE ;
# 63 "/usr/include/stdio.h" <System_Header>
typedef struct _IO_FILE __FILE ;
# 74 "/usr/include/stdio.h" <System_Header>
# 1 "/usr/include/libio.h" <System_Header>
# 26 "/usr/include/libio.h" <System_Header>
# 31 "/usr/include/libio.h" <System_Header>
# 1 "/usr/include/_G_config.h" <System_Header>
# 2 "/usr/include/_G_config.h" <System_Header>
# 7 "/usr/include/_G_config.h" <System_Header>
# 9 "/usr/include/_G_config.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 17 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 21 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 10 "/usr/include/_G_config.h" <System_Header>
# 15 "/usr/include/_G_config.h" <System_Header>
# 1 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header>
# 10 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header>
# 14 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header>
# 16 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header>
# 1 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 22 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 26 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 35 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 47 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 50 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 54 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 62 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 93 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 103 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 121 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 123 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 126 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 165 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 168 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 245 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 248 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 363 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 398 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 17 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header>
# 16 "/usr/include/_G_config.h" <System_Header>
# 20 "/usr/include/_G_config.h" <System_Header>
# 1 "/usr/include/wchar.h" <System_Header>
# 16 "/usr/include/wchar.h" <System_Header>
# 21 "/usr/include/wchar.h" <System_Header>
# 51 "/usr/include/wchar.h" <System_Header>
# 1 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header>
# 10 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header>
# 14 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header>
# 16 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header>
# 1 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 22 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 26 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 35 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 47 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 50 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 54 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 62 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 93 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 103 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 121 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 123 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 126 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 165 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 168 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 245 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 248 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 357 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
typedef unsigned int wint_t ;
# 363 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 398 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 17 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header>
# 52 "/usr/include/wchar.h" <System_Header>
# 54 "/usr/include/wchar.h" <System_Header>
# 64 "/usr/include/wchar.h" <System_Header>
# 73 "/usr/include/wchar.h" <System_Header>
# 81 "/usr/include/wchar.h" <System_Header>
typedef struct
{
int __count ;
union
{
# 88 "/usr/include/wchar.h" <System_Header>
unsigned int __wch ;
# 92 "/usr/include/wchar.h" <System_Header>
char __wchb [ 4 ] ;
} __value ;
} __mbstate_t ;
# 100 "/usr/include/wchar.h" <System_Header>
# 901 "/usr/include/wchar.h" <System_Header>
# 21 "/usr/include/_G_config.h" <System_Header>
# 21 "/usr/include/_G_config.h" <System_Header>
typedef struct
{
__off_t __pos ;
__mbstate_t __state ;
} _G_fpos_t ;
typedef struct
{
__off64_t __pos ;
__mbstate_t __state ;
} _G_fpos64_t ;
# 45 "/usr/include/_G_config.h" <System_Header>
# 53 "/usr/include/_G_config.h" <System_Header>
# 32 "/usr/include/libio.h" <System_Header>
# 32 "/usr/include/libio.h" <System_Header>
# 47 "/usr/include/libio.h" <System_Header>
# 49 "/usr/include/libio.h" <System_Header>
# 1 "/opt/pgi/linux86-64/17.10/include/stdarg.h" <System_Header>
# 10 "/opt/pgi/linux86-64/17.10/include/stdarg.h" <System_Header>
# 14 "/opt/pgi/linux86-64/17.10/include/stdarg.h" <System_Header>
# 24 "/opt/pgi/linux86-64/17.10/include/stdarg.h" <System_Header>
# 1 "/opt/pgi/linux86-64/17.10/include/va_list.h" <System_Header>
# 10 "/opt/pgi/linux86-64/17.10/include/va_list.h" <System_Header>
# 14 "/opt/pgi/linux86-64/17.10/include/va_list.h" <System_Header>
# 31 "/opt/pgi/linux86-64/17.10/include/va_list.h" <System_Header>
# 34 "/opt/pgi/linux86-64/17.10/include/va_list.h" <System_Header>
typedef struct __pgi_tag {
unsigned int gp_offset ;
unsigned int fp_offset ;
char * overflow_arg_area ;
char * reg_save_area ;
} __pgi_va_list [ 1 ] ;
# 49 "/opt/pgi/linux86-64/17.10/include/va_list.h" <System_Header>
typedef __pgi_va_list va_list ;
# 60 "/opt/pgi/linux86-64/17.10/include/va_list.h" <System_Header>
typedef __pgi_va_list __gnuc_va_list ;
# 25 "/opt/pgi/linux86-64/17.10/include/stdarg.h" <System_Header>
# 31 "/opt/pgi/linux86-64/17.10/include/stdarg.h" <System_Header>
extern void * __builtin_va_arg ( ) ;
extern int __builtin_va_start ( ) ;
# 60 "/opt/pgi/linux86-64/17.10/include/stdarg.h" <System_Header>
# 50 "/usr/include/libio.h" <System_Header>
# 90 "/usr/include/libio.h" <System_Header>
# 124 "/usr/include/libio.h" <System_Header>
# 144 "/usr/include/libio.h" <System_Header>
struct _IO_jump_t ; struct _IO_FILE ;
# 146 "/usr/include/libio.h" <System_Header>
# 150 "/usr/include/libio.h" <System_Header>
typedef void _IO_lock_t ;
# 154 "/usr/include/libio.h" <System_Header>
# 156 "/usr/include/libio.h" <System_Header>
struct _IO_marker {
struct _IO_marker * _next ;
struct _IO_FILE * _sbuf ;
int _pos ;
# 173 "/usr/include/libio.h" <System_Header>
} ;
# 175 "/usr/include/libio.h" <System_Header>
enum __codecvt_result
{
__codecvt_ok ,
__codecvt_partial ,
__codecvt_error ,
__codecvt_noconv
} ;
# 241 "/usr/include/libio.h" <System_Header>
struct _IO_FILE {
int _flags ;
# 245 "/usr/include/libio.h" <System_Header>
char * _IO_read_ptr ;
char * _IO_read_end ;
char * _IO_read_base ;
char * _IO_write_base ;
char * _IO_write_ptr ;
char * _IO_write_end ;
char * _IO_buf_base ;
char * _IO_buf_end ;
char * _IO_save_base ;
char * _IO_backup_base ;
char * _IO_save_end ;
# 260 "/usr/include/libio.h" <System_Header>
struct _IO_marker * _markers ;
# 262 "/usr/include/libio.h" <System_Header>
struct _IO_FILE * _chain ;
# 264 "/usr/include/libio.h" <System_Header>
int _fileno ;
# 268 "/usr/include/libio.h" <System_Header>
int _flags2 ;
# 270 "/usr/include/libio.h" <System_Header>
__off_t _old_offset ;
# 273 "/usr/include/libio.h" <System_Header>
unsigned short _cur_column ;
signed char _vtable_offset ;
char _shortbuf [ 1 ] ;
# 278 "/usr/include/libio.h" <System_Header>
# 280 "/usr/include/libio.h" <System_Header>
_IO_lock_t * _lock ;
# 289 "/usr/include/libio.h" <System_Header>
__off64_t _offset ;
# 297 "/usr/include/libio.h" <System_Header>
void * __pad1 ;
void * __pad2 ;
void * __pad3 ;
void * __pad4 ;
# 302 "/usr/include/libio.h" <System_Header>
size_t __pad5 ;
int _mode ;
char _unused2 [ 15 * sizeof ( int ) - 4 * sizeof ( void * ) - sizeof ( size_t ) ] ;
# 307 "/usr/include/libio.h" <System_Header>
} ;
# 310 "/usr/include/libio.h" <System_Header>
typedef struct _IO_FILE _IO_FILE ;
# 313 "/usr/include/libio.h" <System_Header>
struct _IO_FILE_plus ;
# 315 "/usr/include/libio.h" <System_Header>
extern struct _IO_FILE_plus _IO_2_1_stdin_ ;
extern struct _IO_FILE_plus _IO_2_1_stdout_ ;
extern struct _IO_FILE_plus _IO_2_1_stderr_ ;
# 329 "/usr/include/libio.h" <System_Header>
# 332 "/usr/include/libio.h" <System_Header>
typedef __ssize_t __io_read_fn ( void * __cookie , char * __buf , size_t __nbytes ) ;
# 340 "/usr/include/libio.h" <System_Header>
typedef __ssize_t __io_write_fn ( void * __cookie , const char * __buf ,
size_t __n ) ;
# 349 "/usr/include/libio.h" <System_Header>
typedef int __io_seek_fn ( void * __cookie , __off64_t * __pos , int __w ) ;
# 352 "/usr/include/libio.h" <System_Header>
typedef int __io_close_fn ( void * __cookie ) ;
# 385 "/usr/include/libio.h" <System_Header>
extern int __underflow ( _IO_FILE * ) ;
extern int __uflow ( _IO_FILE * ) ;
extern int __overflow ( _IO_FILE * , int ) ;
# 429 "/usr/include/libio.h" <System_Header>
extern int _IO_getc ( _IO_FILE * __fp ) ;
extern int _IO_putc ( int __c , _IO_FILE * __fp ) ;
extern int _IO_feof ( _IO_FILE * __fp ) ;
extern int _IO_ferror ( _IO_FILE * __fp ) ;
# 434 "/usr/include/libio.h" <System_Header>
extern int _IO_peekc_locked ( _IO_FILE * __fp ) ;
# 436 "/usr/include/libio.h" <System_Header>
# 440 "/usr/include/libio.h" <System_Header>
extern void _IO_flockfile ( _IO_FILE * ) ;
extern void _IO_funlockfile ( _IO_FILE * ) ;
extern int _IO_ftrylockfile ( _IO_FILE * ) ;
# 459 "/usr/include/libio.h" <System_Header>
extern int _IO_vfscanf ( _IO_FILE * __restrict , const char * __restrict ,
__gnuc_va_list , int * __restrict ) ;
extern int _IO_vfprintf ( _IO_FILE * __restrict , const char * __restrict ,
__gnuc_va_list ) ;
extern __ssize_t _IO_padn ( _IO_FILE * , int , __ssize_t ) ;
extern size_t _IO_sgetn ( _IO_FILE * , void * , size_t ) ;
# 466 "/usr/include/libio.h" <System_Header>
extern __off64_t _IO_seekoff ( _IO_FILE * , __off64_t , int , int ) ;
extern __off64_t _IO_seekpos ( _IO_FILE * , __off64_t , int ) ;
# 469 "/usr/include/libio.h" <System_Header>
extern void _IO_free_backup_area ( _IO_FILE * ) ;
# 75 "/usr/include/stdio.h" <System_Header>
# 83 "/usr/include/stdio.h" <System_Header>
# 1 "/opt/pgi/linux86-64/17.10/include/stdarg.h" <System_Header>
# 10 "/opt/pgi/linux86-64/17.10/include/stdarg.h" <System_Header>
# 14 "/opt/pgi/linux86-64/17.10/include/stdarg.h" <System_Header>
# 84 "/usr/include/stdio.h" <System_Header>
# 107 "/usr/include/stdio.h" <System_Header>
# 110 "/usr/include/stdio.h" <System_Header>
typedef _G_fpos_t fpos_t ;
# 119 "/usr/include/stdio.h" <System_Header>
# 125 "/usr/include/stdio.h" <System_Header>
# 132 "/usr/include/stdio.h" <System_Header>
# 139 "/usr/include/stdio.h" <System_Header>
# 150 "/usr/include/stdio.h" <System_Header>
# 163 "/usr/include/stdio.h" <System_Header>
# 164 "/usr/include/stdio.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/stdio_lim.h" <System_Header>
# 16 "/usr/include/x86_64-linux-gnu/bits/stdio_lim.h" <System_Header>
# 165 "/usr/include/stdio.h" <System_Header>
# 167 "/usr/include/stdio.h" <System_Header>
extern struct _IO_FILE * stdin ;
extern struct _IO_FILE * stdout ;
extern struct _IO_FILE * stderr ;
# 177 "/usr/include/stdio.h" <System_Header>
extern int remove ( const char * __filename ) ;
extern int rename ( const char * __old , const char * __new ) ;
# 184 "/usr/include/stdio.h" <System_Header>
extern int renameat ( int __oldfd , const char * __old , int __newfd ,
const char * __new ) ;
# 193 "/usr/include/stdio.h" <System_Header>
# 195 "/usr/include/stdio.h" <System_Header>
extern FILE * tmpfile ( void ) ;
# 208 "/usr/include/stdio.h" <System_Header>
extern char * tmpnam ( char * __s ) ;
# 214 "/usr/include/stdio.h" <System_Header>
extern char * tmpnam_r ( char * __s ) ;
# 226 "/usr/include/stdio.h" <System_Header>
extern char * tempnam ( const char * __dir , const char * __pfx )
;
# 236 "/usr/include/stdio.h" <System_Header>
extern int fclose ( FILE * __stream ) ;
# 241 "/usr/include/stdio.h" <System_Header>
extern int fflush ( FILE * __stream ) ;
# 251 "/usr/include/stdio.h" <System_Header>
extern int fflush_unlocked ( FILE * __stream ) ;
# 271 "/usr/include/stdio.h" <System_Header>
extern FILE * fopen ( const char * __restrict __filename ,
const char * __restrict __modes ) ;
# 277 "/usr/include/stdio.h" <System_Header>
extern FILE * freopen ( const char * __restrict __filename ,
const char * __restrict __modes ,
FILE * __restrict __stream ) ;
# 305 "/usr/include/stdio.h" <System_Header>
extern FILE * fdopen ( int __fd , const char * __modes ) ;
# 318 "/usr/include/stdio.h" <System_Header>
extern FILE * fmemopen ( void * __s , size_t __len , const char * __modes )
;
# 324 "/usr/include/stdio.h" <System_Header>
extern FILE * open_memstream ( char * * __bufloc , size_t * __sizeloc ) ;
# 331 "/usr/include/stdio.h" <System_Header>
extern void setbuf ( FILE * __restrict __stream , char * __restrict __buf ) ;
# 335 "/usr/include/stdio.h" <System_Header>
extern int setvbuf ( FILE * __restrict __stream , char * __restrict __buf ,
int __modes , size_t __n ) ;
# 342 "/usr/include/stdio.h" <System_Header>
extern void setbuffer ( FILE * __restrict __stream , char * __restrict __buf ,
size_t __size ) ;
# 346 "/usr/include/stdio.h" <System_Header>
extern void setlinebuf ( FILE * __stream ) ;
# 355 "/usr/include/stdio.h" <System_Header>
extern int fprintf ( FILE * __restrict __stream ,
const char * __restrict __format , ... ) ;
# 361 "/usr/include/stdio.h" <System_Header>
extern int printf ( const char * __restrict __format , ... ) ;
extern int sprintf ( char * __restrict __s ,
const char * __restrict __format , ... ) ;
# 370 "/usr/include/stdio.h" <System_Header>
extern int vfprintf ( FILE * __restrict __s , const char * __restrict __format ,
__gnuc_va_list __arg ) ;
# 376 "/usr/include/stdio.h" <System_Header>
extern int vprintf ( const char * __restrict __format , __gnuc_va_list __arg ) ;
extern int vsprintf ( char * __restrict __s , const char * __restrict __format ,
__gnuc_va_list __arg ) ;
# 385 "/usr/include/stdio.h" <System_Header>
extern int snprintf ( char * __restrict __s , size_t __maxlen ,
const char * __restrict __format , ... )
__attribute__ ( ( __format__ ( __printf__ , 3 , 4 ) ) ) ;
# 390 "/usr/include/stdio.h" <System_Header>
extern int vsnprintf ( char * __restrict __s , size_t __maxlen ,
const char * __restrict __format , __gnuc_va_list __arg )
__attribute__ ( ( __format__ ( __printf__ , 3 , 0 ) ) ) ;
# 411 "/usr/include/stdio.h" <System_Header>
extern int vdprintf ( int __fd , const char * __restrict __fmt ,
__gnuc_va_list __arg )
__attribute__ ( ( __format__ ( __printf__ , 2 , 0 ) ) ) ;
extern int dprintf ( int __fd , const char * __restrict __fmt , ... )
__attribute__ ( ( __format__ ( __printf__ , 2 , 3 ) ) ) ;
# 424 "/usr/include/stdio.h" <System_Header>
extern int fscanf ( FILE * __restrict __stream ,
const char * __restrict __format , ... ) ;
# 430 "/usr/include/stdio.h" <System_Header>
extern int scanf ( const char * __restrict __format , ... ) ;
extern int sscanf ( const char * __restrict __s ,
const char * __restrict __format , ... ) ;
# 452 "/usr/include/stdio.h" <System_Header>
extern int __isoc99_fscanf ( FILE * __restrict __stream ,
const char * __restrict __format , ... ) ;
extern int __isoc99_scanf ( const char * __restrict __format , ... ) ;
extern int __isoc99_sscanf ( const char * __restrict __s ,
const char * __restrict __format , ... ) ;
# 470 "/usr/include/stdio.h" <System_Header>
extern int vfscanf ( FILE * __restrict __s , const char * __restrict __format ,
__gnuc_va_list __arg )
__attribute__ ( ( __format__ ( __scanf__ , 2 , 0 ) ) ) ;
# 478 "/usr/include/stdio.h" <System_Header>
extern int vscanf ( const char * __restrict __format , __gnuc_va_list __arg )
__attribute__ ( ( __format__ ( __scanf__ , 1 , 0 ) ) ) ;
# 482 "/usr/include/stdio.h" <System_Header>
extern int vsscanf ( const char * __restrict __s ,
const char * __restrict __format , __gnuc_va_list __arg )
__attribute__ ( ( __format__ ( __scanf__ , 2 , 0 ) ) ) ;
# 508 "/usr/include/stdio.h" <System_Header>
extern int __isoc99_vfscanf ( FILE * __restrict __s ,
const char * __restrict __format ,
__gnuc_va_list __arg ) ;
extern int __isoc99_vscanf ( const char * __restrict __format ,
__gnuc_va_list __arg ) ;
extern int __isoc99_vsscanf ( const char * __restrict __s ,
const char * __restrict __format ,
__gnuc_va_list __arg ) ;
# 530 "/usr/include/stdio.h" <System_Header>
extern int fgetc ( FILE * __stream ) ;
extern int getc ( FILE * __stream ) ;
# 537 "/usr/include/stdio.h" <System_Header>
extern int getchar ( void ) ;
# 542 "/usr/include/stdio.h" <System_Header>
# 549 "/usr/include/stdio.h" <System_Header>
extern int getc_unlocked ( FILE * __stream ) ;
extern int getchar_unlocked ( void ) ;
# 560 "/usr/include/stdio.h" <System_Header>
extern int fgetc_unlocked ( FILE * __stream ) ;
# 572 "/usr/include/stdio.h" <System_Header>
extern int fputc ( int __c , FILE * __stream ) ;
extern int putc ( int __c , FILE * __stream ) ;
# 579 "/usr/include/stdio.h" <System_Header>
extern int putchar ( int __c ) ;
# 584 "/usr/include/stdio.h" <System_Header>
# 593 "/usr/include/stdio.h" <System_Header>
extern int fputc_unlocked ( int __c , FILE * __stream ) ;
# 601 "/usr/include/stdio.h" <System_Header>
extern int putc_unlocked ( int __c , FILE * __stream ) ;
extern int putchar_unlocked ( int __c ) ;
# 609 "/usr/include/stdio.h" <System_Header>
extern int getw ( FILE * __stream ) ;
# 612 "/usr/include/stdio.h" <System_Header>
extern int putw ( int __w , FILE * __stream ) ;
# 621 "/usr/include/stdio.h" <System_Header>
extern char * fgets ( char * __restrict __s , int __n , FILE * __restrict __stream )
;
# 637 "/usr/include/stdio.h" <System_Header>
extern char * gets ( char * __s ) ;
# 664 "/usr/include/stdio.h" <System_Header>
extern __ssize_t __getdelim ( char * * __restrict __lineptr ,
size_t * __restrict __n , int __delimiter ,
FILE * __restrict __stream ) ;
extern __ssize_t getdelim ( char * * __restrict __lineptr ,
size_t * __restrict __n , int __delimiter ,
FILE * __restrict __stream ) ;
# 677 "/usr/include/stdio.h" <System_Header>
extern __ssize_t getline ( char * * __restrict __lineptr ,
size_t * __restrict __n ,
FILE * __restrict __stream ) ;
# 688 "/usr/include/stdio.h" <System_Header>
extern int fputs ( const char * __restrict __s , FILE * __restrict __stream ) ;
# 694 "/usr/include/stdio.h" <System_Header>
extern int puts ( const char * __s ) ;
# 701 "/usr/include/stdio.h" <System_Header>
extern int ungetc ( int __c , FILE * __stream ) ;
# 708 "/usr/include/stdio.h" <System_Header>
extern size_t fread ( void * __restrict __ptr , size_t __size ,
size_t __n , FILE * __restrict __stream ) ;
# 714 "/usr/include/stdio.h" <System_Header>
extern size_t fwrite ( const void * __restrict __ptr , size_t __size ,
size_t __n , FILE * __restrict __s ) ;
# 736 "/usr/include/stdio.h" <System_Header>
extern size_t fread_unlocked ( void * __restrict __ptr , size_t __size ,
size_t __n , FILE * __restrict __stream ) ;
extern size_t fwrite_unlocked ( const void * __restrict __ptr , size_t __size ,
size_t __n , FILE * __restrict __stream ) ;
# 748 "/usr/include/stdio.h" <System_Header>
extern int fseek ( FILE * __stream , long int __off , int __whence ) ;
# 753 "/usr/include/stdio.h" <System_Header>
extern long int ftell ( FILE * __stream ) ;
# 758 "/usr/include/stdio.h" <System_Header>
extern void rewind ( FILE * __stream ) ;
# 765 "/usr/include/stdio.h" <System_Header>
# 772 "/usr/include/stdio.h" <System_Header>
extern int fseeko ( FILE * __stream , __off_t __off , int __whence ) ;
# 777 "/usr/include/stdio.h" <System_Header>
extern __off_t ftello ( FILE * __stream ) ;
# 797 "/usr/include/stdio.h" <System_Header>
extern int fgetpos ( FILE * __restrict __stream , fpos_t * __restrict __pos ) ;
# 802 "/usr/include/stdio.h" <System_Header>
extern int fsetpos ( FILE * __stream , const fpos_t * __pos ) ;
# 825 "/usr/include/stdio.h" <System_Header>
extern void clearerr ( FILE * __stream ) ;
extern int feof ( FILE * __stream ) ;
extern int ferror ( FILE * __stream ) ;
# 834 "/usr/include/stdio.h" <System_Header>
extern void clearerr_unlocked ( FILE * __stream ) ;
extern int feof_unlocked ( FILE * __stream ) ;
extern int ferror_unlocked ( FILE * __stream ) ;
# 845 "/usr/include/stdio.h" <System_Header>
extern void perror ( const char * __s ) ;
# 852 "/usr/include/stdio.h" <System_Header>
# 853 "/usr/include/stdio.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/sys_errlist.h" <System_Header>
# 17 "/usr/include/x86_64-linux-gnu/bits/sys_errlist.h" <System_Header>
# 23 "/usr/include/x86_64-linux-gnu/bits/sys_errlist.h" <System_Header>
# 26 "/usr/include/x86_64-linux-gnu/bits/sys_errlist.h" <System_Header>
extern int sys_nerr ;
extern const char * const sys_errlist [ ] ;
# 854 "/usr/include/stdio.h" <System_Header>
# 857 "/usr/include/stdio.h" <System_Header>
extern int fileno ( FILE * __stream ) ;
# 862 "/usr/include/stdio.h" <System_Header>
extern int fileno_unlocked ( FILE * __stream ) ;
# 871 "/usr/include/stdio.h" <System_Header>
extern FILE * popen ( const char * __command , const char * __modes ) ;
# 877 "/usr/include/stdio.h" <System_Header>
extern int pclose ( FILE * __stream ) ;
# 883 "/usr/include/stdio.h" <System_Header>
extern char * ctermid ( char * __s ) ;
# 909 "/usr/include/stdio.h" <System_Header>
# 911 "/usr/include/stdio.h" <System_Header>
extern void flockfile ( FILE * __stream ) ;
# 915 "/usr/include/stdio.h" <System_Header>
extern int ftrylockfile ( FILE * __stream ) ;
# 918 "/usr/include/stdio.h" <System_Header>
extern void funlockfile ( FILE * __stream ) ;
# 931 "/usr/include/stdio.h" <System_Header>
# 7 "main.c"
# 7 "main.c"
# 1 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header>
# 10 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header>
# 17 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header>
# 19 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header>
# 1 "/opt/pgi/linux86-64/17.10/include/stdlib.h" <System_Header>
# 3 "/opt/pgi/linux86-64/17.10/include/stdlib.h" <System_Header>
# 20 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header>
# 25 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header>
extern void * malloc_managed ( size_t ) ;
extern void * calloc_managed ( size_t , size_t ) ;
extern void free_managed ( void * ) ;
extern void cfree_managed ( void * ) ;
# 31 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header>
extern void * realloc_managed ( void * , size_t ) ;
extern void * valloc_managed ( size_t ) ;
extern void * pvalloc_managed ( size_t ) ;
extern void * memalign_managed ( size_t , size_t ) ;
extern int posix_memalign_managed ( void * * , size_t , size_t ) ;
extern char * tmpnam_managed ( char * ) ;
# 43 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header>
# 58 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header>
# 8 "main.c"
# 8 "main.c"
# 1 "/usr/include/string.h" <System_Header>
# 16 "/usr/include/string.h" <System_Header>
# 20 "/usr/include/string.h" <System_Header>
# 25 "/usr/include/string.h" <System_Header>
# 1 "/usr/include/features.h" <System_Header>
# 16 "/usr/include/features.h" <System_Header>
# 26 "/usr/include/string.h" <System_Header>
# 29 "/usr/include/string.h" <System_Header>
# 32 "/usr/include/string.h" <System_Header>
# 1 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header>
# 10 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header>
# 14 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header>
# 16 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header>
# 1 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 22 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 26 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 35 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 47 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 50 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 54 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 62 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 93 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 103 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 121 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 123 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 126 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 165 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 168 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 245 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 248 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 363 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 398 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 17 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header>
# 33 "/usr/include/string.h" <System_Header>
# 34 "/usr/include/string.h" <System_Header>
# 41 "/usr/include/string.h" <System_Header>
extern void * memcpy ( void * __restrict __dest , const void * __restrict __src ,
size_t __n ) ;
# 45 "/usr/include/string.h" <System_Header>
extern void * memmove ( void * __dest , const void * __src , size_t __n )
;
# 52 "/usr/include/string.h" <System_Header>
# 54 "/usr/include/string.h" <System_Header>
extern void * memccpy ( void * __restrict __dest , const void * __restrict __src ,
int __c , size_t __n )
;
# 61 "/usr/include/string.h" <System_Header>
extern void * memset ( void * __s , int __c , size_t __n ) ;
# 64 "/usr/include/string.h" <System_Header>
extern int memcmp ( const void * __s1 , const void * __s2 , size_t __n )
;
# 68 "/usr/include/string.h" <System_Header>
# 92 "/usr/include/string.h" <System_Header>
extern void * memchr ( const void * __s , int __c , size_t __n )
;
# 124 "/usr/include/string.h" <System_Header>
extern char * strcpy ( char * __restrict __dest , const char * __restrict __src )
;
extern char * strncpy ( char * __restrict __dest ,
const char * __restrict __src , size_t __n )
;
# 132 "/usr/include/string.h" <System_Header>
extern char * strcat ( char * __restrict __dest , const char * __restrict __src )
;
extern char * strncat ( char * __restrict __dest , const char * __restrict __src ,
size_t __n ) ;
# 139 "/usr/include/string.h" <System_Header>
extern int strcmp ( const char * __s1 , const char * __s2 )
;
extern int strncmp ( const char * __s1 , const char * __s2 , size_t __n )
;
# 146 "/usr/include/string.h" <System_Header>
extern int strcoll ( const char * __s1 , const char * __s2 )
;
extern size_t strxfrm ( char * __restrict __dest ,
const char * __restrict __src , size_t __n )
;
# 158 "/usr/include/string.h" <System_Header>
# 159 "/usr/include/string.h" <System_Header>
# 1 "/usr/include/xlocale.h" <System_Header>
# 18 "/usr/include/xlocale.h" <System_Header>
# 26 "/usr/include/xlocale.h" <System_Header>
typedef struct __locale_struct
{
struct __locale_data * __locales [ 13 ] ;
# 32 "/usr/include/xlocale.h" <System_Header>
const unsigned short int * __ctype_b ;
const int * __ctype_tolower ;
const int * __ctype_toupper ;
# 37 "/usr/include/xlocale.h" <System_Header>
const char * __names [ 13 ] ;
} * __locale_t ;
# 41 "/usr/include/xlocale.h" <System_Header>
typedef __locale_t locale_t ;
# 160 "/usr/include/string.h" <System_Header>
# 161 "/usr/include/string.h" <System_Header>
extern int strcoll_l ( const char * __s1 , const char * __s2 , __locale_t __l )
;
extern size_t strxfrm_l ( char * __dest , const char * __src , size_t __n ,
__locale_t __l ) ;
# 170 "/usr/include/string.h" <System_Header>
extern char * strdup ( const char * __s )
;
# 177 "/usr/include/string.h" <System_Header>
# 179 "/usr/include/string.h" <System_Header>
extern char * strndup ( const char * __string , size_t __n )
;
# 207 "/usr/include/string.h" <System_Header>
# 231 "/usr/include/string.h" <System_Header>
extern char * strchr ( const char * __s , int __c )
;
# 234 "/usr/include/string.h" <System_Header>
# 258 "/usr/include/string.h" <System_Header>
extern char * strrchr ( const char * __s , int __c )
;
# 279 "/usr/include/string.h" <System_Header>
extern size_t strcspn ( const char * __s , const char * __reject )
;
# 283 "/usr/include/string.h" <System_Header>
extern size_t strspn ( const char * __s , const char * __accept )
;
# 310 "/usr/include/string.h" <System_Header>
extern char * strpbrk ( const char * __s , const char * __accept )
;
# 313 "/usr/include/string.h" <System_Header>
# 337 "/usr/include/string.h" <System_Header>
extern char * strstr ( const char * __haystack , const char * __needle )
;
# 342 "/usr/include/string.h" <System_Header>
extern char * strtok ( char * __restrict __s , const char * __restrict __delim )
;
# 348 "/usr/include/string.h" <System_Header>
extern char * __strtok_r ( char * __restrict __s ,
const char * __restrict __delim ,
char * * __restrict __save_ptr )
;
# 354 "/usr/include/string.h" <System_Header>
extern char * strtok_r ( char * __restrict __s , const char * __restrict __delim ,
char * * __restrict __save_ptr )
;
# 393 "/usr/include/string.h" <System_Header>
extern size_t strlen ( const char * __s )
;
# 400 "/usr/include/string.h" <System_Header>
extern size_t strnlen ( const char * __string , size_t __maxlen )
;
# 407 "/usr/include/string.h" <System_Header>
extern char * strerror ( int __errnum ) ;
# 417 "/usr/include/string.h" <System_Header>
# 420 "/usr/include/string.h" <System_Header>
# 426 "/usr/include/string.h" <System_Header>
extern int __xpg_strerror_r ( int __errnum , char * __buf , size_t __buflen )
;
# 439 "/usr/include/string.h" <System_Header>
extern char * strerror_l ( int __errnum , __locale_t __l ) ;
# 445 "/usr/include/string.h" <System_Header>
extern void __bzero ( void * __s , size_t __n ) ;
# 449 "/usr/include/string.h" <System_Header>
extern void bcopy ( const void * __src , void * __dest , size_t __n )
;
# 453 "/usr/include/string.h" <System_Header>
extern void bzero ( void * __s , size_t __n ) ;
# 456 "/usr/include/string.h" <System_Header>
extern int bcmp ( const void * __s1 , const void * __s2 , size_t __n )
;
# 460 "/usr/include/string.h" <System_Header>
# 484 "/usr/include/string.h" <System_Header>
extern char * index ( const char * __s , int __c )
;
# 488 "/usr/include/string.h" <System_Header>
# 512 "/usr/include/string.h" <System_Header>
extern char * rindex ( const char * __s , int __c )
;
# 517 "/usr/include/string.h" <System_Header>
extern int ffs ( int __i ) __attribute__ ( ( __const__ ) ) ;
# 521 "/usr/include/string.h" <System_Header>
# 528 "/usr/include/string.h" <System_Header>
extern int strcasecmp ( const char * __s1 , const char * __s2 )
;
# 532 "/usr/include/string.h" <System_Header>
extern int strncasecmp ( const char * __s1 , const char * __s2 , size_t __n )
;
# 551 "/usr/include/string.h" <System_Header>
extern char * strsep ( char * * __restrict __stringp ,
const char * __restrict __delim )
;
# 558 "/usr/include/string.h" <System_Header>
extern char * strsignal ( int __sig ) ;
# 561 "/usr/include/string.h" <System_Header>
extern char * __stpcpy ( char * __restrict __dest , const char * __restrict __src )
;
extern char * stpcpy ( char * __restrict __dest , const char * __restrict __src )
;
# 568 "/usr/include/string.h" <System_Header>
extern char * __stpncpy ( char * __restrict __dest ,
const char * __restrict __src , size_t __n )
;
extern char * stpncpy ( char * __restrict __dest ,
const char * __restrict __src , size_t __n )
;
# 9 "main.c"
# 9 "main.c"
# 1 "/opt/pgi/linux86-64/17.10/include-gcc50/limits.h" <System_Header>
# 10 "/opt/pgi/linux86-64/17.10/include-gcc50/limits.h" <System_Header>
# 16 "/opt/pgi/linux86-64/17.10/include-gcc50/limits.h" <System_Header>
# 30 "/opt/pgi/linux86-64/17.10/include-gcc50/limits.h" <System_Header>
# 1 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h" <System_Header>
# 22 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h" <System_Header>
# 25 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h" <System_Header>
# 28 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h" <System_Header>
# 33 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h" <System_Header>
# 34 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h" <System_Header>
# 1 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/syslimits.h" <System_Header>
# 4 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/syslimits.h" <System_Header>
# 7 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/syslimits.h" <System_Header>
# 1 "/opt/pgi/linux86-64/17.10/include-gcc50/limits.h" <System_Header>
# 10 "/opt/pgi/linux86-64/17.10/include-gcc50/limits.h" <System_Header>
# 16 "/opt/pgi/linux86-64/17.10/include-gcc50/limits.h" <System_Header>
# 30 "/opt/pgi/linux86-64/17.10/include-gcc50/limits.h" <System_Header>
# 1 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h" <System_Header>
# 22 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h" <System_Header>
# 25 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h" <System_Header>
# 28 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h" <System_Header>
# 168 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h" <System_Header>
# 1 "/usr/include/limits.h" <System_Header>
# 16 "/usr/include/limits.h" <System_Header>
# 20 "/usr/include/limits.h" <System_Header>
# 25 "/usr/include/limits.h" <System_Header>
# 1 "/usr/include/features.h" <System_Header>
# 16 "/usr/include/features.h" <System_Header>
# 26 "/usr/include/limits.h" <System_Header>
# 30 "/usr/include/limits.h" <System_Header>
# 35 "/usr/include/limits.h" <System_Header>
# 40 "/usr/include/limits.h" <System_Header>
# 44 "/usr/include/limits.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/wordsize.h" <System_Header>
# 11 "/usr/include/x86_64-linux-gnu/bits/wordsize.h" <System_Header>
# 45 "/usr/include/limits.h" <System_Header>
# 47 "/usr/include/limits.h" <System_Header>
# 50 "/usr/include/limits.h" <System_Header>
# 52 "/usr/include/limits.h" <System_Header>
# 55 "/usr/include/limits.h" <System_Header>
# 59 "/usr/include/limits.h" <System_Header>
# 62 "/usr/include/limits.h" <System_Header>
# 71 "/usr/include/limits.h" <System_Header>
# 75 "/usr/include/limits.h" <System_Header>
# 78 "/usr/include/limits.h" <System_Header>
# 82 "/usr/include/limits.h" <System_Header>
# 85 "/usr/include/limits.h" <System_Header>
# 93 "/usr/include/limits.h" <System_Header>
# 102 "/usr/include/limits.h" <System_Header>
# 106 "/usr/include/limits.h" <System_Header>
# 116 "/usr/include/limits.h" <System_Header>
# 120 "/usr/include/limits.h" <System_Header>
# 128 "/usr/include/limits.h" <System_Header>
# 142 "/usr/include/limits.h" <System_Header>
# 143 "/usr/include/limits.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header>
# 16 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header>
# 22 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header>
# 28 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header>
# 30 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header>
# 33 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header>
# 36 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header>
# 39 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header>
# 46 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header>
# 50 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header>
# 53 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header>
# 56 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header>
# 59 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header>
# 63 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header>
# 66 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header>
# 69 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header>
# 72 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header>
# 75 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header>
# 82 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header>
# 95 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header>
# 98 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header>
# 102 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header>
# 105 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header>
# 108 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header>
# 111 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header>
# 114 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header>
# 117 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header>
# 120 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header>
# 123 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header>
# 127 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header>
# 130 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header>
# 133 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header>
# 136 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header>
# 155 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header>
# 159 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header>
# 160 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/local_lim.h" <System_Header>
# 17 "/usr/include/x86_64-linux-gnu/bits/local_lim.h" <System_Header>
# 23 "/usr/include/x86_64-linux-gnu/bits/local_lim.h" <System_Header>
# 37 "/usr/include/x86_64-linux-gnu/bits/local_lim.h" <System_Header>
# 38 "/usr/include/x86_64-linux-gnu/bits/local_lim.h" <System_Header>
# 1 "/usr/include/linux/limits.h" <System_Header>
# 39 "/usr/include/x86_64-linux-gnu/bits/local_lim.h" <System_Header>
# 40 "/usr/include/x86_64-linux-gnu/bits/local_lim.h" <System_Header>
# 45 "/usr/include/x86_64-linux-gnu/bits/local_lim.h" <System_Header>
# 50 "/usr/include/x86_64-linux-gnu/bits/local_lim.h" <System_Header>
# 55 "/usr/include/x86_64-linux-gnu/bits/local_lim.h" <System_Header>
# 61 "/usr/include/x86_64-linux-gnu/bits/local_lim.h" <System_Header>
# 63 "/usr/include/x86_64-linux-gnu/bits/local_lim.h" <System_Header>
# 66 "/usr/include/x86_64-linux-gnu/bits/local_lim.h" <System_Header>
# 68 "/usr/include/x86_64-linux-gnu/bits/local_lim.h" <System_Header>
# 71 "/usr/include/x86_64-linux-gnu/bits/local_lim.h" <System_Header>
# 73 "/usr/include/x86_64-linux-gnu/bits/local_lim.h" <System_Header>
# 77 "/usr/include/x86_64-linux-gnu/bits/local_lim.h" <System_Header>
# 80 "/usr/include/x86_64-linux-gnu/bits/local_lim.h" <System_Header>
# 83 "/usr/include/x86_64-linux-gnu/bits/local_lim.h" <System_Header>
# 86 "/usr/include/x86_64-linux-gnu/bits/local_lim.h" <System_Header>
# 89 "/usr/include/x86_64-linux-gnu/bits/local_lim.h" <System_Header>
# 92 "/usr/include/x86_64-linux-gnu/bits/local_lim.h" <System_Header>
# 95 "/usr/include/x86_64-linux-gnu/bits/local_lim.h" <System_Header>
# 98 "/usr/include/x86_64-linux-gnu/bits/local_lim.h" <System_Header>
# 161 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header>
# 169 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header>
# 144 "/usr/include/limits.h" <System_Header>
# 147 "/usr/include/limits.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/posix2_lim.h" <System_Header>
# 16 "/usr/include/x86_64-linux-gnu/bits/posix2_lim.h" <System_Header>
# 20 "/usr/include/x86_64-linux-gnu/bits/posix2_lim.h" <System_Header>
# 26 "/usr/include/x86_64-linux-gnu/bits/posix2_lim.h" <System_Header>
# 29 "/usr/include/x86_64-linux-gnu/bits/posix2_lim.h" <System_Header>
# 32 "/usr/include/x86_64-linux-gnu/bits/posix2_lim.h" <System_Header>
# 35 "/usr/include/x86_64-linux-gnu/bits/posix2_lim.h" <System_Header>
# 39 "/usr/include/x86_64-linux-gnu/bits/posix2_lim.h" <System_Header>
# 43 "/usr/include/x86_64-linux-gnu/bits/posix2_lim.h" <System_Header>
# 46 "/usr/include/x86_64-linux-gnu/bits/posix2_lim.h" <System_Header>
# 50 "/usr/include/x86_64-linux-gnu/bits/posix2_lim.h" <System_Header>
# 54 "/usr/include/x86_64-linux-gnu/bits/posix2_lim.h" <System_Header>
# 60 "/usr/include/x86_64-linux-gnu/bits/posix2_lim.h" <System_Header>
# 87 "/usr/include/x86_64-linux-gnu/bits/posix2_lim.h" <System_Header>
# 148 "/usr/include/limits.h" <System_Header>
# 169 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h" <System_Header>
# 31 "/opt/pgi/linux86-64/17.10/include-gcc50/limits.h" <System_Header>
# 8 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/syslimits.h" <System_Header>
# 35 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h" <System_Header>
# 57 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h" <System_Header>
# 62 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h" <System_Header>
# 66 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h" <System_Header>
# 71 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h" <System_Header>
# 77 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h" <System_Header>
# 85 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h" <System_Header>
# 102 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h" <System_Header>
# 108 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h" <System_Header>
# 116 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h" <System_Header>
# 122 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h" <System_Header>
# 127 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h" <System_Header>
# 133 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h" <System_Header>
# 138 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h" <System_Header>
# 144 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h" <System_Header>
# 163 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h" <System_Header>
# 31 "/opt/pgi/linux86-64/17.10/include-gcc50/limits.h" <System_Header>
# 10 "main.c"
# 11 "main.c"
# 27 "main.c"
/* Number of GPUs in use; -1 until __macc_init() assigns it. */
int __MACC_NUMGPUS = - 1 ;
# 29 "main.c"
/* Ask the OpenACC runtime how many NVIDIA devices are available. */
int __macc_get_num_gpus ( )
{
    int ngpus = acc_get_num_devices ( acc_device_nvidia ) ;
    return ngpus ;
}
# 34 "main.c"
/* Maps logical GPU index -> physical OpenACC device number; filled by
 * __macc_init() from the MACC_TOPOLOGY env var or as the identity map. */
int * __MACC_TOPOLOGY ;
# 36 "main.c"
/* Make logical GPU i (after topology remapping) the current OpenACC device. */
void __macc_set_gpu_num ( int i )
{
acc_set_device_num ( __MACC_TOPOLOGY [ i ] , acc_device_nvidia ) ;
}
# 44 "main.c"
/* One registered host array on one GPU: its host address range, element
 * size, registered index bounds, and the device-side dirty index span. */
struct __MaccDataTableEntry {
void * addr ; /* host base address of the registered array */
void * addr_ub ; /* addr + entire_ub * type_size: address of the last element */
int type_size ; /* element size in bytes */
int entire_lb ; /* first registered element index */
int entire_ub ; /* last registered element index */
int dirty ; /* nonzero when the device copy has writes not yet synced */
int dirty_lb ; /* first dirty element index (-1 when clean) */
int dirty_ub ; /* last dirty element index (-1 when clean) */
int offset ; /* element offset of the most recently looked-up pointer from addr */
struct __MaccDataTableEntry * next ; /* next entry in the same hash bucket */
} ;
# 57 "main.c"
/* Per-GPU hash table of registered arrays: 256 buckets of chained entries,
 * keyed on (addr / 16) % 256. */
struct __MaccDataTable {
struct __MaccDataTableEntry * entries [ 256 ] ; /* bucket heads */
} ;
# 61 "main.c"
/* Array of __MACC_NUMGPUS data tables, one per GPU; allocated in __macc_init(). */
struct __MaccDataTable * __MACC_DATA_TABLE_SET ;
# 67 "main.c"
/* Per-GPU cache for interior-pointer lookups in __macc_data_table_find():
 * 16 lanes (selected by address) of up to 16 cached triples each. */
struct __MaccDataWrapCache {
void * addr [ 16 * 16 ] ; /* addresses matched against looked-up pointers */
struct __MaccDataTableEntry * entry [ 16 * 16 ] ; /* entry each slot resolved to */
int offset [ 16 * 16 ] ; /* element offset recorded for each slot */
int cachenum [ 16 ] ; /* number of valid slots per lane */
} ;
# 74 "main.c"
/* Array of __MACC_NUMGPUS wrap caches, one per GPU; allocated in __macc_init(). */
struct __MaccDataWrapCache * __MACC_DATA_WRAP_CACHE_SET ;
# 76 "main.c"
/*
 * Register array 'ptr' (indices [entire_lb, entire_ub], elements of
 * type_size bytes) in GPU gpu_num's data table by prepending a new entry
 * to its hash bucket.  Exits the process on allocation failure.
 */
void __macc_data_table_insert (
int gpu_num , void * ptr , int type_size , int entire_lb , int entire_ub )
{
int index = ( ( ( long ) ptr / 16 ) % 256 ) ;

struct __MaccDataTableEntry * new_entry = malloc_managed ( sizeof ( struct __MaccDataTableEntry ) ) ;

/* Fail loudly on OOM instead of dereferencing a NULL pointer below. */
if ( new_entry == ( ( void * ) 0 ) ) {
fprintf ( stderr , "Error on __macc_data_table_insert: out of memory\n" ) ;
exit ( - 1 ) ;
}

new_entry -> addr = ptr ;
new_entry -> addr_ub = ptr + entire_ub * type_size ; /* address of last element */
new_entry -> type_size = type_size ;
new_entry -> entire_lb = entire_lb ;
new_entry -> entire_ub = entire_ub ;
new_entry -> dirty = 0 ;
new_entry -> dirty_lb = - 1 ;
new_entry -> dirty_ub = - 1 ;
new_entry -> offset = 0 ; /* was left uninitialized; find() assigns it on lookup */
new_entry -> next = __MACC_DATA_TABLE_SET [ gpu_num ] . entries [ index ] ;

__MACC_DATA_TABLE_SET [ gpu_num ] . entries [ index ] = new_entry ;
}
# 96 "main.c"
struct __MaccDataTableEntry * __macc_data_table_find ( int gpu_num , void * ptr )
{
int index = ( ( ( long ) ptr / 16 ) % 256 ) ;
struct __MaccDataTableEntry * entry = __MACC_DATA_TABLE_SET [ gpu_num ] . entries [ index ] ;
# 101 "main.c"
while ( entry != ( ( void * ) 0 ) ) {
if ( entry -> addr == ptr ) {
entry -> offset = 0 ;
return entry ;
}
# 107 "main.c"
entry = entry -> next ;
}
# 110 "main.c"
struct __MaccDataWrapCache wrap_cache = __MACC_DATA_WRAP_CACHE_SET [ gpu_num ] ;
int lane = ( ( ( long ) ptr / 16 ) % 16 ) ;
# 113 "main.c"
for ( int i = 0 ; i < wrap_cache . cachenum [ lane ] ; i ++ ) {
if ( ptr == wrap_cache . addr [ lane * 16 + i ] ) {
entry = wrap_cache . entry [ lane * 16 + i ] ;
entry -> offset = wrap_cache . offset [ lane * 16 + i ] ;
return entry ;
}
}
# 121 "main.c"
for ( int i = 0 ; i < 256 ; i ++ ) {
entry = __MACC_DATA_TABLE_SET [ gpu_num ] . entries [ i ] ;
# 124 "main.c"
while ( entry != ( ( void * ) 0 ) ) {
if ( entry -> addr <= ptr && ptr <= entry -> addr_ub ) {
int offset = ( ptr - entry -> addr ) / entry -> type_size ;
# 128 "main.c"
int cachenum = wrap_cache . cachenum [ lane ] ;
# 130 "main.c"
if ( cachenum == 16 ) {
cachenum = 0 ;
}
# 134 "main.c"
wrap_cache . addr [ lane * 16 + cachenum ] = entry -> addr ;
wrap_cache . entry [ lane * 16 + cachenum ] = entry ;
wrap_cache . offset [ lane * 16 + cachenum ] = offset ;
# 138 "main.c"
wrap_cache . cachenum [ lane ] = cachenum + 1 ;
# 140 "main.c"
entry -> offset = offset ;
return entry ;
}
# 144 "main.c"
entry = entry -> next ;
}
}
# 148 "main.c"
fprintf ( stderr , "Error on __macc_data_table_find: Not found the item %p\n" , ptr ) ;
exit ( - 1 ) ;
# 151 "main.c"
return ( ( void * ) 0 ) ;
}
# 154 "main.c"
/*
 * Unlink and free the table entry whose base address is exactly 'ptr'
 * on GPU gpu_num; exits the process if no such entry exists.
 * The wrap cache is invalidated first because it may alias the entry.
 */
void __macc_data_table_delete ( int gpu_num , void * ptr )
{
int bucket = ( ( ( long ) ptr / 16 ) % 256 ) ;

/* Drop every cached lookup: slots may point at the dying entry. */
memset ( __MACC_DATA_WRAP_CACHE_SET [ gpu_num ] . cachenum , 0 , 16 * sizeof ( int ) ) ;

/* Walk the bucket via the link field so head and interior removals
 * share one code path. */
struct __MaccDataTableEntry * * link = & __MACC_DATA_TABLE_SET [ gpu_num ] . entries [ bucket ] ;

for ( ; * link != ( ( void * ) 0 ) ; link = & ( * link ) -> next ) {
if ( ( * link ) -> addr == ptr ) {
struct __MaccDataTableEntry * victim = * link ;
* link = victim -> next ;
free_managed ( victim ) ;
return ;
}
}

fprintf ( stderr , "Error on __macc_data_table_delete: Not found the item %p\n" , ptr ) ;
exit ( - 1 ) ;
}
# 188 "main.c"
/* Asynchronously free the device copy of ptr[lb : lb+length) on GPU
 * gpu_num, drop its bookkeeping entry, then drain the async queue. */
void __macc_delete ( int gpu_num , void * ptr , int type_size , int lb , int length )
{
acc_delete_async ( ( ptr + lb * type_size ) , length * type_size , gpu_num ) ;
__macc_data_table_delete ( gpu_num , ptr ) ;
acc_wait ( gpu_num ) ;
}
# 195 "main.c"
/* Copy the dirty span of ptr's device buffer back to the host (if any),
 * then free the device copy and its table entry. */
void __macc_copyout ( int gpu_num , void * ptr , int type_size , int lb , int length )
{
struct __MaccDataTableEntry * entry = __macc_data_table_find ( gpu_num , ptr ) ;
# 199 "main.c"
/* Only the [dirty_lb, dirty_ub] element range needs the DtoH transfer. */
if ( entry -> dirty )
acc_update_self_async ( ( entry -> addr + entry -> dirty_lb * entry -> type_size ) ,
( ( entry -> dirty_ub - entry -> dirty_lb + 1 ) * entry -> type_size ) ,
gpu_num ) ;
# 204 "main.c"
/* __macc_delete ends with acc_wait, which also covers the async update. */
__macc_delete ( gpu_num , ptr , type_size , lb , length ) ;
}
# 207 "main.c"
/* Allocate and copy ptr[lb : lb+length) to GPU gpu_num (HtoD), register
 * it in the data table, and wait for the transfer to finish. */
void __macc_copyin ( int gpu_num , void * ptr , int type_size , int lb , int length )
{
acc_copyin_async ( ( ptr + lb * type_size ) , length * type_size , gpu_num ) ;
__macc_data_table_insert ( gpu_num , ptr , type_size , lb , lb + length - 1 ) ;
acc_wait ( gpu_num ) ;
}
# 214 "main.c"
/* Allocate (without copying) ptr[lb : lb+length) on GPU gpu_num, register
 * it in the data table, and wait for the async queue. */
void __macc_create ( int gpu_num , void * ptr , int type_size , int lb , int length )
{
acc_create_async ( ( ptr + lb * type_size ) , length * type_size , gpu_num ) ;
__macc_data_table_insert ( gpu_num , ptr , type_size , lb , lb + length - 1 ) ;
acc_wait ( gpu_num ) ;
}
# 221 "main.c"
/*
 * Managed-memory malloc that also creates an (uninitialized) device copy
 * of the buffer on every GPU, one GPU per OpenMP thread (element size 1).
 * Returns the host pointer; exits the process on allocation failure.
 */
void * __macc_malloc ( unsigned long size )
{
void * ret = malloc_managed ( size ) ;

/* The original registered and returned a NULL pointer on OOM. */
if ( ret == ( ( void * ) 0 ) ) {
fprintf ( stderr , "Error on __macc_malloc: out of memory\n" ) ;
exit ( - 1 ) ;
}

#pragma omp parallel num_threads ( __MACC_NUMGPUS )
{
__macc_create ( omp_get_thread_num ( ) , ret , 1 , 0 , size ) ;
}

return ret ;
}
# 233 "main.c"
/* Release a __macc_malloc'd buffer: every GPU (one per OpenMP thread)
 * drops its whole device copy, then the host buffer is freed. */
void __macc_free ( void * ptr )
{
# 235 "main.c"
#pragma omp parallel num_threads ( __MACC_NUMGPUS )
{
int gpu_num = omp_get_thread_num ( ) ;
struct __MaccDataTableEntry * entry =
__macc_data_table_find ( gpu_num , ptr ) ;
/* __macc_malloc registers with type_size 1 and lb 0, so
 * entire_ub + 1 is the buffer's byte length. */
__macc_delete ( gpu_num , ptr , 1 , 0 , entry -> entire_ub + 1 ) ;
}
free_managed ( ptr ) ;
}
# 245 "main.c"
/* Device-to-host update of ptr[lb : lb+length) on GPU gpu_num, clipped to
 * the entry's dirty span; no transfer when the ranges do not intersect. */
void __macc_update_self ( int gpu_num , void * ptr , int type_size , int lb , int length )
{
struct __MaccDataTableEntry * entry = __macc_data_table_find ( gpu_num , ptr ) ;
/* Normalize to the registered base address and base-relative indices
 * (offset is nonzero when ptr pointed into the array interior). */
ptr = entry -> addr ;
lb += entry -> offset ;
int ub = lb + length - 1 ;
# 252 "main.c"
/* Transfer only [max(dirty_lb, lb), min(dirty_ub, ub)]. */
if ( entry -> dirty && ( ! ( entry -> dirty_lb > ub || entry -> dirty_ub < lb ) ) ) {
int new_lb = ( ( ( entry -> dirty_lb ) > ( lb ) ) ? ( entry -> dirty_lb ) : ( lb ) ) ;
int new_ub = ( ( ( entry -> dirty_ub ) < ( ub ) ) ? ( entry -> dirty_ub ) : ( ub ) ) ;
acc_update_self ( ( ptr + new_lb * type_size ) , ( ( new_ub - new_lb + 1 ) * type_size ) ) ;
}
}
# 259 "main.c"
/* Host-to-device update of ptr[lb : lb+length) on the current device.
 * NOTE(review): gpu_num is unused here; callers appear to select the
 * device beforehand via __macc_set_gpu_num -- confirm against call sites. */
void __macc_update_device ( int gpu_num , void * ptr , int type_size , int lb , int length )
{
acc_update_device ( ( ptr + lb * type_size ) , length * type_size ) ;
}
# 264 "main.c"
/* Reset gpu_num's recorded access range to "empty": upper bound below
 * every valid index and lower bound at INT_MAX (2147483647). */
void __macc_init_access_region ( int gpu_num , int * lb_set , int * ub_set )
{
    ub_set [ gpu_num ] = - 1 ;
    lb_set [ gpu_num ] = 2147483647 ;
}
# 270 "main.c"
/* Widen gpu_num's recorded access range to include element index val. */
void __macc_update_access_region ( int gpu_num , int * lb_set , int * ub_set , int val )
{
    if ( val < lb_set [ gpu_num ] )
        lb_set [ gpu_num ] = val ;
    if ( val > ub_set [ gpu_num ] )
        ub_set [ gpu_num ] = val ;
}
# 276 "main.c"
/* Return 1 when any two GPUs' inclusive [lb, ub] ranges intersect, else 0. */
int __macc_region_is_overlapping ( int * lb_set , int * ub_set )
{
    for ( int i = 0 ; i < __MACC_NUMGPUS - 1 ; i ++ ) {
        for ( int j = i + 1 ; j < __MACC_NUMGPUS ; j ++ ) {
            int disjoint = ( lb_set [ i ] > ub_set [ j ] ) || ( ub_set [ i ] < lb_set [ j ] ) ;
            if ( ! disjoint )
                return 1 ;
        }
    }

    return 0 ;
}
# 286 "main.c"
/*
 * Partition the strided iteration space starting at entire_start, ending
 * before entire_end (or at it when until_equal is nonzero), with stride
 * 'step', into one contiguous chunk per GPU.  Writes inclusive bounds to
 * loop_lb_set / loop_ub_set.  Chunk widths are multiples of step; the
 * first 'rem' GPUs receive one extra stride each.
 */
void __macc_calc_loop_region
( int * loop_lb_set , int * loop_ub_set ,
int entire_start , int entire_end , int step , int until_equal )
{
/* Snap entire_end to the last index the strided loop actually visits. */
int tmp = entire_start + step * ( ( entire_end - entire_start ) / step ) ;
entire_end = tmp - ( ( until_equal || entire_end != tmp ) ? 0 : step ) ;
# 294 "main.c"
/* Total length in index units, the per-GPU width rounded down to a
 * stride multiple, and how many leftover strides need distributing. */
int len = entire_end - entire_start + step ;
int width = ( int ) ( ( float ) len / __MACC_NUMGPUS ) ;
width -= width % step ;
int rem = ( len - width * __MACC_NUMGPUS ) / step ;
width -= step ; /* width now spans from a chunk's first to its last index */
# 300 "main.c"
int pos = entire_start ;
# 302 "main.c"
for ( int i = 0 ; i < __MACC_NUMGPUS ; i ++ ) {
loop_lb_set [ i ] = pos ;
/* ub = lb + width (+ one extra stride for the first rem GPUs), clamped
 * to entire_end; a negative width leaves ub == lb. */
pos = ( width < 0 ) ? pos : ( ( ( pos + width + ( ( i < rem ) ? step : 0 ) ) < ( entire_end ) ) ? ( pos + width + ( ( i < rem ) ? step : 0 ) ) : ( entire_end ) ) ;
loop_ub_set [ i ] = pos ;
pos = ( ( ( pos + step ) < ( entire_end ) ) ? ( pos + step ) : ( entire_end ) ) ;
}
}
# 310 "main.c"
/* Shift gpu_num's data-range bounds by ptr's element offset within its
 * registered entry (nonzero when ptr points into the array interior). */
void __macc_adjust_data_region ( void * ptr , int gpu_num , int * lb_set , int * ub_set )
{
struct __MaccDataTableEntry * entry = __macc_data_table_find ( gpu_num , ptr ) ;
# 314 "main.c"
lb_set [ gpu_num ] += entry -> offset ;
ub_set [ gpu_num ] += entry -> offset ;
}
# 318 "main.c"
/* Collapse the per-GPU loop partition so GPU 0 covers the whole range
 * and every other GPU gets an empty range (lb = 1 > ub = 0). */
void __macc_rewrite_loop_region_into_single ( int * loop_lb_set , int * loop_ub_set )
{
    loop_ub_set [ 0 ] = loop_ub_set [ __MACC_NUMGPUS - 1 ] ;

    for ( int gpu = 1 ; gpu < __MACC_NUMGPUS ; gpu ++ ) {
        loop_lb_set [ gpu ] = 1 ;
        loop_ub_set [ gpu ] = 0 ;
    }
}
# 328 "main.c"
/* Merge the first and last GPUs' data ranges into slot 0:
 * lb[0] = min(lb[0], lb[last]), ub[0] = max(ub[0], ub[last]). */
void __macc_rewrite_data_region_into_single ( int * lb_set , int * ub_set )
{
    int last = __MACC_NUMGPUS - 1 ;

    if ( lb_set [ last ] < lb_set [ 0 ] )
        lb_set [ 0 ] = lb_set [ last ] ;
    if ( ub_set [ last ] > ub_set [ 0 ] )
        ub_set [ 0 ] = ub_set [ last ] ;
}
# 335 "main.c"
/* Broadcast ptr[lb : ub] (inclusive) from GPU gpu_num to the host and
 * then to every other GPU; leaves gpu_num as the current device.
 * NOTE(review): assumes gpu_num is already the current device on entry
 * for the first DtoH transfer -- confirm against callers. */
void __macc_sync_data ( int gpu_num , void * ptr , int type_size , int lb , int ub )
{
void * update_addr = ( ptr + lb * type_size ) ;
size_t length_b = ( ( ub - lb + 1 ) * type_size ) ;
# 340 "main.c"
/* DtoH from the owning GPU. */
acc_update_self ( update_addr , length_b ) ;
# 342 "main.c"
/* HtoD to every other GPU. */
for ( int i = 0 ; i < __MACC_NUMGPUS ; i ++ )
{
if ( i != gpu_num ) {
__macc_set_gpu_num ( i ) ;
acc_update_device ( update_addr , length_b ) ;
}
}
# 352 "main.c"
__macc_set_gpu_num ( gpu_num ) ;
}
# 355 "main.c"
/*
 * Coherence bookkeeping for array 'ptr' on GPU gpu_num around a compute
 * region.  use_* describe the element ranges each GPU will read, def_*
 * the ranges it will write.  The type encoding appears to be: 0 = whole
 * or unanalyzable range, 1 = none, 2 = per-GPU [lb, ub] sets -- inferred
 * from the branches below; confirm against the code generator.
 * First propagates this GPU's dirty data wherever other GPUs need it,
 * then records this GPU's write range as the new dirty span.
 */
void __macc_set_data_region ( int gpu_num , void * ptr , int multi ,
int use_type , int * use_lb_set , int * use_ub_set ,
int def_type , int * def_lb_set , int * def_ub_set )
{
struct __MaccDataTableEntry * entry = __macc_data_table_find ( gpu_num , ptr ) ;
ptr = entry -> addr ;
# 363 "main.c"
/* Phase 1: propagate existing dirty data before the region runs. */
if ( entry -> dirty && ( multi || gpu_num != 0 ) && __MACC_NUMGPUS > 1 ) {
int update_all = 0 ;
int update_all_DtoH = 0 ;
# 370 "main.c"
/* Unanalyzable read or write set: conservatively sync everything. */
if ( use_type == 0 || def_type == 0 )
update_all = 1 ;
# 373 "main.c"
/* Another GPU writes into our dirty span: full sync needed. */
else if ( def_type == 2 ) {
for ( int i = 0 ; i < __MACC_NUMGPUS ; i ++ ) {
if ( i != gpu_num &&
( ! ( entry -> dirty_lb > def_ub_set [ i ] || entry -> dirty_ub < def_lb_set [ i ] ) ) ) {
# 378 "main.c"
update_all = 1 ;
break ;
}
}
}
# 384 "main.c"
/* Decide whether per-GPU partial transfers can replace a full sync. */
if ( ! update_all ) {
int every_whole = 1 ;
int unused_lb = entry -> dirty_lb ;
int unused_ub = entry -> dirty_ub ;
# 389 "main.c"
for ( int i = 0 ; i < __MACC_NUMGPUS ; i ++ ) {
if ( i != gpu_num ) {
/* This GPU reads the entire dirty span: one DtoH suffices. */
if ( ( use_lb_set [ i ] <= entry -> dirty_lb && entry -> dirty_ub <= use_ub_set [ i ] ) ) {
# 393 "main.c"
update_all_DtoH = 1 ;
}
else {
every_whole = 0 ;
# 398 "main.c"
/* Shrink the dirty sub-range that no other GPU reads. */
if ( use_lb_set [ i ] <= unused_lb )
unused_lb = ( ( ( unused_lb ) > ( use_ub_set [ i ] + 1 ) ) ? ( unused_lb ) : ( use_ub_set [ i ] + 1 ) ) ;
else if ( use_ub_set [ i ] >= unused_ub )
unused_ub = ( ( ( unused_ub ) < ( use_lb_set [ i ] - 1 ) ) ? ( unused_ub ) : ( use_lb_set [ i ] - 1 ) ) ;
}
}
}
# 406 "main.c"
if ( every_whole )
update_all = 1 ;
/* The whole dirty span is read by someone: host copy must be complete. */
if ( unused_ub < unused_lb )
update_all_DtoH = 1 ;
}
# 412 "main.c"
if ( update_all ) {
__macc_sync_data ( gpu_num , ptr , entry -> type_size , entry -> dirty_lb , entry -> dirty_ub ) ;
entry -> dirty = 0 ;
}
# 418 "main.c"
/* Partial sync: send each reader only the slice it needs. */
else if ( entry -> dirty && use_type == 2 ) {
int thread_num = multi ? __MACC_NUMGPUS : 1 ;
# 422 "main.c"
if ( update_all_DtoH )
acc_update_self ( ( ptr + entry -> dirty_lb * entry -> type_size ) ,
( ( entry -> dirty_ub - entry -> dirty_lb + 1 ) * entry -> type_size ) ) ;
# 426 "main.c"
for ( int i = 0 ; i < thread_num ; i ++ )
{
# 431 "main.c"
if ( i != gpu_num && ( ! ( entry -> dirty_lb > use_ub_set [ i ] || entry -> dirty_ub < use_lb_set [ i ] ) ) ) {
# 435 "main.c"
/* Intersection of the dirty span with GPU i's read range. */
int update_lb = ( ( ( entry -> dirty_lb ) > ( use_lb_set [ i ] ) ) ? ( entry -> dirty_lb ) : ( use_lb_set [ i ] ) ) ;
int update_ub = ( ( ( entry -> dirty_ub ) < ( use_ub_set [ i ] ) ) ? ( entry -> dirty_ub ) : ( use_ub_set [ i ] ) ) ;
void * update_addr = ( ptr + update_lb * entry -> type_size ) ;
size_t length_b = ( ( update_ub - update_lb + 1 ) * entry -> type_size ) ;
# 440 "main.c"
/* Host copy of this slice is stale unless the full DtoH ran. */
if ( ! update_all_DtoH ) {
__macc_set_gpu_num ( gpu_num ) ;
acc_update_self ( update_addr , length_b ) ;
}
__macc_set_gpu_num ( i ) ;
acc_update_device ( update_addr , length_b ) ;
}
}
__macc_set_gpu_num ( gpu_num ) ;
}
}
# 453 "main.c"
/* Phase 2: record the range this GPU will write as the new dirty span. */
if ( ( multi || gpu_num == 0 ) && def_type != 1 ) {
if ( def_type == 0 ) {
entry -> dirty = 1 ;
entry -> dirty_lb = entry -> entire_lb ;
entry -> dirty_ub = entry -> entire_ub ;
}
# 465 "main.c"
else if ( ! ( entry -> dirty ) ) {
entry -> dirty = 1 ;
entry -> dirty_lb = def_lb_set [ gpu_num ] ;
entry -> dirty_ub = def_ub_set [ gpu_num ] ;
}
# 471 "main.c"
/* Overlapping or adjacent write range: merge into one dirty span. */
else if (
( ! ( entry -> dirty_lb > def_ub_set [ gpu_num ] || entry -> dirty_ub < def_lb_set [ gpu_num ] ) ) ||
# 477 "main.c"
entry -> dirty_lb == def_ub_set [ gpu_num ] + 1 ||
def_lb_set [ gpu_num ] == entry -> dirty_ub + 1
) {
entry -> dirty_lb = ( ( ( entry -> dirty_lb ) < ( def_lb_set [ gpu_num ] ) ) ? ( entry -> dirty_lb ) : ( def_lb_set [ gpu_num ] ) ) ;
entry -> dirty_ub = ( ( ( entry -> dirty_ub ) > ( def_ub_set [ gpu_num ] ) ) ? ( entry -> dirty_ub ) : ( def_ub_set [ gpu_num ] ) ) ;
}
# 485 "main.c"
/* Disjoint write range: flush the old span first, then track the new one. */
else {
__macc_sync_data ( gpu_num , ptr , entry -> type_size , entry -> dirty_lb , entry -> dirty_ub ) ;
entry -> dirty_lb = def_lb_set [ gpu_num ] ;
entry -> dirty_ub = def_ub_set [ gpu_num ] ;
}
}
}
# 493 "main.c"
/*
 * Apply __macc_set_data_region to each of 'len' arrays on GPU gpu_num.
 * The device is (re)selected before every call because the per-array
 * update may switch devices internally.
 */
void __macc_set_data_region_multi (
int gpu_num , int multi , int len , void * * ptrs ,
int * use_type , int * * use_lb_set , int * * use_ub_set ,
int * def_type , int * * def_lb_set , int * * def_ub_set )
{
    for ( int i = 0 ; i < len ; i ++ ) {
        __macc_set_gpu_num ( gpu_num ) ;

        __macc_set_data_region (
            gpu_num , ptrs [ i ] , multi ,
            use_type [ i ] , use_lb_set [ i ] , use_ub_set [ i ] ,
            def_type [ i ] , def_lb_set [ i ] , def_ub_set [ i ] ) ;
    }
}
# 513 "main.c"
/*
 * Runtime bootstrap: decide the GPU count (MACC_NUMGPUS env var or probe
 * the OpenACC runtime), build the device topology map (MACC_TOPOLOGY env
 * var as a comma-separated list, or the identity mapping), allocate the
 * per-GPU data tables and wrap caches, and run a warm-up kernel sequence
 * so later timed regions do not pay device start-up cost.
 * Exits the process when no GPU is available.
 */
void __macc_init ( )
{
char * env_macc_numgpus = getenv ( "MACC_NUMGPUS" ) ;

if ( env_macc_numgpus != ( ( void * ) 0 ) ) {
__MACC_NUMGPUS = atoi ( env_macc_numgpus ) ;
}
else {
__MACC_NUMGPUS = __macc_get_num_gpus ( ) ;
}

if ( __MACC_NUMGPUS <= 0 ) {
fputs ( "[MACC ERROR] No GPU device found." , stderr ) ;
exit ( - 1 ) ;
}

__MACC_TOPOLOGY = malloc_managed ( __MACC_NUMGPUS * sizeof ( int ) ) ;

/* Default to the identity mapping first so a missing or PARTIAL
 * MACC_TOPOLOGY list leaves every slot initialized (the original left
 * trailing slots uninitialized when the list was short). */
for ( int i = 0 ; i < __MACC_NUMGPUS ; i ++ )
__MACC_TOPOLOGY [ i ] = i ;

char * topo_env = getenv ( "MACC_TOPOLOGY" ) ;

if ( topo_env != ( ( void * ) 0 ) ) {
/* strtok() writes into its argument, and the string returned by
 * getenv() must not be modified (the original tokenized the
 * environment in place), so parse a private copy. */
char * topo_copy = malloc_managed ( strlen ( topo_env ) + 1 ) ;
if ( topo_copy != ( ( void * ) 0 ) ) {
strcpy ( topo_copy , topo_env ) ;

int i = 0 ;
char * tok = strtok ( topo_copy , "," ) ;
while ( tok != ( ( void * ) 0 ) && i < __MACC_NUMGPUS ) {
__MACC_TOPOLOGY [ i ] = atoi ( tok ) ;
tok = strtok ( ( ( void * ) 0 ) , "," ) ;
i ++ ;
}

free_managed ( topo_copy ) ;
}
}

__MACC_DATA_TABLE_SET = calloc_managed ( __MACC_NUMGPUS , sizeof ( struct __MaccDataTable ) ) ;
__MACC_DATA_WRAP_CACHE_SET = calloc_managed ( __MACC_NUMGPUS , sizeof ( struct __MaccDataWrapCache ) ) ;

/* Warm-up: repeatedly stream a large managed buffer through the device. */
for ( int t = 0 ; t < 10 ; t ++ ) {
printf ( "[MACC] Wake up (%d)\n" , t ) ;

int n = 256 * 1024 * 1024 ;
int * tmp = malloc_managed ( n * sizeof ( int ) ) ;

#pragma acc data copy ( tmp [ 0 : n ] )
{
#pragma acc parallel loop num_gangs ( 512 ) vector_length ( 1024 ) gang vector
for ( int i = 1 ; i < n ; i ++ )
tmp [ i ] = i ;

#pragma acc parallel loop num_gangs ( 512 ) vector_length ( 1024 ) gang vector
for ( int i = 1 ; i < n ; i ++ )
tmp [ n - i ] += i ;
}

free_managed ( tmp ) ;
}
}
# 598 "main.c"
# 601 "main.c"
# 1 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header>
# 10 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header>
# 17 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header>
# 19 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header>
# 1 "/opt/pgi/linux86-64/17.10/include/stdlib.h" <System_Header>
# 3 "/opt/pgi/linux86-64/17.10/include/stdlib.h" <System_Header>
# 20 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header>
# 25 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header>
extern void * malloc_managed ( size_t ) ;
extern void * calloc_managed ( size_t , size_t ) ;
extern void free_managed ( void * ) ;
extern void cfree_managed ( void * ) ;
# 31 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header>
extern void * realloc_managed ( void * , size_t ) ;
extern void * valloc_managed ( size_t ) ;
extern void * pvalloc_managed ( size_t ) ;
extern void * memalign_managed ( size_t , size_t ) ;
extern int posix_memalign_managed ( void * * , size_t , size_t ) ;
extern char * tmpnam_managed ( char * ) ;
# 43 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header>
# 58 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header>
# 602 "main.c"
# 602 "main.c"
# 1 "/usr/include/stdio.h" <System_Header>
# 17 "/usr/include/stdio.h" <System_Header>
# 21 "/usr/include/stdio.h" <System_Header>
# 603 "main.c"
# 604 "main.c"
/*
 * Print the standard NAS Parallel Benchmark result report for benchmark
 * 'name' of problem class 'class': size, iterations, timing, Mop/s,
 * verification status, and the recorded build-configuration strings.
 */
void c_print_results ( char * name ,
char class ,
int n1 ,
int n2 ,
int n3 ,
int niter ,
double t ,
double mops ,
char * optype ,
int passed_verification ,
char * npbversion ,
char * compiletime ,
char * cc ,
char * clink ,
char * c_lib ,
char * c_inc ,
char * cflags ,
char * clinkflags )
{
printf ( "\n\n %s Benchmark Completed\n" , name ) ;
# 625 "main.c"
printf ( " Class = %c\n" , class ) ;
# 627 "main.c"
/* n3 == 0 marks a 1-D/2-D problem (n2 == 0 for 1-D): print a flat size. */
if ( n3 == 0 ) {
long nn = n1 ;
if ( n2 != 0 ) nn *= n2 ;
printf ( " Size = %12ld\n" , nn ) ;
}
else
printf ( " Size = %4dx%4dx%4d\n" , n1 , n2 , n3 ) ;
# 635 "main.c"
printf ( " Iterations = %12d\n" , niter ) ;
printf ( " Time in seconds = %12.2f\n" , t ) ;
# 639 "main.c"
printf ( " Mop/s total = %12.2f\n" , mops ) ;
# 641 "main.c"
printf ( " Operation type = %24s\n" , optype ) ;
# 643 "main.c"
/* passed_verification: < 0 skipped, 0 failed, > 0 passed. */
if ( passed_verification < 0 )
printf ( " Verification = NOT PERFORMED\n" ) ;
else if ( passed_verification )
printf ( " Verification = SUCCESSFUL\n" ) ;
else
printf ( " Verification = UNSUCCESSFUL\n" ) ;
# 650 "main.c"
printf ( " Version = %12s\n" , npbversion ) ;
# 652 "main.c"
printf ( " Compile date = %12s\n" , compiletime ) ;
# 654 "main.c"
printf ( "\n Compile options:\n" ) ;
# 656 "main.c"
printf ( " CC = %s\n" , cc ) ;
# 658 "main.c"
printf ( " CLINK = %s\n" , clink ) ;
# 660 "main.c"
printf ( " C_LIB = %s\n" , c_lib ) ;
# 662 "main.c"
printf ( " C_INC = %s\n" , c_inc ) ;
# 664 "main.c"
printf ( " CFLAGS = %s\n" , cflags ) ;
# 666 "main.c"
printf ( " CLINKFLAGS = %s\n" , clinkflags ) ;
# 672 "main.c"
printf ( "\n--------------------------------------\n" ) ;
printf ( " Please send all errors/feedbacks to:\n" ) ;
printf ( " Center for Manycore Programming\n" ) ;
printf ( " cmp@aces.snu.ac.kr\n" ) ;
printf ( " http://aces.snu.ac.kr\n" ) ;
printf ( "--------------------------------------\n" ) ;
}
# 680 "main.c"
# 1 "../../common/wtime.h"
# 3 "../../common/wtime.h"
# 681 "main.c"
# 681 "main.c"
# 1 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header>
# 10 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header>
# 17 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header>
# 19 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header>
# 1 "/opt/pgi/linux86-64/17.10/include/stdlib.h" <System_Header>
# 3 "/opt/pgi/linux86-64/17.10/include/stdlib.h" <System_Header>
# 20 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header>
# 25 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header>
extern void * malloc_managed ( size_t ) ;
extern void * calloc_managed ( size_t , size_t ) ;
extern void free_managed ( void * ) ;
extern void cfree_managed ( void * ) ;
# 31 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header>
extern void * realloc_managed ( void * , size_t ) ;
extern void * valloc_managed ( size_t ) ;
extern void * pvalloc_managed ( size_t ) ;
extern void * memalign_managed ( size_t , size_t ) ;
extern int posix_memalign_managed ( void * * , size_t , size_t ) ;
extern char * tmpnam_managed ( char * ) ;
# 43 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header>
# 58 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header>
# 682 "main.c"
# 683 "main.c"
void wtime_ ( double * ) ;
# 687 "main.c"
/* Return the current time in seconds via the Fortran-style wtime_ helper
 * (presumably wall-clock time -- depends on wtime_'s implementation). */
static double elapsed_time ( void )
{
double t ;
# 694 "main.c"
wtime_ ( & t ) ;
return ( t ) ;
}
# 699 "main.c"
static double start [ 64 ] , elapsed [ 64 ] ;
# 701 "main.c"
/* Reset the accumulated elapsed time of timer slot n to zero. */
void timer_clear ( int n )
{
    /* start[]/elapsed[] hold 64 entries; an out-of-range slot would be an
       out-of-bounds write (undefined behavior), so reject it (CERT ARR30-C). */
    if ( n < 0 || n >= 64 ) return ;
    elapsed [ n ] = 0.0 ;
}
# 710 "main.c"
/* Record the current wall-clock time as the start point of timer slot n. */
void timer_start ( int n )
{
    /* start[] holds 64 entries; an out-of-range slot would be an
       out-of-bounds write (undefined behavior), so reject it (CERT ARR30-C). */
    if ( n < 0 || n >= 64 ) return ;
    start [ n ] = elapsed_time ( ) ;
}
# 719 "main.c"
/* Stop timer slot n: add the interval since the matching timer_start(n)
   to the slot's accumulated elapsed time. */
void timer_stop ( int n )
{
    double t , now ;
    /* start[]/elapsed[] hold 64 entries; an out-of-range slot would be an
       out-of-bounds access (undefined behavior), so reject it (CERT ARR30-C). */
    if ( n < 0 || n >= 64 ) return ;
    now = elapsed_time ( ) ;
    t = now - start [ n ] ;
    elapsed [ n ] += t ;
}
# 733 "main.c"
/* Return the accumulated elapsed time (seconds) of timer slot n;
   0.0 for a slot outside the valid range [0, 64). */
double timer_read ( int n )
{
    /* elapsed[] holds 64 entries; an out-of-range slot would be an
       out-of-bounds read (undefined behavior), so reject it (CERT ARR30-C). */
    if ( n < 0 || n >= 64 ) return 0.0 ;
    return ( elapsed [ n ] ) ;
}
# 741 "main.c"
# 1 "/usr/include/stdio.h" <System_Header>
# 17 "/usr/include/stdio.h" <System_Header>
# 21 "/usr/include/stdio.h" <System_Header>
# 742 "main.c"
# 742 "main.c"
# 1 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
# 10 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
# 14 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
# 29 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
# 35 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
double tgamma ( double ) ;
float tgammaf ( float ) ;
# 38 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
double round ( double ) ;
float roundf ( float ) ;
long int lround ( double ) ;
long int lroundf ( float ) ;
long long int llround ( double ) ;
long long int llroundf ( float ) ;
# 59 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
# 1 "/usr/include/math.h" <System_Header>
# 17 "/usr/include/math.h" <System_Header>
# 21 "/usr/include/math.h" <System_Header>
# 26 "/usr/include/math.h" <System_Header>
# 1 "/usr/include/features.h" <System_Header>
# 16 "/usr/include/features.h" <System_Header>
# 27 "/usr/include/math.h" <System_Header>
# 30 "/usr/include/math.h" <System_Header>
# 31 "/usr/include/math.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/math-vector.h" <System_Header>
# 17 "/usr/include/x86_64-linux-gnu/bits/math-vector.h" <System_Header>
# 24 "/usr/include/x86_64-linux-gnu/bits/math-vector.h" <System_Header>
# 25 "/usr/include/x86_64-linux-gnu/bits/math-vector.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/libm-simd-decl-stubs.h" <System_Header>
# 17 "/usr/include/x86_64-linux-gnu/bits/libm-simd-decl-stubs.h" <System_Header>
# 31 "/usr/include/x86_64-linux-gnu/bits/libm-simd-decl-stubs.h" <System_Header>
# 26 "/usr/include/x86_64-linux-gnu/bits/math-vector.h" <System_Header>
# 32 "/usr/include/math.h" <System_Header>
# 34 "/usr/include/math.h" <System_Header>
# 35 "/usr/include/math.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/huge_val.h" <System_Header>
# 18 "/usr/include/x86_64-linux-gnu/bits/huge_val.h" <System_Header>
# 24 "/usr/include/x86_64-linux-gnu/bits/huge_val.h" <System_Header>
# 39 "/usr/include/x86_64-linux-gnu/bits/huge_val.h" <System_Header>
# 1 "/usr/include/endian.h" <System_Header>
# 16 "/usr/include/endian.h" <System_Header>
# 40 "/usr/include/x86_64-linux-gnu/bits/huge_val.h" <System_Header>
# 41 "/usr/include/x86_64-linux-gnu/bits/huge_val.h" <System_Header>
typedef union { unsigned char __c [ 8 ] ; double __d ; } __huge_val_t ;
# 50 "/usr/include/x86_64-linux-gnu/bits/huge_val.h" <System_Header>
static __huge_val_t __huge_val = { { 0 , 0 , 0 , 0 , 0 , 0 , 0xf0 , 0x7f } } ;
# 36 "/usr/include/math.h" <System_Header>
# 37 "/usr/include/math.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/huge_valf.h" <System_Header>
# 18 "/usr/include/x86_64-linux-gnu/bits/huge_valf.h" <System_Header>
# 24 "/usr/include/x86_64-linux-gnu/bits/huge_valf.h" <System_Header>
# 39 "/usr/include/x86_64-linux-gnu/bits/huge_valf.h" <System_Header>
typedef union { unsigned char __c [ 4 ] ; float __f ; } __huge_valf_t ;
# 48 "/usr/include/x86_64-linux-gnu/bits/huge_valf.h" <System_Header>
static __huge_valf_t __huge_valf = { { 0 , 0 , 0x80 , 0x7f } } ;
# 38 "/usr/include/math.h" <System_Header>
# 38 "/usr/include/math.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/huge_vall.h" <System_Header>
# 18 "/usr/include/x86_64-linux-gnu/bits/huge_vall.h" <System_Header>
# 37 "/usr/include/x86_64-linux-gnu/bits/huge_vall.h" <System_Header>
static union { unsigned char __c [ 12 ] ; long double __ld ; } __huge_vall = { { 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0x80 , 0xff , 0x7f , 0 , 0 } } ;
# 39 "/usr/include/math.h" <System_Header>
# 40 "/usr/include/math.h" <System_Header>
# 41 "/usr/include/math.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/inf.h" <System_Header>
# 17 "/usr/include/x86_64-linux-gnu/bits/inf.h" <System_Header>
# 23 "/usr/include/x86_64-linux-gnu/bits/inf.h" <System_Header>
# 42 "/usr/include/math.h" <System_Header>
# 43 "/usr/include/math.h" <System_Header>
# 44 "/usr/include/math.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/nan.h" <System_Header>
# 17 "/usr/include/x86_64-linux-gnu/bits/nan.h" <System_Header>
# 24 "/usr/include/x86_64-linux-gnu/bits/nan.h" <System_Header>
# 39 "/usr/include/x86_64-linux-gnu/bits/nan.h" <System_Header>
# 1 "/usr/include/endian.h" <System_Header>
# 16 "/usr/include/endian.h" <System_Header>
# 40 "/usr/include/x86_64-linux-gnu/bits/nan.h" <System_Header>
# 48 "/usr/include/x86_64-linux-gnu/bits/nan.h" <System_Header>
static union { unsigned char __c [ 4 ] ; float __d ; } __qnan_union
__attribute__ ( ( __unused__ ) ) = { { 0 , 0 , 0xc0 , 0x7f } } ;
# 45 "/usr/include/math.h" <System_Header>
# 47 "/usr/include/math.h" <System_Header>
# 48 "/usr/include/math.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/mathdef.h" <System_Header>
# 16 "/usr/include/x86_64-linux-gnu/bits/mathdef.h" <System_Header>
# 27 "/usr/include/x86_64-linux-gnu/bits/mathdef.h" <System_Header>
typedef float float_t ;
typedef double double_t ;
# 41 "/usr/include/x86_64-linux-gnu/bits/mathdef.h" <System_Header>
# 46 "/usr/include/x86_64-linux-gnu/bits/mathdef.h" <System_Header>
# 49 "/usr/include/math.h" <System_Header>
# 53 "/usr/include/math.h" <System_Header>
# 83 "/usr/include/math.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
# 17 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
# 43 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
# 50 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
# 53 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double acos ( double __x ) ; extern double __acos ( double __x ) ;
extern double asin ( double __x ) ; extern double __asin ( double __x ) ;
extern double atan ( double __x ) ; extern double __atan ( double __x ) ;
extern double atan2 ( double __y , double __x ) ; extern double __atan2 ( double __y , double __x ) ;
# 62 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double cos ( double __x ) ; extern double __cos ( double __x ) ;
extern double sin ( double __x ) ; extern double __sin ( double __x ) ;
extern double tan ( double __x ) ; extern double __tan ( double __x ) ;
# 69 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
# 71 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double cosh ( double __x ) ; extern double __cosh ( double __x ) ;
extern double sinh ( double __x ) ; extern double __sinh ( double __x ) ;
extern double tanh ( double __x ) ; extern double __tanh ( double __x ) ;
# 87 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double acosh ( double __x ) ; extern double __acosh ( double __x ) ;
extern double asinh ( double __x ) ; extern double __asinh ( double __x ) ;
extern double atanh ( double __x ) ; extern double __atanh ( double __x ) ;
# 96 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
# 99 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double exp ( double __x ) ; extern double __exp ( double __x ) ;
# 102 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double frexp ( double __x , int * __exponent ) ; extern double __frexp ( double __x , int * __exponent ) ;
# 105 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double ldexp ( double __x , int __exponent ) ; extern double __ldexp ( double __x , int __exponent ) ;
# 108 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double log ( double __x ) ; extern double __log ( double __x ) ;
# 111 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double log10 ( double __x ) ; extern double __log10 ( double __x ) ;
# 114 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double modf ( double __x , double * __iptr ) ; extern double __modf ( double __x , double * __iptr ) ;
# 127 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double expm1 ( double __x ) ; extern double __expm1 ( double __x ) ;
# 130 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double log1p ( double __x ) ; extern double __log1p ( double __x ) ;
# 133 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double logb ( double __x ) ; extern double __logb ( double __x ) ;
# 140 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double exp2 ( double __x ) ; extern double __exp2 ( double __x ) ;
# 143 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double log2 ( double __x ) ; extern double __log2 ( double __x ) ;
# 149 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
# 152 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double pow ( double __x , double __y ) ; extern double __pow ( double __x , double __y ) ;
# 155 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double sqrt ( double __x ) ; extern double __sqrt ( double __x ) ;
# 161 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double hypot ( double __x , double __y ) ; extern double __hypot ( double __x , double __y ) ;
# 168 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double cbrt ( double __x ) ; extern double __cbrt ( double __x ) ;
# 174 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
# 177 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double ceil ( double __x ) __attribute__ ( ( __const__ ) ) ; extern double __ceil ( double __x ) __attribute__ ( ( __const__ ) ) ;
# 180 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double fabs ( double __x ) __attribute__ ( ( __const__ ) ) ; extern double __fabs ( double __x ) __attribute__ ( ( __const__ ) ) ;
# 183 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double floor ( double __x ) __attribute__ ( ( __const__ ) ) ; extern double __floor ( double __x ) __attribute__ ( ( __const__ ) ) ;
# 186 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double fmod ( double __x , double __y ) ; extern double __fmod ( double __x , double __y ) ;
# 191 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern int __isinf ( double __value ) __attribute__ ( ( __const__ ) ) ;
# 194 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern int __finite ( double __value ) __attribute__ ( ( __const__ ) ) ;
# 203 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern int isinf ( double __value ) __attribute__ ( ( __const__ ) ) ;
# 207 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern int finite ( double __value ) __attribute__ ( ( __const__ ) ) ;
# 210 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double drem ( double __x , double __y ) ; extern double __drem ( double __x , double __y ) ;
# 214 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double significand ( double __x ) ; extern double __significand ( double __x ) ;
# 220 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double copysign ( double __x , double __y ) __attribute__ ( ( __const__ ) ) ; extern double __copysign ( double __x , double __y ) __attribute__ ( ( __const__ ) ) ;
# 227 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double nan ( const char * __tagb ) __attribute__ ( ( __const__ ) ) ; extern double __nan ( const char * __tagb ) __attribute__ ( ( __const__ ) ) ;
# 233 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern int __isnan ( double __value ) __attribute__ ( ( __const__ ) ) ;
# 240 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern int isnan ( double __value ) __attribute__ ( ( __const__ ) ) ;
# 246 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double j0 ( double ) ; extern double __j0 ( double ) ;
extern double j1 ( double ) ; extern double __j1 ( double ) ;
extern double jn ( int , double ) ; extern double __jn ( int , double ) ;
extern double y0 ( double ) ; extern double __y0 ( double ) ;
extern double y1 ( double ) ; extern double __y1 ( double ) ;
extern double yn ( int , double ) ; extern double __yn ( int , double ) ;
# 258 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double erf ( double ) ; extern double __erf ( double ) ;
extern double erfc ( double ) ; extern double __erfc ( double ) ;
extern double lgamma ( double ) ; extern double __lgamma ( double ) ;
# 267 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double tgamma ( double ) ; extern double __tgamma ( double ) ;
# 273 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double gamma ( double ) ; extern double __gamma ( double ) ;
# 280 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double lgamma_r ( double , int * __signgamp ) ; extern double __lgamma_r ( double , int * __signgamp ) ;
# 288 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double rint ( double __x ) ; extern double __rint ( double __x ) ;
# 291 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double nextafter ( double __x , double __y ) __attribute__ ( ( __const__ ) ) ; extern double __nextafter ( double __x , double __y ) __attribute__ ( ( __const__ ) ) ;
# 294 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double nexttoward ( double __x , long double __y ) __attribute__ ( ( __const__ ) ) ; extern double __nexttoward ( double __x , long double __y ) __attribute__ ( ( __const__ ) ) ;
# 297 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double remainder ( double __x , double __y ) ; extern double __remainder ( double __x , double __y ) ;
# 301 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double scalbn ( double __x , int __n ) ; extern double __scalbn ( double __x , int __n ) ;
# 305 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern int ilogb ( double __x ) ; extern int __ilogb ( double __x ) ;
# 310 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double scalbln ( double __x , long int __n ) ; extern double __scalbln ( double __x , long int __n ) ;
# 314 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double nearbyint ( double __x ) ; extern double __nearbyint ( double __x ) ;
# 318 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double round ( double __x ) __attribute__ ( ( __const__ ) ) ; extern double __round ( double __x ) __attribute__ ( ( __const__ ) ) ;
# 322 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double trunc ( double __x ) __attribute__ ( ( __const__ ) ) ; extern double __trunc ( double __x ) __attribute__ ( ( __const__ ) ) ;
# 327 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double remquo ( double __x , double __y , int * __quo ) ; extern double __remquo ( double __x , double __y , int * __quo ) ;
# 331 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
# 334 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long int lrint ( double __x ) ; extern long int __lrint ( double __x ) ;
# 337 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long long int llrint ( double __x ) ; extern long long int __llrint ( double __x ) ;
# 340 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long int lround ( double __x ) ; extern long int __lround ( double __x ) ;
# 343 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long long int llround ( double __x ) ; extern long long int __llround ( double __x ) ;
# 346 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double fdim ( double __x , double __y ) ; extern double __fdim ( double __x , double __y ) ;
# 349 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double fmax ( double __x , double __y ) __attribute__ ( ( __const__ ) ) ; extern double __fmax ( double __x , double __y ) __attribute__ ( ( __const__ ) ) ;
# 352 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double fmin ( double __x , double __y ) __attribute__ ( ( __const__ ) ) ; extern double __fmin ( double __x , double __y ) __attribute__ ( ( __const__ ) ) ;
# 356 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern int __fpclassify ( double __value )
__attribute__ ( ( __const__ ) ) ;
# 360 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern int __signbit ( double __value )
__attribute__ ( ( __const__ ) ) ;
# 365 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double fma ( double __x , double __y , double __z ) ; extern double __fma ( double __x , double __y , double __z ) ;
# 382 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double scalb ( double __x , double __n ) ; extern double __scalb ( double __x , double __n ) ;
# 84 "/usr/include/math.h" <System_Header>
# 94 "/usr/include/math.h" <System_Header>
# 104 "/usr/include/math.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
# 17 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
# 43 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
# 50 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
# 53 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float acosf ( float __x ) ; extern float __acosf ( float __x ) ;
extern float asinf ( float __x ) ; extern float __asinf ( float __x ) ;
extern float atanf ( float __x ) ; extern float __atanf ( float __x ) ;
extern float atan2f ( float __y , float __x ) ; extern float __atan2f ( float __y , float __x ) ;
# 62 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float cosf ( float __x ) ; extern float __cosf ( float __x ) ;
extern float sinf ( float __x ) ; extern float __sinf ( float __x ) ;
extern float tanf ( float __x ) ; extern float __tanf ( float __x ) ;
# 69 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
# 71 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float coshf ( float __x ) ; extern float __coshf ( float __x ) ;
extern float sinhf ( float __x ) ; extern float __sinhf ( float __x ) ;
extern float tanhf ( float __x ) ; extern float __tanhf ( float __x ) ;
# 87 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float acoshf ( float __x ) ; extern float __acoshf ( float __x ) ;
extern float asinhf ( float __x ) ; extern float __asinhf ( float __x ) ;
extern float atanhf ( float __x ) ; extern float __atanhf ( float __x ) ;
# 96 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
# 99 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float expf ( float __x ) ; extern float __expf ( float __x ) ;
# 102 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float frexpf ( float __x , int * __exponent ) ; extern float __frexpf ( float __x , int * __exponent ) ;
# 105 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float ldexpf ( float __x , int __exponent ) ; extern float __ldexpf ( float __x , int __exponent ) ;
# 108 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float logf ( float __x ) ; extern float __logf ( float __x ) ;
# 111 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float log10f ( float __x ) ; extern float __log10f ( float __x ) ;
# 114 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float modff ( float __x , float * __iptr ) ; extern float __modff ( float __x , float * __iptr ) ;
# 127 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float expm1f ( float __x ) ; extern float __expm1f ( float __x ) ;
# 130 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float log1pf ( float __x ) ; extern float __log1pf ( float __x ) ;
# 133 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float logbf ( float __x ) ; extern float __logbf ( float __x ) ;
# 140 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float exp2f ( float __x ) ; extern float __exp2f ( float __x ) ;
# 143 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float log2f ( float __x ) ; extern float __log2f ( float __x ) ;
# 149 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
# 152 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float powf ( float __x , float __y ) ; extern float __powf ( float __x , float __y ) ;
# 155 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float sqrtf ( float __x ) ; extern float __sqrtf ( float __x ) ;
# 161 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float hypotf ( float __x , float __y ) ; extern float __hypotf ( float __x , float __y ) ;
# 168 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float cbrtf ( float __x ) ; extern float __cbrtf ( float __x ) ;
# 174 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
# 177 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float ceilf ( float __x ) __attribute__ ( ( __const__ ) ) ; extern float __ceilf ( float __x ) __attribute__ ( ( __const__ ) ) ;
# 180 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float fabsf ( float __x ) __attribute__ ( ( __const__ ) ) ; extern float __fabsf ( float __x ) __attribute__ ( ( __const__ ) ) ;
# 183 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float floorf ( float __x ) __attribute__ ( ( __const__ ) ) ; extern float __floorf ( float __x ) __attribute__ ( ( __const__ ) ) ;
# 186 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float fmodf ( float __x , float __y ) ; extern float __fmodf ( float __x , float __y ) ;
# 191 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern int __isinff ( float __value ) __attribute__ ( ( __const__ ) ) ;
# 194 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern int __finitef ( float __value ) __attribute__ ( ( __const__ ) ) ;
# 203 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern int isinff ( float __value ) __attribute__ ( ( __const__ ) ) ;
# 207 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern int finitef ( float __value ) __attribute__ ( ( __const__ ) ) ;
# 210 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float dremf ( float __x , float __y ) ; extern float __dremf ( float __x , float __y ) ;
# 214 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float significandf ( float __x ) ; extern float __significandf ( float __x ) ;
# 220 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float copysignf ( float __x , float __y ) __attribute__ ( ( __const__ ) ) ; extern float __copysignf ( float __x , float __y ) __attribute__ ( ( __const__ ) ) ;
# 227 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float nanf ( const char * __tagb ) __attribute__ ( ( __const__ ) ) ; extern float __nanf ( const char * __tagb ) __attribute__ ( ( __const__ ) ) ;
# 233 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern int __isnanf ( float __value ) __attribute__ ( ( __const__ ) ) ;
# 240 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern int isnanf ( float __value ) __attribute__ ( ( __const__ ) ) ;
# 246 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float j0f ( float ) ; extern float __j0f ( float ) ;
extern float j1f ( float ) ; extern float __j1f ( float ) ;
extern float jnf ( int , float ) ; extern float __jnf ( int , float ) ;
extern float y0f ( float ) ; extern float __y0f ( float ) ;
extern float y1f ( float ) ; extern float __y1f ( float ) ;
extern float ynf ( int , float ) ; extern float __ynf ( int , float ) ;
# 258 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float erff ( float ) ; extern float __erff ( float ) ;
extern float erfcf ( float ) ; extern float __erfcf ( float ) ;
extern float lgammaf ( float ) ; extern float __lgammaf ( float ) ;
# 267 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float tgammaf ( float ) ; extern float __tgammaf ( float ) ;
# 273 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float gammaf ( float ) ; extern float __gammaf ( float ) ;
# 280 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float lgammaf_r ( float , int * __signgamp ) ; extern float __lgammaf_r ( float , int * __signgamp ) ;
# 288 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float rintf ( float __x ) ; extern float __rintf ( float __x ) ;
# 291 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float nextafterf ( float __x , float __y ) __attribute__ ( ( __const__ ) ) ; extern float __nextafterf ( float __x , float __y ) __attribute__ ( ( __const__ ) ) ;
# 294 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float nexttowardf ( float __x , long double __y ) __attribute__ ( ( __const__ ) ) ; extern float __nexttowardf ( float __x , long double __y ) __attribute__ ( ( __const__ ) ) ;
# 297 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float remainderf ( float __x , float __y ) ; extern float __remainderf ( float __x , float __y ) ;
# 301 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float scalbnf ( float __x , int __n ) ; extern float __scalbnf ( float __x , int __n ) ;
# 305 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern int ilogbf ( float __x ) ; extern int __ilogbf ( float __x ) ;
# 310 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float scalblnf ( float __x , long int __n ) ; extern float __scalblnf ( float __x , long int __n ) ;
# 314 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float nearbyintf ( float __x ) ; extern float __nearbyintf ( float __x ) ;
# 318 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float roundf ( float __x ) __attribute__ ( ( __const__ ) ) ; extern float __roundf ( float __x ) __attribute__ ( ( __const__ ) ) ;
# 322 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float truncf ( float __x ) __attribute__ ( ( __const__ ) ) ; extern float __truncf ( float __x ) __attribute__ ( ( __const__ ) ) ;
# 327 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float remquof ( float __x , float __y , int * __quo ) ; extern float __remquof ( float __x , float __y , int * __quo ) ;
# 331 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
# 334 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long int lrintf ( float __x ) ; extern long int __lrintf ( float __x ) ;
# 337 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long long int llrintf ( float __x ) ; extern long long int __llrintf ( float __x ) ;
# 340 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long int lroundf ( float __x ) ; extern long int __lroundf ( float __x ) ;
# 343 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long long int llroundf ( float __x ) ; extern long long int __llroundf ( float __x ) ;
# 346 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float fdimf ( float __x , float __y ) ; extern float __fdimf ( float __x , float __y ) ;
# 349 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float fmaxf ( float __x , float __y ) __attribute__ ( ( __const__ ) ) ; extern float __fmaxf ( float __x , float __y ) __attribute__ ( ( __const__ ) ) ;
# 352 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float fminf ( float __x , float __y ) __attribute__ ( ( __const__ ) ) ; extern float __fminf ( float __x , float __y ) __attribute__ ( ( __const__ ) ) ;
# 356 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern int __fpclassifyf ( float __value )
__attribute__ ( ( __const__ ) ) ;
# 360 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern int __signbitf ( float __value )
__attribute__ ( ( __const__ ) ) ;
# 365 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float fmaf ( float __x , float __y , float __z ) ; extern float __fmaf ( float __x , float __y , float __z ) ;
# 382 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float scalbf ( float __x , float __n ) ; extern float __scalbf ( float __x , float __n ) ;
# 105 "/usr/include/math.h" <System_Header>
# 140 "/usr/include/math.h" <System_Header>
# 151 "/usr/include/math.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
# 17 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
# 43 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
# 50 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
# 53 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double acosl ( long double __x ) ; extern long double __acosl ( long double __x ) ;
extern long double asinl ( long double __x ) ; extern long double __asinl ( long double __x ) ;
extern long double atanl ( long double __x ) ; extern long double __atanl ( long double __x ) ;
extern long double atan2l ( long double __y , long double __x ) ; extern long double __atan2l ( long double __y , long double __x ) ;
# 62 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double cosl ( long double __x ) ; extern long double __cosl ( long double __x ) ;
extern long double sinl ( long double __x ) ; extern long double __sinl ( long double __x ) ;
extern long double tanl ( long double __x ) ; extern long double __tanl ( long double __x ) ;
# 69 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
# 71 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double coshl ( long double __x ) ; extern long double __coshl ( long double __x ) ;
extern long double sinhl ( long double __x ) ; extern long double __sinhl ( long double __x ) ;
extern long double tanhl ( long double __x ) ; extern long double __tanhl ( long double __x ) ;
# 87 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double acoshl ( long double __x ) ; extern long double __acoshl ( long double __x ) ;
extern long double asinhl ( long double __x ) ; extern long double __asinhl ( long double __x ) ;
extern long double atanhl ( long double __x ) ; extern long double __atanhl ( long double __x ) ;
# 96 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
# 99 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double expl ( long double __x ) ; extern long double __expl ( long double __x ) ;
# 102 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double frexpl ( long double __x , int * __exponent ) ; extern long double __frexpl ( long double __x , int * __exponent ) ;
# 105 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double ldexpl ( long double __x , int __exponent ) ; extern long double __ldexpl ( long double __x , int __exponent ) ;
# 108 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double logl ( long double __x ) ; extern long double __logl ( long double __x ) ;
# 111 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double log10l ( long double __x ) ; extern long double __log10l ( long double __x ) ;
# 114 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double modfl ( long double __x , long double * __iptr ) ; extern long double __modfl ( long double __x , long double * __iptr ) ;
# 127 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double expm1l ( long double __x ) ; extern long double __expm1l ( long double __x ) ;
# 130 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double log1pl ( long double __x ) ; extern long double __log1pl ( long double __x ) ;
# 133 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double logbl ( long double __x ) ; extern long double __logbl ( long double __x ) ;
# 140 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double exp2l ( long double __x ) ; extern long double __exp2l ( long double __x ) ;
# 143 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double log2l ( long double __x ) ; extern long double __log2l ( long double __x ) ;
# 149 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
# 152 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double powl ( long double __x , long double __y ) ; extern long double __powl ( long double __x , long double __y ) ;
# 155 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double sqrtl ( long double __x ) ; extern long double __sqrtl ( long double __x ) ;
# 161 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double hypotl ( long double __x , long double __y ) ; extern long double __hypotl ( long double __x , long double __y ) ;
# 168 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double cbrtl ( long double __x ) ; extern long double __cbrtl ( long double __x ) ;
# 174 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
# 177 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double ceill ( long double __x ) __attribute__ ( ( __const__ ) ) ; extern long double __ceill ( long double __x ) __attribute__ ( ( __const__ ) ) ;
# 180 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double fabsl ( long double __x ) __attribute__ ( ( __const__ ) ) ; extern long double __fabsl ( long double __x ) __attribute__ ( ( __const__ ) ) ;
# 183 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double floorl ( long double __x ) __attribute__ ( ( __const__ ) ) ; extern long double __floorl ( long double __x ) __attribute__ ( ( __const__ ) ) ;
# 186 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double fmodl ( long double __x , long double __y ) ; extern long double __fmodl ( long double __x , long double __y ) ;
# 191 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern int __isinfl ( long double __value ) __attribute__ ( ( __const__ ) ) ;
# 194 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern int __finitel ( long double __value ) __attribute__ ( ( __const__ ) ) ;
# 203 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern int isinfl ( long double __value ) __attribute__ ( ( __const__ ) ) ;
# 207 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern int finitel ( long double __value ) __attribute__ ( ( __const__ ) ) ;
# 210 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double dreml ( long double __x , long double __y ) ; extern long double __dreml ( long double __x , long double __y ) ;
# 214 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double significandl ( long double __x ) ; extern long double __significandl ( long double __x ) ;
# 220 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double copysignl ( long double __x , long double __y ) __attribute__ ( ( __const__ ) ) ; extern long double __copysignl ( long double __x , long double __y ) __attribute__ ( ( __const__ ) ) ;
# 227 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double nanl ( const char * __tagb ) __attribute__ ( ( __const__ ) ) ; extern long double __nanl ( const char * __tagb ) __attribute__ ( ( __const__ ) ) ;
# 233 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern int __isnanl ( long double __value ) __attribute__ ( ( __const__ ) ) ;
# 240 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern int isnanl ( long double __value ) __attribute__ ( ( __const__ ) ) ;
# 246 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double j0l ( long double ) ; extern long double __j0l ( long double ) ;
extern long double j1l ( long double ) ; extern long double __j1l ( long double ) ;
extern long double jnl ( int , long double ) ; extern long double __jnl ( int , long double ) ;
extern long double y0l ( long double ) ; extern long double __y0l ( long double ) ;
extern long double y1l ( long double ) ; extern long double __y1l ( long double ) ;
extern long double ynl ( int , long double ) ; extern long double __ynl ( int , long double ) ;
# 258 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double erfl ( long double ) ; extern long double __erfl ( long double ) ;
extern long double erfcl ( long double ) ; extern long double __erfcl ( long double ) ;
extern long double lgammal ( long double ) ; extern long double __lgammal ( long double ) ;
# 267 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double tgammal ( long double ) ; extern long double __tgammal ( long double ) ;
# 273 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double gammal ( long double ) ; extern long double __gammal ( long double ) ;
# 280 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double lgammal_r ( long double , int * __signgamp ) ; extern long double __lgammal_r ( long double , int * __signgamp ) ;
# 288 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double rintl ( long double __x ) ; extern long double __rintl ( long double __x ) ;
# 291 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double nextafterl ( long double __x , long double __y ) __attribute__ ( ( __const__ ) ) ; extern long double __nextafterl ( long double __x , long double __y ) __attribute__ ( ( __const__ ) ) ;
# 294 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double nexttowardl ( long double __x , long double __y ) __attribute__ ( ( __const__ ) ) ; extern long double __nexttowardl ( long double __x , long double __y ) __attribute__ ( ( __const__ ) ) ;
# 297 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double remainderl ( long double __x , long double __y ) ; extern long double __remainderl ( long double __x , long double __y ) ;
# 301 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double scalbnl ( long double __x , int __n ) ; extern long double __scalbnl ( long double __x , int __n ) ;
# 305 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern int ilogbl ( long double __x ) ; extern int __ilogbl ( long double __x ) ;
# 310 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double scalblnl ( long double __x , long int __n ) ; extern long double __scalblnl ( long double __x , long int __n ) ;
# 314 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double nearbyintl ( long double __x ) ; extern long double __nearbyintl ( long double __x ) ;
# 318 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double roundl ( long double __x ) __attribute__ ( ( __const__ ) ) ; extern long double __roundl ( long double __x ) __attribute__ ( ( __const__ ) ) ;
# 322 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double truncl ( long double __x ) __attribute__ ( ( __const__ ) ) ; extern long double __truncl ( long double __x ) __attribute__ ( ( __const__ ) ) ;
# 327 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double remquol ( long double __x , long double __y , int * __quo ) ; extern long double __remquol ( long double __x , long double __y , int * __quo ) ;
# 331 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
# 334 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long int lrintl ( long double __x ) ; extern long int __lrintl ( long double __x ) ;
# 337 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long long int llrintl ( long double __x ) ; extern long long int __llrintl ( long double __x ) ;
# 340 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long int lroundl ( long double __x ) ; extern long int __lroundl ( long double __x ) ;
# 343 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long long int llroundl ( long double __x ) ; extern long long int __llroundl ( long double __x ) ;
# 346 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double fdiml ( long double __x , long double __y ) ; extern long double __fdiml ( long double __x , long double __y ) ;
# 349 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double fmaxl ( long double __x , long double __y ) __attribute__ ( ( __const__ ) ) ; extern long double __fmaxl ( long double __x , long double __y ) __attribute__ ( ( __const__ ) ) ;
# 352 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double fminl ( long double __x , long double __y ) __attribute__ ( ( __const__ ) ) ; extern long double __fminl ( long double __x , long double __y ) __attribute__ ( ( __const__ ) ) ;
# 356 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern int __fpclassifyl ( long double __value )
__attribute__ ( ( __const__ ) ) ;
# 360 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern int __signbitl ( long double __value )
__attribute__ ( ( __const__ ) ) ;
# 365 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double fmal ( long double __x , long double __y , long double __z ) ; extern long double __fmal ( long double __x , long double __y , long double __z ) ;
# 382 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double scalbl ( long double __x , long double __n ) ; extern long double __scalbl ( long double __x , long double __n ) ;
# 152 "/usr/include/math.h" <System_Header>
# 167 "/usr/include/math.h" <System_Header>
extern int signgam ;
# 172 "/usr/include/math.h" <System_Header>
# 206 "/usr/include/math.h" <System_Header>
# 208 "/usr/include/math.h" <System_Header>
enum
{
FP_NAN =
# 213 "/usr/include/math.h" <System_Header>
0 ,
FP_INFINITE =
# 216 "/usr/include/math.h" <System_Header>
1 ,
FP_ZERO =
# 219 "/usr/include/math.h" <System_Header>
2 ,
FP_SUBNORMAL =
# 222 "/usr/include/math.h" <System_Header>
3 ,
FP_NORMAL =
# 225 "/usr/include/math.h" <System_Header>
4
} ;
# 230 "/usr/include/math.h" <System_Header>
# 232 "/usr/include/math.h" <System_Header>
# 248 "/usr/include/math.h" <System_Header>
# 268 "/usr/include/math.h" <System_Header>
# 282 "/usr/include/math.h" <System_Header>
# 290 "/usr/include/math.h" <System_Header>
# 304 "/usr/include/math.h" <System_Header>
# 318 "/usr/include/math.h" <System_Header>
# 324 "/usr/include/math.h" <System_Header>
# 346 "/usr/include/math.h" <System_Header>
typedef enum
{
_IEEE_ = - 1 ,
_SVID_ ,
_XOPEN_ ,
_POSIX_ ,
_ISOC_
} _LIB_VERSION_TYPE ;
# 358 "/usr/include/math.h" <System_Header>
extern _LIB_VERSION_TYPE _LIB_VERSION ;
# 368 "/usr/include/math.h" <System_Header>
# 372 "/usr/include/math.h" <System_Header>
struct exception
# 374 "/usr/include/math.h" <System_Header>
{
int type ;
char * name ;
double arg1 ;
double arg2 ;
double retval ;
} ;
# 385 "/usr/include/math.h" <System_Header>
extern int matherr ( struct exception * __exc ) ;
# 390 "/usr/include/math.h" <System_Header>
# 398 "/usr/include/math.h" <System_Header>
# 411 "/usr/include/math.h" <System_Header>
# 430 "/usr/include/math.h" <System_Header>
# 450 "/usr/include/math.h" <System_Header>
# 470 "/usr/include/math.h" <System_Header>
# 476 "/usr/include/math.h" <System_Header>
# 482 "/usr/include/math.h" <System_Header>
# 484 "/usr/include/math.h" <System_Header>
# 492 "/usr/include/math.h" <System_Header>
# 500 "/usr/include/math.h" <System_Header>
# 508 "/usr/include/math.h" <System_Header>
# 516 "/usr/include/math.h" <System_Header>
# 524 "/usr/include/math.h" <System_Header>
# 60 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
# 254 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
# 301 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
# 310 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
double __builtin_acos ( double ) ;
# 313 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
double __builtin_asin ( double ) ;
# 316 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
double __builtin_atan2 ( double , double ) ;
# 319 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
double __builtin_atan ( double ) ;
# 322 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
double __builtin_tan ( double ) ;
# 325 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
double __builtin_cos ( double ) ;
# 328 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
double __builtin_sin ( double ) ;
# 331 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
double __builtin_fabs ( double ) ;
# 334 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
double __builtin_sqrt ( double ) ;
# 337 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
double __builtin_log ( double ) ;
# 340 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
double __builtin_log10 ( double ) ;
# 343 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
double __builtin_exp ( double ) ;
# 346 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
double __builtin_pow ( double , double ) ;
# 350 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
double __builtin_fmin ( double , double ) ;
# 353 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
float __builtin_fminf ( float , float ) ;
# 356 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
double __builtin_fmax ( double , double ) ;
# 359 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
float __builtin_fmaxf ( float , float ) ;
# 362 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
float __builtin_acosf ( float ) ;
# 365 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
float __builtin_asinf ( float ) ;
# 368 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
float __builtin_atan2f ( float , float ) ;
# 371 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
float __builtin_atanf ( float ) ;
# 374 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
float __builtin_tanf ( float ) ;
# 377 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
float __builtin_cosf ( float ) ;
# 380 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
float __builtin_sinf ( float ) ;
# 383 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
float __builtin_fabsf ( float ) ;
# 386 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
float __builtin_sqrtf ( float ) ;
# 389 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
float __builtin_logf ( float ) ;
# 392 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
float __builtin_log10f ( float ) ;
# 395 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
float __builtin_expf ( float ) ;
# 398 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
float __builtin_powf ( float , float ) ;
# 406 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
# 418 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
#pragma libm ( acosf , acoshf , asinf , asinhf , atanhf , atan2f )
#pragma libm ( cbrtf , ceilf , copysignf , cosf , coshf )
#pragma libm ( erff , erfcf , expf , exp2f , exp10f , expm1f )
#pragma libm ( fabsf , floorf , fmaf , fminf , fmaxf )
#pragma libm ( ilogbf )
#pragma libm ( ldexpf , lgammaf , llrintf , llroundf , logbf , log1pf , logf , log2f , log10f , lrintf , lroundf )
#pragma libm ( nanf , nearbyintf , nextafterf )
#pragma libm ( powf )
#pragma libm ( remainderf , remquof , rintf , roundf , rsqrtf )
#pragma libm ( scalblnf , scalbnf , sinf , sinhf , sqrtf )
#pragma libm ( tanf , tanhf , tgammaf , truncf )
#pragma libm ( abs , acos , acosh , asin , asinh , atanh , atan2 )
#pragma libm ( cbrt , ceil , copysign , cos , cosh )
#pragma libm ( erf , erfc , exp , exp2 , exp10 , expm1 )
#pragma libm ( fabs , floor , fma , fmin , fmax )
#pragma libm ( ilogb , isinf , isfinite , isnan )
#pragma libm ( ldexp , lgamma , llrint , llround , logb , log1p , log , log2 , log10 , lrint , lround )
#pragma libm ( pow )
#pragma libm ( nan , nearbyint , nextafter )
#pragma libm ( remainder , remquo , rint , round , rsqrt )
#pragma libm ( scalbln , scalbn , sin , sinh , sqrt )
#pragma libm ( tan , tanh , tgamma , trunc )
# 743 "main.c"
# 743 "main.c"
# 1 "../../common/type.h"
# 4 "../../common/type.h"
/* Project-wide boolean type (pre-C99/stdbool style): false == 0, true == 1. */
typedef enum { false , true } logical ;
/* Double-precision complex value: real and imaginary components. */
typedef struct {
double real ;   /* real part */
double imag ;   /* imaginary part */
} dcomplex ;
# 744 "main.c"
# 746 "main.c"
/*
 * print_results: emit the standard NPB benchmark completion report to stdout.
 *
 * name        benchmark name (e.g. "EP"); for "EP" the problem size is shown
 *             as 2^n1, otherwise as n1 (1-D) or n1 x n2 x n3 (3-D)
 * class       problem class letter ('S', 'W', 'A', ...)
 * n1,n2,n3    problem dimensions; n2 == 0 && n3 == 0 selects the 1-D report
 * niter       number of iterations performed
 * t           elapsed time in seconds
 * mops        measured Mop/s
 * optype      description of the operation type measured
 * verified    result of the verification test (logical true/false)
 * npbversion  NPB version string
 * compiletime compile date string
 * cs1..cs7    build-configuration strings (CC, CLINK, C_LIB, C_INC,
 *             CFLAGS, CLINKFLAGS, RAND)
 *
 * Output only; no return value, no side effects beyond stdout.
 */
void print_results ( char * name , char class , int n1 , int n2 , int n3 , int niter ,
double t , double mops , char * optype , logical verified , char * npbversion ,
char * compiletime , char * cs1 , char * cs2 , char * cs3 , char * cs4 , char * cs5 ,
char * cs6 , char * cs7 )
{
char size [ 16 ] ;
int j ;

printf ( "\n\n %s Benchmark Completed.\n" , name ) ;
printf ( " Class           =             %12c\n" , class ) ;

if ( ( n2 == 0 ) && ( n3 == 0 ) ) {
if ( ( name [ 0 ] == 'E' ) && ( name [ 1 ] == 'P' ) ) {
/* EP reports its size as 2^n1. snprintf (not sprintf) so a large
   exponent cannot overrun the 16-byte buffer: "%15.0lf" produces
   more than 15 characters once 2^n1 exceeds ~1e15. */
snprintf ( size , sizeof size , "%15.0lf" , __builtin_pow ( 2.0 , n1 ) ) ;
j = 14 ;
/* Strip a trailing '.' some libc printf implementations emit. */
if ( size [ j ] == '.' ) {
size [ j ] = ' ' ;
j -- ;
}
size [ j + 1 ] = '\0' ;
printf ( " Size            =          %15s\n" , size ) ;
} else {
printf ( " Size            =             %12d\n" , n1 ) ;
}
} else {
printf ( " Size            =           %4dx%4dx%4d\n" , n1 , n2 , n3 ) ;
}

printf ( " Iterations      =             %12d\n" , niter ) ;
printf ( " Time in seconds =             %12.2lf\n" , t ) ;
printf ( " Mop/s total     =          %15.2lf\n" , mops ) ;
printf ( " Operation type  = %24s\n" , optype ) ;
if ( verified )
printf ( " Verification    =             %12s\n" , "SUCCESSFUL" ) ;
else
printf ( " Verification    =             %12s\n" , "UNSUCCESSFUL" ) ;
printf ( " Version         =             %12s\n" , npbversion ) ;
printf ( " Compile date    =             %12s\n" , compiletime ) ;
printf ( "\n Compile options:\n"
"    CC           = %s\n" , cs1 ) ;
printf ( "    CLINK        = %s\n" , cs2 ) ;
printf ( "    C_LIB        = %s\n" , cs3 ) ;
printf ( "    C_INC        = %s\n" , cs4 ) ;
printf ( "    CFLAGS       = %s\n" , cs5 ) ;
printf ( "    CLINKFLAGS   = %s\n" , cs6 ) ;
printf ( "    RAND         = %s\n" , cs7 ) ;

printf ( "\n--------------------------------------\n"
" Please send all errors/feedbacks to:\n"
" Center for Manycore Programming\n"
" cmp@aces.snu.ac.kr\n"
" http://aces.snu.ac.kr\n"
"--------------------------------------\n\n" ) ;
}
# 806 "main.c"
# 1 "/usr/include/stdio.h" <System_Header>
# 17 "/usr/include/stdio.h" <System_Header>
# 21 "/usr/include/stdio.h" <System_Header>
# 807 "main.c"
# 807 "main.c"
# 1 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
# 10 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
# 14 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
# 808 "main.c"
# 809 "main.c"
/*
 * randlc: one step of the NPB pseudo-random number generator.
 *
 * Advances the seed  x_{k+1} = a * x_k (mod 2^46)  in place through *x and
 * returns 2^-46 * x_{k+1}, a value in (0, 1).  The 46-bit modular product
 * is built from 23-bit halves so every intermediate fits exactly in an
 * IEEE double; the operation order is significant and must not be changed.
 */
double randlc ( double * x , double a )
{
  const double r23 = 1.1920928955078125e-07 ;  /* 2^-23 */
  const double r46 = r23 * r23 ;               /* 2^-46 */
  const double t23 = 8.388608e+06 ;            /* 2^23  */
  const double t46 = t23 * t23 ;               /* 2^46  */

  /* Split the multiplier: a = a_hi * 2^23 + a_lo. */
  double scaled = r23 * a ;
  double a_hi = ( int ) scaled ;
  double a_lo = a - t23 * a_hi ;

  /* Split the seed: x = x_hi * 2^23 + x_lo. */
  scaled = r23 * ( * x ) ;
  double x_hi = ( int ) scaled ;
  double x_lo = * x - t23 * x_hi ;

  /* Cross terms of the product, reduced mod 2^23. */
  double mid = a_hi * x_lo + a_lo * x_hi ;
  double mid_carry = ( int ) ( r23 * mid ) ;
  double z = mid - t23 * mid_carry ;

  /* Assemble the low 46 bits of a*x and reduce mod 2^46. */
  double full = t23 * z + a_lo * x_lo ;
  double wrap = ( int ) ( r46 * full ) ;
  * x = full - t46 * wrap ;

  /* Normalize the new seed into (0,1). */
  return r46 * ( * x ) ;
}
# 876 "main.c"
/*
 * vranlc: vectorized variant of randlc.
 *
 * Fills y[0..n-1] with n successive pseudo-random values in (0, 1) from the
 * sequence  x_{k+1} = a * x_k (mod 2^46), advancing the seed *x in place.
 * The multiplier split is hoisted out of the loop; the per-element
 * floating-point operation order matches randlc exactly and must not change.
 */
void vranlc ( int n , double * x , double a , double y [ ] )
{
  const double r23 = 1.1920928955078125e-07 ;  /* 2^-23 */
  const double r46 = r23 * r23 ;               /* 2^-46 */
  const double t23 = 8.388608e+06 ;            /* 2^23  */
  const double t46 = t23 * t23 ;               /* 2^46  */

  /* Split the multiplier once: a = a_hi * 2^23 + a_lo. */
  double scaled = r23 * a ;
  double a_hi = ( int ) scaled ;
  double a_lo = a - t23 * a_hi ;

  for ( int i = 0 ; i < n ; i ++ ) {
    /* Split the current seed: x = x_hi * 2^23 + x_lo. */
    double seed_scaled = r23 * ( * x ) ;
    double x_hi = ( int ) seed_scaled ;
    double x_lo = * x - t23 * x_hi ;

    /* Cross terms, reduced mod 2^23. */
    double mid = a_hi * x_lo + a_lo * x_hi ;
    double mid_carry = ( int ) ( r23 * mid ) ;
    double z = mid - t23 * mid_carry ;

    /* Low 46 bits of a*x, reduced mod 2^46, become the next seed. */
    double full = t23 * z + a_lo * x_lo ;
    double wrap = ( int ) ( r46 * full ) ;
    * x = full - t46 * wrap ;

    y [ i ] = r46 * ( * x ) ;
  }

  return ;
}
# 948 "main.c"
# 1 "../../common/wtime.h"
# 3 "../../common/wtime.h"
# 949 "main.c"
# 949 "main.c"
# 1 "/usr/include/time.h" <System_Header>
# 16 "/usr/include/time.h" <System_Header>
# 20 "/usr/include/time.h" <System_Header>
# 27 "/usr/include/time.h" <System_Header>
# 1 "/usr/include/features.h" <System_Header>
# 16 "/usr/include/features.h" <System_Header>
# 28 "/usr/include/time.h" <System_Header>
# 34 "/usr/include/time.h" <System_Header>
# 37 "/usr/include/time.h" <System_Header>
# 1 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header>
# 10 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header>
# 14 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header>
# 16 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header>
# 1 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 22 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 26 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 35 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 47 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 50 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 54 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 62 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 93 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 103 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 121 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 123 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 126 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 165 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 168 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 245 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 248 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 363 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 398 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 17 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header>
# 38 "/usr/include/time.h" <System_Header>
# 40 "/usr/include/time.h" <System_Header>
# 41 "/usr/include/time.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/time.h" <System_Header>
# 17 "/usr/include/x86_64-linux-gnu/bits/time.h" <System_Header>
# 21 "/usr/include/x86_64-linux-gnu/bits/time.h" <System_Header>
# 44 "/usr/include/x86_64-linux-gnu/bits/time.h" <System_Header>
# 47 "/usr/include/x86_64-linux-gnu/bits/time.h" <System_Header>
# 60 "/usr/include/x86_64-linux-gnu/bits/time.h" <System_Header>
# 62 "/usr/include/x86_64-linux-gnu/bits/time.h" <System_Header>
# 64 "/usr/include/x86_64-linux-gnu/bits/time.h" <System_Header>
# 66 "/usr/include/x86_64-linux-gnu/bits/time.h" <System_Header>
# 68 "/usr/include/x86_64-linux-gnu/bits/time.h" <System_Header>
# 70 "/usr/include/x86_64-linux-gnu/bits/time.h" <System_Header>
# 72 "/usr/include/x86_64-linux-gnu/bits/time.h" <System_Header>
# 74 "/usr/include/x86_64-linux-gnu/bits/time.h" <System_Header>
# 76 "/usr/include/x86_64-linux-gnu/bits/time.h" <System_Header>
# 78 "/usr/include/x86_64-linux-gnu/bits/time.h" <System_Header>
# 80 "/usr/include/x86_64-linux-gnu/bits/time.h" <System_Header>
# 83 "/usr/include/x86_64-linux-gnu/bits/time.h" <System_Header>
# 42 "/usr/include/time.h" <System_Header>
# 43 "/usr/include/time.h" <System_Header>
# 132 "/usr/include/time.h" <System_Header>
struct tm
{
int tm_sec ;
int tm_min ;
int tm_hour ;
int tm_mday ;
int tm_mon ;
int tm_year ;
int tm_wday ;
int tm_yday ;
int tm_isdst ;
# 146 "/usr/include/time.h" <System_Header>
long int tm_gmtoff ;
const char * tm_zone ;
# 152 "/usr/include/time.h" <System_Header>
} ;
# 160 "/usr/include/time.h" <System_Header>
struct itimerspec
{
struct timespec it_interval ;
struct timespec it_value ;
} ;
# 167 "/usr/include/time.h" <System_Header>
struct sigevent ;
# 188 "/usr/include/time.h" <System_Header>
extern clock_t clock ( void ) ;
# 191 "/usr/include/time.h" <System_Header>
extern time_t time ( time_t * __timer ) ;
# 194 "/usr/include/time.h" <System_Header>
extern double difftime ( time_t __time1 , time_t __time0 )
__attribute__ ( ( __const__ ) ) ;
# 198 "/usr/include/time.h" <System_Header>
extern time_t mktime ( struct tm * __tp ) ;
# 204 "/usr/include/time.h" <System_Header>
extern size_t strftime ( char * __restrict __s , size_t __maxsize ,
const char * __restrict __format ,
const struct tm * __restrict __tp ) ;
# 220 "/usr/include/time.h" <System_Header>
# 221 "/usr/include/time.h" <System_Header>
# 1 "/usr/include/xlocale.h" <System_Header>
# 18 "/usr/include/xlocale.h" <System_Header>
# 222 "/usr/include/time.h" <System_Header>
# 223 "/usr/include/time.h" <System_Header>
extern size_t strftime_l ( char * __restrict __s , size_t __maxsize ,
const char * __restrict __format ,
const struct tm * __restrict __tp ,
__locale_t __loc ) ;
# 238 "/usr/include/time.h" <System_Header>
extern struct tm * gmtime ( const time_t * __timer ) ;
# 242 "/usr/include/time.h" <System_Header>
extern struct tm * localtime ( const time_t * __timer ) ;
# 248 "/usr/include/time.h" <System_Header>
extern struct tm * gmtime_r ( const time_t * __restrict __timer ,
struct tm * __restrict __tp ) ;
# 253 "/usr/include/time.h" <System_Header>
extern struct tm * localtime_r ( const time_t * __restrict __timer ,
struct tm * __restrict __tp ) ;
# 260 "/usr/include/time.h" <System_Header>
extern char * asctime ( const struct tm * __tp ) ;
# 263 "/usr/include/time.h" <System_Header>
extern char * ctime ( const time_t * __timer ) ;
# 268 "/usr/include/time.h" <System_Header>
# 271 "/usr/include/time.h" <System_Header>
extern char * asctime_r ( const struct tm * __restrict __tp ,
char * __restrict __buf ) ;
# 275 "/usr/include/time.h" <System_Header>
extern char * ctime_r ( const time_t * __restrict __timer ,
char * __restrict __buf ) ;
# 281 "/usr/include/time.h" <System_Header>
extern char * __tzname [ 2 ] ;
extern int __daylight ;
extern long int __timezone ;
# 288 "/usr/include/time.h" <System_Header>
extern char * tzname [ 2 ] ;
# 292 "/usr/include/time.h" <System_Header>
extern void tzset ( void ) ;
# 297 "/usr/include/time.h" <System_Header>
extern int daylight ;
extern long int timezone ;
# 303 "/usr/include/time.h" <System_Header>
extern int stime ( const time_t * __when ) ;
# 309 "/usr/include/time.h" <System_Header>
# 316 "/usr/include/time.h" <System_Header>
# 318 "/usr/include/time.h" <System_Header>
extern time_t timegm ( struct tm * __tp ) ;
# 321 "/usr/include/time.h" <System_Header>
extern time_t timelocal ( struct tm * __tp ) ;
# 324 "/usr/include/time.h" <System_Header>
extern int dysize ( int __year ) __attribute__ ( ( __const__ ) ) ;
# 333 "/usr/include/time.h" <System_Header>
extern int nanosleep ( const struct timespec * __requested_time ,
struct timespec * __remaining ) ;
# 338 "/usr/include/time.h" <System_Header>
extern int clock_getres ( clockid_t __clock_id , struct timespec * __res ) ;
# 341 "/usr/include/time.h" <System_Header>
extern int clock_gettime ( clockid_t __clock_id , struct timespec * __tp ) ;
# 344 "/usr/include/time.h" <System_Header>
extern int clock_settime ( clockid_t __clock_id , const struct timespec * __tp )
;
# 352 "/usr/include/time.h" <System_Header>
extern int clock_nanosleep ( clockid_t __clock_id , int __flags ,
const struct timespec * __req ,
struct timespec * __rem ) ;
# 357 "/usr/include/time.h" <System_Header>
extern int clock_getcpuclockid ( pid_t __pid , clockid_t * __clock_id ) ;
# 362 "/usr/include/time.h" <System_Header>
extern int timer_create ( clockid_t __clock_id ,
struct sigevent * __restrict __evp ,
timer_t * __restrict __timerid ) ;
# 367 "/usr/include/time.h" <System_Header>
extern int timer_delete ( timer_t __timerid ) ;
# 370 "/usr/include/time.h" <System_Header>
extern int timer_settime ( timer_t __timerid , int __flags ,
const struct itimerspec * __restrict __value ,
struct itimerspec * __restrict __ovalue ) ;
# 375 "/usr/include/time.h" <System_Header>
extern int timer_gettime ( timer_t __timerid , struct itimerspec * __value )
;
# 379 "/usr/include/time.h" <System_Header>
extern int timer_getoverrun ( timer_t __timerid ) ;
# 950 "main.c"
# 951 "main.c"
# 1 "/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header>
# 16 "/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header>
# 21 "/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header>
# 1 "/usr/include/features.h" <System_Header>
# 16 "/usr/include/features.h" <System_Header>
# 22 "/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header>
# 23 "/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 17 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 21 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 24 "/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header>
# 25 "/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header>
# 1 "/usr/include/time.h" <System_Header>
# 16 "/usr/include/time.h" <System_Header>
# 20 "/usr/include/time.h" <System_Header>
# 26 "/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header>
# 27 "/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/time.h" <System_Header>
# 17 "/usr/include/x86_64-linux-gnu/bits/time.h" <System_Header>
# 21 "/usr/include/x86_64-linux-gnu/bits/time.h" <System_Header>
# 28 "/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header>
# 29 "/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header>
# 17 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header>
# 19 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header>
# 30 "/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header>
# 54 "/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header>
struct timezone
{
int tz_minuteswest ;
int tz_dsttime ;
} ;
# 61 "/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header>
typedef struct timezone * __restrict __timezone_ptr_t ;
# 70 "/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header>
extern int gettimeofday ( struct timeval * __restrict __tv ,
__timezone_ptr_t __tz ) ;
# 76 "/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header>
extern int settimeofday ( const struct timeval * __tv ,
const struct timezone * __tz )
;
# 84 "/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header>
extern int adjtime ( const struct timeval * __delta ,
struct timeval * __olddelta ) ;
# 90 "/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header>
enum __itimer_which
{
ITIMER_REAL = 0 ,
# 96 "/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header>
ITIMER_VIRTUAL = 1 ,
# 99 "/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header>
ITIMER_PROF = 2
# 103 "/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header>
} ;
# 106 "/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header>
struct itimerval
{
struct timeval it_interval ;
struct timeval it_value ;
} ;
# 120 "/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header>
typedef int __itimer_which_t ;
# 124 "/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header>
extern int getitimer ( __itimer_which_t __which ,
struct itimerval * __value ) ;
# 130 "/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header>
extern int setitimer ( __itimer_which_t __which ,
const struct itimerval * __restrict __new ,
struct itimerval * __restrict __old ) ;
# 137 "/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header>
extern int utimes ( const char * __file , const struct timeval __tvp [ 2 ] )
;
# 142 "/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header>
extern int lutimes ( const char * __file , const struct timeval __tvp [ 2 ] )
;
# 146 "/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header>
extern int futimes ( int __fd , const struct timeval __tvp [ 2 ] ) ;
# 161 "/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header>
# 952 "main.c"
# 954 "main.c"
/* Fortran-callable wall-clock timer: writes into *t the seconds elapsed
   since the first call (first call returns a small fractional value). */
void wtime_ ( double * t )
{
/* tv_sec of the first call, subtracted out so the double keeps microsecond
   precision; NOTE(review): int narrows time_t — fine for deltas, confirm. */
static int sec = - 1 ;
struct timeval tv ;
gettimeofday ( & tv , ( void * ) 0 ) ;
if ( sec < 0 ) sec = tv . tv_sec ;
* t = ( tv . tv_sec - sec ) + 1.0e-6 * tv . tv_usec ;
}
# 963 "main.c"
# 992 "main.c"
# 997 "main.c"
# 1023 "main.c"
# 1027 "main.c"
# 1 "/usr/include/stdio.h" <System_Header>
# 17 "/usr/include/stdio.h" <System_Header>
# 21 "/usr/include/stdio.h" <System_Header>
# 1028 "main.c"
# 1028 "main.c"
# 1 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header>
# 10 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header>
# 17 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header>
# 19 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header>
# 1 "/opt/pgi/linux86-64/17.10/include/stdlib.h" <System_Header>
# 3 "/opt/pgi/linux86-64/17.10/include/stdlib.h" <System_Header>
# 20 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header>
# 25 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header>
extern void * malloc_managed ( size_t ) ;
extern void * calloc_managed ( size_t , size_t ) ;
extern void free_managed ( void * ) ;
extern void cfree_managed ( void * ) ;
# 31 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header>
extern void * realloc_managed ( void * , size_t ) ;
extern void * valloc_managed ( size_t ) ;
extern void * pvalloc_managed ( size_t ) ;
extern void * memalign_managed ( size_t , size_t ) ;
extern int posix_memalign_managed ( void * * , size_t , size_t ) ;
extern char * tmpnam_managed ( char * ) ;
# 43 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header>
# 58 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header>
# 1029 "main.c"
# 1029 "main.c"
# 1 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
# 10 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
# 14 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
# 1030 "main.c"
# 1031 "main.c"
# 1 "../globals.h"
# 29 "../globals.h"
# 34 "../globals.h"
# 1 "../npbparams.h"
# 6 "../npbparams.h"
# 35 "../globals.h"
# 35 "../globals.h"
# 1 "../../common/type.h"
# 36 "../globals.h"
# 37 "../globals.h"
# 49 "../globals.h"
# 56 "../globals.h"
# 65 "../globals.h"
# 74 "../globals.h"
# 83 "../globals.h"
# 92 "../globals.h"
# 101 "../globals.h"
# 110 "../globals.h"
# 1032 "main.c"
# 1032 "main.c"
# 1 "../../common/randdp.h"
# 4 "../../common/randdp.h"
double randlc ( double * x , double a ) ;
void vranlc ( int n , double * x , double a , double y [ ] ) ;
# 1033 "main.c"
# 1033 "main.c"
# 1 "../../common/timers.h"
# 4 "../../common/timers.h"
void timer_clear ( int n ) ;
void timer_start ( int n ) ;
void timer_stop ( int n ) ;
double timer_read ( int n ) ;
# 1034 "main.c"
# 1034 "main.c"
# 1 "../../common/print_results.h"
# 4 "../../common/print_results.h"
void print_results ( char * name , char class , int n1 , int n2 , int n3 , int niter ,
double t , double mops , char * optype , int verified , char * npbversion ,
char * compiletime , char * cs1 , char * cs2 , char * cs3 , char * cs4 , char * cs5 ,
char * cs6 , char * cs7 ) ;
# 1035 "main.c"
# 1035 "main.c"
# 1 "/opt/pgi/linux86-64/17.10/include/openacc.h" <System_Header>
# 10 "/opt/pgi/linux86-64/17.10/include/openacc.h" <System_Header>
# 1036 "main.c"
# 1036 "main.c"
unsigned int nz = ( 150000 * ( 15 + 1 ) * ( 15 + 1 ) ) ;
unsigned int naz = ( 150000 * ( 15 + 1 ) ) ;
unsigned int na = 150000 ;
static int colidx [ ( 150000 * ( 15 + 1 ) * ( 15 + 1 ) ) ] ;
static int rowstr [ 150000 + 1 ] ;
static int iv [ 150000 ] ;
static int arow [ 150000 ] ;
static int acol [ ( 150000 * ( 15 + 1 ) ) ] ;
# 1047 "main.c"
static double aelt [ ( 150000 * ( 15 + 1 ) ) ] ;
static double a [ ( 150000 * ( 15 + 1 ) * ( 15 + 1 ) ) ] ;
static double x [ 150000 + 2 ] ;
static double z [ 150000 + 2 ] ;
static double p [ 150000 + 2 ] ;
static double q [ 150000 + 2 ] ;
static double r [ 150000 + 2 ] ;
# 1056 "main.c"
static int naa ;
static int nzz ;
static int firstrow ;
static int lastrow ;
static int firstcol ;
static int lastcol ;
# 1064 "main.c"
static double amult ;
static double tran ;
# 1068 "main.c"
static logical timeron ;
# 1073 "main.c"
static void conj_grad ( int colidx [ ] ,
int rowstr [ ] ,
double x [ ] ,
double z [ ] ,
double a [ ] ,
double p [ ] ,
double q [ ] ,
double r [ ] ,
double * rnorm ) ;
static void makea ( int n ,
int nz ,
double a [ ] ,
int colidx [ ] ,
int rowstr [ ] ,
int firstrow ,
int lastrow ,
int firstcol ,
int lastcol ,
int arow [ ] ,
int acol [ ] [ 15 + 1 ] ,
double aelt [ ] [ 15 + 1 ] ,
int iv [ ] ) ;
static void sparse ( double a [ ] ,
int colidx [ ] ,
int rowstr [ ] ,
int n ,
int nz ,
int nozer ,
int arow [ ] ,
int acol [ ] [ 15 + 1 ] ,
double aelt [ ] [ 15 + 1 ] ,
int firstrow ,
int lastrow ,
int nzloc [ ] ,
double rcond ,
double shift ) ;
static void sprnvc ( int n , int nz , int nn1 , double v [ ] , int iv [ ] ) ;
static int icnvrt ( double x , int ipwr2 ) ;
static void vecset ( int n , double v [ ] , int iv [ ] , int * nzv , int i , double val ) ;
static int conj_calls = 0 ;
static int loop_iter = 0 ;
# 1118 "main.c"
/* NPB CG benchmark driver (class C constants preprocessed in: na=150000,
   nonzer=15, niter=75, shift=110.0). Builds a random sparse matrix, runs one
   untimed warm-up CG solve, then 75 timed solves, and verifies zeta. */
int main ( int argc , char * argv [ ] )
{
int i , j , k , it ;
int end ;
# 1123 "main.c"
double zeta ;
double rnorm ;
double norm_temp1 , norm_temp2 ;
# 1127 "main.c"
double t , mflops , tmax ;
char Class ;
int verified ;
double zeta_verify_value , epsilon , err ;
# 1132 "main.c"
char * t_names [ 3 ] ;
acc_init ( acc_device_default ) ;
# 1135 "main.c"
for ( i = 0 ; i < 3 ; i ++ ) {
timer_clear ( i ) ;
}
/* Section timing is opt-in: enabled only if a file named timer.flag exists. */
FILE * fp ;
if ( ( fp = fopen ( "timer.flag" , "r" ) ) != ( ( void * ) 0 ) ) {
timeron = true ;
t_names [ 0 ] = "init" ;
t_names [ 1 ] = "benchmk" ;
t_names [ 2 ] = "conjgd" ;
fclose ( fp ) ;
} else {
timeron = false ;
}
# 1150 "main.c"
timer_start ( 0 ) ;
# 1152 "main.c"
firstrow = 0 ;
lastrow = 150000 - 1 ;
firstcol = 0 ;
lastcol = 150000 - 1 ;
/* Map the compiled-in problem constants to an NPB class letter and its
   reference zeta; only the class-C branch can match in this build. */
# 1157 "main.c"
if ( 150000 == 1400 && 15 == 7 && 75 == 15 && 110.0 == 10 ) {
Class = 'S' ;
zeta_verify_value = 8.5971775078648 ;
} else if ( 150000 == 7000 && 15 == 8 && 75 == 15 && 110.0 == 12 ) {
Class = 'W' ;
zeta_verify_value = 10.362595087124 ;
} else if ( 150000 == 14000 && 15 == 11 && 75 == 15 && 110.0 == 20 ) {
Class = 'A' ;
zeta_verify_value = 17.130235054029 ;
} else if ( 150000 == 75000 && 15 == 13 && 75 == 75 && 110.0 == 60 ) {
Class = 'B' ;
zeta_verify_value = 22.712745482631 ;
} else if ( 150000 == 150000 && 15 == 15 && 75 == 75 && 110.0 == 110 ) {
Class = 'C' ;
zeta_verify_value = 28.973605592845 ;
} else if ( 150000 == 1500000 && 15 == 21 && 75 == 100 && 110.0 == 500 ) {
Class = 'D' ;
zeta_verify_value = 52.514532105794 ;
} else if ( 150000 == 9000000 && 15 == 26 && 75 == 100 && 110.0 == 1500 ) {
Class = 'E' ;
zeta_verify_value = 77.522164599383 ;
} else {
Class = 'U' ;
}
# 1182 "main.c"
printf ( "\n\n NAS Parallel Benchmarks (NPB3.3-ACC-C) - CG Benchmark\n\n" ) ;
printf ( " Size: %11d\n" , 150000 ) ;
printf ( " Iterations: %5d\n" , 75 ) ;
printf ( "\n" ) ;
# 1187 "main.c"
naa = 150000 ;
nzz = ( 150000 * ( 15 + 1 ) * ( 15 + 1 ) ) ;
/* Seed the NPB linear-congruential generator used by makea/sprnvc. */
# 1190 "main.c"
tran = 314159265.0 ;
amult = 1220703125.0 ;
zeta = randlc ( & tran , amult ) ;
# 1197 "main.c"
makea ( naa , nzz , a , colidx , rowstr ,
firstrow , lastrow , firstcol , lastcol ,
arow ,
( int ( * ) [ 15 + 1 ] ) ( void * ) acol ,
( double ( * ) [ 15 + 1 ] ) ( void * ) aelt ,
iv ) ;
/* Shift column indices so local column numbering starts at zero. */
# 1207 "main.c"
for ( j = 0 ; j < lastrow - firstrow + 1 ; j ++ ) {
for ( k = rowstr [ j ] ; k < rowstr [ j + 1 ] ; k ++ ) {
colidx [ k ] = colidx [ k ] - firstcol ;
}
}
/* One device-data region covers warm-up and the timed iterations; the matrix
   is copied in once, work vectors live only on the device. */
# 1222 "main.c"
#pragma acc data copyin ( colidx [ 0 : nz ] , a [ 0 : nz ] , rowstr [ 0 : na + 1 ] ) create ( x [ 0 : na + 2 ] , z [ 0 : na + 2 ] , p [ 0 : na + 2 ] , q [ 0 : na + 2 ] , r [ 0 : na + 2 ] )
# 1227 "main.c"
{
int na_gangs = 150000 + 1 ;
# 1232 "main.c"
#pragma acc kernels loop gang ( ( na_gangs + 127 ) / 128 ) vector ( 128 )
for ( i = 0 ; i < 150000 + 1 ; i ++ ) {
x [ i ] = 1.0 ;
}
# 1237 "main.c"
end = lastcol - firstcol + 1 ;
# 1238 "main.c"
#pragma acc kernels loop gang ( ( end + 127 ) / 128 ) vector ( 128 )
for ( j = 0 ; j < end ; j ++ ) {
q [ j ] = 0.0 ;
z [ j ] = 0.0 ;
r [ j ] = 0.0 ;
p [ j ] = 0.0 ;
}
# 1246 "main.c"
zeta = 0.0 ;
/* Untimed warm-up: one CG solve to touch all code paths and device data. */
# 1248 "main.c"
for ( it = 1 ; it <= 1 ; it ++ ) {
conj_grad ( colidx , rowstr , x , z , a , p , q , r , & rnorm ) ;
# 1259 "main.c"
norm_temp1 = 0.0 ;
norm_temp2 = 0.0 ;
# 1267 "main.c"
#pragma acc parallel loop num_gangs ( ( end + 127 ) / 128 ) num_workers ( 4 ) vector_length ( 32 ) reduction ( + : norm_temp2 )
# 1269 "main.c"
for ( j = 0 ; j < end ; j ++ ) {
norm_temp2 = norm_temp2 + z [ j ] * z [ j ] ;
}
# 1274 "main.c"
norm_temp2 = 1.0 / __builtin_sqrt ( norm_temp2 ) ;
# 1276 "main.c"
# 1279 "main.c"
/* x = z / ||z|| : normalized solution becomes the next starting vector. */
#pragma acc kernels loop gang ( ( end + 127 ) / 128 ) vector ( 128 )
for ( j = 0 ; j < end ; j ++ ) {
x [ j ] = norm_temp2 * z [ j ] ;
}
}
/* Reset x so the timed phase starts from the canonical all-ones vector. */
# 1286 "main.c"
na_gangs = 150000 + 1 ;
# 1290 "main.c"
#pragma acc kernels loop gang ( ( na_gangs + 127 ) / 128 ) vector ( 128 )
for ( i = 0 ; i < 150000 + 1 ; i ++ ) {
x [ i ] = 1.0 ;
}
# 1295 "main.c"
zeta = 0.0 ;
# 1297 "main.c"
timer_stop ( 0 ) ;
# 1299 "main.c"
printf ( " Initialization time = %15.3f seconds\n" , timer_read ( 0 ) ) ;
# 1301 "main.c"
timer_start ( 1 ) ;
/* Timed benchmark: 75 inverse-power-method iterations around conj_grad. */
# 1303 "main.c"
for ( it = 1 ; it <= 75 ; it ++ ) {
conj_grad ( colidx , rowstr , x , z , a , p , q , r , & rnorm ) ;
# 1314 "main.c"
norm_temp1 = 0.0 ;
norm_temp2 = 0.0 ;
# 1322 "main.c"
#pragma acc parallel loop gang worker vector num_gangs ( ( end + 127 ) / 128 ) num_workers ( 4 ) vector_length ( 32 ) reduction ( + : norm_temp1 , norm_temp2 )
# 1324 "main.c"
for ( j = 0 ; j < end ; j ++ ) {
norm_temp1 = norm_temp1 + x [ j ] * z [ j ] ;
norm_temp2 = norm_temp2 + z [ j ] * z [ j ] ;
}
# 1329 "main.c"
norm_temp2 = 1.0 / __builtin_sqrt ( norm_temp2 ) ;
/* zeta = shift + 1 / (x,z) — the eigenvalue estimate that gets verified. */
# 1331 "main.c"
zeta = 110.0 + 1.0 / norm_temp1 ;
if ( it == 1 )
printf ( "\n iteration ||r|| zeta\n" ) ;
printf ( " %5d %20.14E%20.13f\n" , it , rnorm , zeta ) ;
# 1336 "main.c"
# 1339 "main.c"
#pragma acc kernels loop gang ( ( end + 127 ) / 128 ) vector ( 128 )
for ( j = 0 ; j < end ; j ++ ) {
x [ j ] = norm_temp2 * z [ j ] ;
}
}
# 1345 "main.c"
timer_stop ( 1 ) ;
}
# 1352 "main.c"
t = timer_read ( 1 ) ;
# 1354 "main.c"
printf ( " Benchmark completed\n" ) ;
/* Verify the final zeta against the class reference within 1e-10. */
# 1356 "main.c"
epsilon = 1.0e-10 ;
if ( Class != 'U' ) {
err = __builtin_fabs ( zeta - zeta_verify_value ) / zeta_verify_value ;
if ( err <= epsilon ) {
verified = true ;
printf ( " VERIFICATION SUCCESSFUL\n" ) ;
printf ( " Zeta is %20.13E\n" , zeta ) ;
printf ( " Error is %20.13E\n" , err ) ;
} else {
verified = false ;
printf ( " VERIFICATION FAILED\n" ) ;
printf ( " Zeta %20.13E\n" , zeta ) ;
printf ( " The correct zeta is %20.13E\n" , zeta_verify_value ) ;
}
} else {
verified = false ;
printf ( " Problem size unknown\n" ) ;
printf ( " NO VERIFICATION PERFORMED\n" ) ;
}
/* Standard NPB CG operation count divided by elapsed time. */
# 1376 "main.c"
if ( t != 0.0 ) {
mflops = ( double ) ( 2 * 75 * 150000 )
* ( 3.0 + ( double ) ( 15 * ( 15 + 1 ) )
+ 25.0 * ( 5.0 + ( double ) ( 15 * ( 15 + 1 ) ) )
+ 3.0 ) / t / 1000000.0 ;
} else {
mflops = 0.0 ;
}
# 1385 "main.c"
print_results ( "CG" , Class , 150000 , 0 , 0 ,
75 , t ,
mflops , " floating point" ,
verified , "3.3.1" , "06 Dec 2017" ,
"icc" , "icc" , "-lm" , "-I../common" , "-O3 -mcmodel=medium" , "-O3 -mcmodel=medium" , "randdp" ) ;
/* Optional per-section timing breakdown (only when timer.flag existed). */
# 1391 "main.c"
if ( timeron ) {
tmax = timer_read ( 1 ) ;
if ( tmax == 0.0 ) tmax = 1.0 ;
printf ( " SECTION Time (secs)\n" ) ;
for ( i = 0 ; i < 3 ; i ++ ) {
t = timer_read ( i ) ;
if ( i == 0 ) {
printf ( " %8s:%9.3f\n" , t_names [ i ] , t ) ;
} else {
printf ( " %8s:%9.3f (%6.2f%%)\n" , t_names [ i ] , t , t * 100.0 / tmax ) ;
if ( i == 2 ) {
t = tmax - t ;
printf ( " --> %8s:%9.3f (%6.2f%%)\n" , "rest" , t , t * 100.0 / tmax ) ;
}
}
}
}
# 1412 "main.c"
acc_shutdown ( acc_device_default ) ;
printf ( "conj calls=%d, loop iter = %d. \n" , conj_calls , loop_iter ) ;
# 1415 "main.c"
return 0 ;
}
# 1419 "main.c"
/* One conjugate-gradient solve of A z = x (cgitmax = 25 fixed iterations) on
   the device; on return z holds the approximate solution and *rnorm the
   residual norm ||x - A z||. All arrays must already be present on the
   device (see the acc data region in main). */
static void conj_grad ( int colidx [ ] ,
int rowstr [ ] ,
double x [ ] ,
double z [ ] ,
double a [ ] ,
double p [ ] ,
double q [ ] ,
double r [ ] ,
double * rnorm )
{
int j , k , tmp1 , tmp2 , tmp3 ;
int end ;
int cgit , cgitmax = 25 ;
double d , sum , rho , rho0 , alpha , beta ;
double sum_array [ 150000 + 2 ] ;  /* NOTE(review): never used in this build */
conj_calls ++ ;
rho = 0.0 ;
unsigned int num_gangs = 0 ;  /* NOTE(review): never used in this build */
# 1441 "main.c"
#pragma acc data present ( colidx [ 0 : nz ] , rowstr [ 0 : na + 1 ] , x [ 0 : na + 2 ] , z [ 0 : na + 2 ] , a [ 0 : nz ] , p [ 0 : na + 2 ] , q [ 0 : na + 2 ] , r [ 0 : na + 2 ] )
# 1446 "main.c"
{
/* Initialization: z = 0, r = p = x, q = 0. */
# 1450 "main.c"
#pragma acc kernels loop gang ( ( naa + 127 ) / 128 ) vector ( 128 ) independent
for ( j = 0 ; j < naa ; j ++ ) {
q [ j ] = 0.0 ;
z [ j ] = 0.0 ;
r [ j ] = x [ j ] ;
p [ j ] = r [ j ] ;
}
# 1458 "main.c"
/* rho = (r, r). */
# 1463 "main.c"
#pragma acc parallel loop gang worker vector num_gangs ( ( lastcol - firstcol + 1 + 127 ) / 128 ) num_workers ( 4 ) vector_length ( 32 ) reduction ( + : rho )
# 1465 "main.c"
for ( j = 0 ; j < lastcol - firstcol + 1 ; j ++ ) {
rho = rho + r [ j ] * r [ j ] ;
}
for ( cgit = 1 ; cgit <= cgitmax ; cgit ++ ) {
# 1501 "main.c"
loop_iter ++ ;
end = lastrow - firstrow + 1 ;
/* q = A p: one gang per CSR row, inner reduction over its nonzeros. */
# 1506 "main.c"
# 1506 "main.c"
#pragma acc parallel num_gangs ( end ) num_workers ( 4 ) vector_length ( 32 )
{
# 1508 "main.c"
#pragma acc loop gang
for ( j = 0 ; j < end ; j ++ ) {
tmp1 = rowstr [ j ] ;
tmp2 = rowstr [ j + 1 ] ;
sum = 0.0 ;
# 1513 "main.c"
#pragma acc loop worker vector reduction ( + : sum )
for ( k = tmp1 ; k < tmp2 ; k ++ ) {
tmp3 = colidx [ k ] ;
sum = sum + a [ k ] * p [ tmp3 ] ;
}
q [ j ] = sum ;
}
}
/* d = (p, q). */
d = 0.0 ;
end = lastcol - firstcol + 1 ;
# 1526 "main.c"
#pragma acc parallel num_gangs ( ( end + 127 ) / 128 ) num_workers ( 4 ) vector_length ( 32 )
{
# 1528 "main.c"
#pragma acc loop gang worker vector reduction ( + : d )
for ( j = 0 ; j < end ; j ++ ) {
d = d + p [ j ] * q [ j ] ;
}
}
# 1534 "main.c"
alpha = rho / d ;
# 1539 "main.c"
rho0 = rho ;
# 1544 "main.c"
rho = 0.0 ;
/* z = z + alpha p ; r = r - alpha q. */
# 1549 "main.c"
#pragma acc kernels loop gang ( ( end + 1023 ) / 1024 ) vector ( 1024 ) independent
for ( j = 0 ; j < end ; j ++ ) {
z [ j ] = z [ j ] + alpha * p [ j ] ;
r [ j ] = r [ j ] - alpha * q [ j ] ;
}
/* rho = (r, r) for the next beta. */
# 1559 "main.c"
#pragma acc parallel num_gangs ( ( end + 127 ) / 128 ) num_workers ( 4 ) vector_length ( 32 )
{
# 1561 "main.c"
#pragma acc loop gang worker vector reduction ( + : rho )
for ( j = 0 ; j < end ; j ++ )
{
rho = rho + r [ j ] * r [ j ] ;
}
}
# 1568 "main.c"
beta = rho / rho0 ;
# 1573 "main.c"
/* p = r + beta p. */
# 1576 "main.c"
#pragma acc kernels loop gang ( ( end + 127 ) / 128 ) vector ( 128 ) independent
for ( j = 0 ; j < end ; j ++ ) {
p [ j ] = r [ j ] + beta * p [ j ] ;
}
}
# 1582 "main.c"
/* Residual check: r = A z, then sum = ||x - r||^2. */
# 1588 "main.c"
end = lastrow - firstrow + 1 ;
# 1590 "main.c"
#pragma acc parallel loop gang num_gangs ( end ) num_workers ( 4 ) vector_length ( 32 )
# 1592 "main.c"
for ( j = 0 ; j < end ; j ++ ) {
tmp1 = rowstr [ j ] ;
tmp2 = rowstr [ j + 1 ] ;
d = 0.0 ;
# 1596 "main.c"
#pragma acc loop worker vector reduction ( + : d )
for ( k = tmp1 ; k < tmp2 ; k ++ ) {
tmp3 = colidx [ k ] ;
d = d + a [ k ] * z [ tmp3 ] ;
}
r [ j ] = d ;
}
sum = 0.0 ;
# 1609 "main.c"
#pragma acc parallel loop gang worker vector num_gangs ( ( lastcol - firstcol + 1 + 127 ) / 128 ) num_workers ( 4 ) vector_length ( 32 ) reduction ( + : sum )
# 1613 "main.c"
for ( j = 0 ; j < lastcol - firstcol + 1 ; j ++ ) {
d = x [ j ] - r [ j ] ;
sum = sum + d * d ;
}
# 1618 "main.c"
}
* rnorm = __builtin_sqrt ( sum ) ;
}
# 1623 "main.c"
/* Generate the benchmark's random sparse matrix: for each of the n outer
   vectors, draw a sparse random vector (sprnvc), force a 0.5 diagonal entry
   (vecset), record its pattern/values in arow/acol/aelt, then assemble the
   final CSR matrix with the requested condition number and shift (sparse).
   Consumes the global PRNG stream (tran/amult), so call order matters. */
static void makea ( int n ,
int nz ,
double a [ ] ,
int colidx [ ] ,
int rowstr [ ] ,
int firstrow ,
int lastrow ,
int firstcol ,
int lastcol ,
int arow [ ] ,
int acol [ ] [ 15 + 1 ] ,
double aelt [ ] [ 15 + 1 ] ,
int iv [ ] )
{
int iouter , ivelt , nzv , nn1 ;
int ivc [ 15 + 1 ] ;
double vc [ 15 + 1 ] ;
# 1666 "main.c"
/* nn1 = smallest power of two >= n, used by sprnvc's index generator. */
# 1670 "main.c"
nn1 = 1 ;
do {
nn1 = 2 * nn1 ;
} while ( nn1 < n ) ;
# 1678 "main.c"
for ( iouter = 0 ; iouter < n ; iouter ++ ) {
nzv = 15 ;
sprnvc ( n , nzv , nn1 , vc , ivc ) ;
vecset ( n , vc , ivc , & nzv , iouter + 1 , 0.5 ) ;
arow [ iouter ] = nzv ;
for ( ivelt = 0 ; ivelt < nzv ; ivelt ++ ) {
/* Store zero-based column indices (sprnvc/vecset produce one-based). */
acol [ iouter ] [ ivelt ] = ivc [ ivelt ] - 1 ;
aelt [ iouter ] [ ivelt ] = vc [ ivelt ] ;
}
}
/* Assemble A = sum of outer products into CSR with rcond=0.1, shift=110. */
# 1693 "main.c"
sparse ( a , colidx , rowstr , n , nz , 15 , arow , acol ,
aelt , firstrow , lastrow ,
iv , 1.0e-1 , 110.0 ) ;
}
# 1703 "main.c"
/* Assemble the CSR matrix (a, colidx, rowstr) from the per-row sparse
   vectors recorded in arow/acol/aelt. Entries are scaled so the matrix has
   condition number ~1/rcond; `shift` is subtracted on the diagonal.
   nzloc[] is scratch counting duplicate (merged) entries per row, used at
   the end to compact the storage. Classic NPB Fortran translated to C. */
static void sparse ( double a [ ] ,
int colidx [ ] ,
int rowstr [ ] ,
int n ,
int nz ,
int nozer ,
int arow [ ] ,
int acol [ ] [ 15 + 1 ] ,
double aelt [ ] [ 15 + 1 ] ,
int firstrow ,
int lastrow ,
int nzloc [ ] ,
double rcond ,
double shift )
{
int nrows ;
# 1724 "main.c"
int i , j , j1 , j2 , nza , k , kk , nzrow , jcol ;
double size , scale , ratio , va ;
logical cont40 ;
# 1732 "main.c"
nrows = lastrow - firstrow + 1 ;
/* Pass 1: count how many entries could land in each row (upper bound,
   duplicates not yet merged), accumulated into rowstr[1..nrows]. */
# 1737 "main.c"
for ( j = 0 ; j < nrows + 1 ; j ++ ) {
rowstr [ j ] = 0 ;
}
# 1744 "main.c"
for ( i = 0 ; i < n ; i ++ ) {
for ( nza = 0 ; nza < arow [ i ] ; nza ++ ) {
j = acol [ i ] [ nza ] + 1 ;
rowstr [ j ] = rowstr [ j ] + arow [ i ] ;
}
}
/* Prefix-sum the counts into CSR row offsets. */
# 1751 "main.c"
rowstr [ 0 ] = 0 ;
for ( j = 1 ; j < nrows + 1 ; j ++ ) {
rowstr [ j ] = rowstr [ j ] + rowstr [ j - 1 ] ;
}
nza = rowstr [ nrows ] - 1 ;
# 1757 "main.c"
if ( nza > nz ) {
printf ( "Space for matrix elements exceeded in sparse\n" ) ;
printf ( "nza, nzmax = %d, %d\n" , nza , nz ) ;
exit ( 1 ) ;
}
/* Clear the value/index storage; colidx == -1 marks an unused slot. */
# 1767 "main.c"
for ( j = 0 ; j < nrows ; j ++ ) {
for ( k = rowstr [ j ] ; k < rowstr [ j + 1 ] ; k ++ ) {
a [ k ] = 0.0 ;
colidx [ k ] = - 1 ;
}
nzloc [ j ] = 0 ;
}
/* Pass 2: scatter the scaled outer products into the (oversized) rows,
   keeping each row's column indices sorted and merging duplicates. */
# 1778 "main.c"
size = 1.0 ;
ratio = __builtin_pow ( rcond , ( 1.0 / ( double ) ( n ) ) ) ;
# 1784 "main.c"
for ( i = 0 ; i < n ; i ++ ) {
for ( nza = 0 ; nza < arow [ i ] ; nza ++ ) {
j = acol [ i ] [ nza ] ;
# 1788 "main.c"
scale = size * aelt [ i ] [ nza ] ;
for ( nzrow = 0 ; nzrow < arow [ i ] ; nzrow ++ ) {
jcol = acol [ i ] [ nzrow ] ;
va = aelt [ i ] [ nzrow ] * scale ;
/* Diagonal entry gets the rcond - shift adjustment. */
# 1793 "main.c"
if ( jcol == j && j == i ) {
va = va + rcond - shift ;
}
/* Insertion into sorted row j; cont40 mirrors the Fortran "goto 40". */
# 1801 "main.c"
cont40 = false ;
for ( k = rowstr [ j ] ; k < rowstr [ j + 1 ] ; k ++ ) {
if ( colidx [ k ] > jcol ) {
/* Shift the tail right by one to open a slot at k. */
for ( kk = rowstr [ j + 1 ] - 2 ; kk >= k ; kk -- ) {
if ( colidx [ kk ] > - 1 ) {
a [ kk + 1 ] = a [ kk ] ;
colidx [ kk + 1 ] = colidx [ kk ] ;
}
}
colidx [ k ] = jcol ;
a [ k ] = 0.0 ;
cont40 = true ;
break ;
} else if ( colidx [ k ] == - 1 ) {
colidx [ k ] = jcol ;
cont40 = true ;
break ;
} else if ( colidx [ k ] == jcol ) {
/* Duplicate column: count it so the row can be compacted later. */
nzloc [ j ] = nzloc [ j ] + 1 ;
cont40 = true ;
break ;
}
}
if ( cont40 == false ) {
printf ( "internal error in sparse: i=%d\n" , i ) ;
exit ( 1 ) ;
}
a [ k ] = a [ k ] + va ;
}
}
size = size * ratio ;
}
/* Pass 3: turn per-row duplicate counts into a running total ... */
# 1840 "main.c"
for ( j = 1 ; j < nrows ; j ++ ) {
nzloc [ j ] = nzloc [ j ] + nzloc [ j - 1 ] ;
}
/* ... then compact each row left over the slack left by merged entries. */
# 1847 "main.c"
for ( j = 0 ; j < nrows ; j ++ ) {
if ( j > 0 ) {
j1 = rowstr [ j ] - nzloc [ j - 1 ] ;
} else {
j1 = 0 ;
}
j2 = rowstr [ j + 1 ] - nzloc [ j ] ;
nza = rowstr [ j ] ;
for ( k = j1 ; k < j2 ; k ++ ) {
a [ k ] = a [ nza ] ;
colidx [ k ] = colidx [ nza ] ;
nza = nza + 1 ;
}
}
for ( j = 1 ; j < nrows + 1 ; j ++ ) {
rowstr [ j ] = rowstr [ j ] - nzloc [ j - 1 ] ;
}
nza = rowstr [ nrows ] - 1 ;
}
# 1868 "main.c"
/* Generate a sparse random vector of nz entries: values in v[], one-based
   indices in iv[], all indices distinct and <= n. nn1 is a power of two
   >= n used to map a random double to an index. Advances the global PRNG
   state (tran/amult), so results depend on call order. */
static void sprnvc ( int n , int nz , int nn1 , double v [ ] , int iv [ ] )
{
int nzv , ii , i ;
double vecelt , vecloc ;
# 1882 "main.c"
nzv = 0 ;
# 1884 "main.c"
while ( nzv < nz ) {
vecelt = randlc ( & tran , amult ) ;
/* Second draw picks the index; rejected (and re-drawn) if out of range
   or already used, so the loop always consumes two randoms per attempt. */
# 1887 "main.c"
vecloc = randlc ( & tran , amult ) ;
i = icnvrt ( vecloc , nn1 ) + 1 ;
if ( i > n ) continue ;
# 1894 "main.c"
logical was_gen = false ;
for ( ii = 0 ; ii < nzv ; ii ++ ) {
if ( iv [ ii ] == i ) {
was_gen = true ;
break ;
}
}
if ( was_gen ) continue ;
v [ nzv ] = vecelt ;
iv [ nzv ] = i ;
nzv = nzv + 1 ;
}
}
# 1912 "main.c"
/* Map x in [0,1) onto an integer bucket in [0, ipwr2) by scaling and
   truncation (ipwr2 is a power of two in this benchmark). */
static int icnvrt ( double x , int ipwr2 )
{
double scaled = ipwr2 * x;
int bucket = (int) scaled;
return bucket;
}
# 1921 "main.c"
/* Set component i of the sparse vector (v, iv) to val. If index i is
   already present its value is overwritten; otherwise a new (val, i) pair
   is appended and *nzv is incremented. n is unused (kept for the original
   Fortran-style interface). */
static void vecset ( int n , double v [ ] , int iv [ ] , int * nzv , int i , double val )
{
int found = 0;
for ( int pos = 0 ; pos < * nzv ; pos ++ ) {
if ( iv [ pos ] != i )
continue;
v [ pos ] = val;
found = 1;
}
if ( ! found ) {
iv [ * nzv ] = i;
v [ * nzv ] = val;
* nzv = * nzv + 1;
}
}
|
core_stsmqr.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/core_blas/core_ztsmqr.c, normal z -> s, Fri Sep 28 17:38:24 2018
*
**/
#include <plasma_core_blas.h>
#include "plasma_types.h"
#include "plasma_internal.h"
#include "core_lapack.h"
#include <omp.h>
/***************************************************************************//**
*
* @ingroup core_tsmqr
*
* Overwrites the general m1-by-n1 tile A1 and
* m2-by-n2 tile A2 with
*
* side = PlasmaLeft side = PlasmaRight
* trans = PlasmaNoTrans Q * | A1 | | A1 A2 | * Q
* | A2 |
*
* trans = PlasmaTrans Q^T * | A1 | | A1 A2 | * Q^T
* | A2 |
*
* where Q is a complex orthogonal matrix defined as the product of k
* elementary reflectors
*
* Q = H(1) H(2) . . . H(k)
*
* as returned by plasma_core_stsqrt.
*
*******************************************************************************
*
* @param[in] side
* - PlasmaLeft : apply Q or Q^T from the Left;
* - PlasmaRight : apply Q or Q^T from the Right.
*
* @param[in] trans
* - PlasmaNoTrans : Apply Q;
* - PlasmaTrans : Apply Q^T.
*
* @param[in] m1
* The number of rows of the tile A1. m1 >= 0.
*
* @param[in] n1
* The number of columns of the tile A1. n1 >= 0.
*
* @param[in] m2
* The number of rows of the tile A2. m2 >= 0.
* m2 = m1 if side == PlasmaRight.
*
* @param[in] n2
* The number of columns of the tile A2. n2 >= 0.
* n2 = n1 if side == PlasmaLeft.
*
* @param[in] k
* The number of elementary reflectors whose product defines
* the matrix Q.
*
* @param[in] ib
* The inner-blocking size. ib >= 0.
*
* @param[in,out] A1
* On entry, the m1-by-n1 tile A1.
* On exit, A1 is overwritten by the application of Q.
*
* @param[in] lda1
* The leading dimension of the array A1. lda1 >= max(1,m1).
*
* @param[in,out] A2
* On entry, the m2-by-n2 tile A2.
* On exit, A2 is overwritten by the application of Q.
*
* @param[in] lda2
* The leading dimension of the tile A2. lda2 >= max(1,m2).
*
* @param[in] V
* The i-th row must contain the vector which defines the
* elementary reflector H(i), for i = 1,2,...,k, as returned by
* plasma_core_STSQRT in the first k columns of its array argument V.
*
* @param[in] ldv
* The leading dimension of the array V. ldv >= max(1,k).
*
* @param[in] T
* The ib-by-k triangular factor T of the block reflector.
* T is upper triangular by block (economic storage);
* The rest of the array is not referenced.
*
* @param[in] ldt
* The leading dimension of the array T. ldt >= ib.
*
* @param work
* Auxiliary workspace array of length
* ldwork-by-n1 if side == PlasmaLeft
* ldwork-by-ib if side == PlasmaRight
*
* @param[in] ldwork
* The leading dimension of the array work.
* ldwork >= max(1,ib) if side == PlasmaLeft
* ldwork >= max(1,m1) if side == PlasmaRight
*
*******************************************************************************
*
* @retval PlasmaSuccess successful exit
* @retval < 0 if -i, the i-th argument had an illegal value
*
******************************************************************************/
/* Apply Q (or Q^T) from a triangular-pentagonal QR factorization to the
   stacked/side-by-side pair (A1, A2), ib columns of reflectors at a time;
   see the Doxygen block above for the full parameter contract. */
__attribute__((weak))
int plasma_core_stsmqr(plasma_enum_t side, plasma_enum_t trans,
int m1, int n1, int m2, int n2, int k, int ib,
float *A1, int lda1,
float *A2, int lda2,
const float *V, int ldv,
const float *T, int ldt,
float *work, int ldwork)
{
// Check input arguments.
if (side != PlasmaLeft && side != PlasmaRight) {
plasma_coreblas_error("illegal value of side");
return -1;
}
if (trans != PlasmaNoTrans && trans != PlasmaTrans) {
plasma_coreblas_error("illegal value of trans");
return -2;
}
if (m1 < 0) {
plasma_coreblas_error("illegal value of m1");
return -3;
}
if (n1 < 0) {
plasma_coreblas_error("illegal value of n1");
return -4;
}
if (m2 < 0 || (m2 != m1 && side == PlasmaRight)) {
plasma_coreblas_error("illegal value of m2");
return -5;
}
if (n2 < 0 || (n2 != n1 && side == PlasmaLeft)) {
plasma_coreblas_error("illegal value of n2");
return -6;
}
if (k < 0 ||
(side == PlasmaLeft && k > m1) ||
(side == PlasmaRight && k > n1)) {
plasma_coreblas_error("illegal value of k");
return -7;
}
if (ib < 0) {
plasma_coreblas_error("illegal value of ib");
return -8;
}
if (A1 == NULL) {
plasma_coreblas_error("NULL A1");
return -9;
}
if (lda1 < imax(1, m1)) {
plasma_coreblas_error("illegal value of lda1");
return -10;
}
if (A2 == NULL) {
plasma_coreblas_error("NULL A2");
return -11;
}
if (lda2 < imax(1, m2)) {
plasma_coreblas_error("illegal value of lda2");
return -12;
}
if (V == NULL) {
plasma_coreblas_error("NULL V");
return -13;
}
if (ldv < imax(1, side == PlasmaLeft ? m2 : n2)) {
plasma_coreblas_error("illegal value of ldv");
return -14;
}
if (T == NULL) {
plasma_coreblas_error("NULL T");
return -15;
}
if (ldt < imax(1, ib)) {
plasma_coreblas_error("illegal value of ldt");
return -16;
}
if (work == NULL) {
plasma_coreblas_error("NULL work");
return -17;
}
if (ldwork < imax(1, side == PlasmaLeft ? ib : m1)) {
plasma_coreblas_error("illegal value of ldwork");
return -18;
}
// quick return
if (m1 == 0 || n1 == 0 || m2 == 0 || n2 == 0 || k == 0 || ib == 0)
return PlasmaSuccess;
// Direction of the ib-wide block sweep: (Left, trans) and (Right, notrans)
// walk the reflector blocks forward from 0; the other two cases walk
// backward from the last full block (i3 is the signed stride).
int i1, i3;
if ((side == PlasmaLeft && trans != PlasmaNoTrans) ||
(side == PlasmaRight && trans == PlasmaNoTrans)) {
i1 = 0;
i3 = ib;
}
else {
i1 = ((k-1)/ib)*ib;
i3 = -ib;
}
// The two-sided loop condition covers both sweep directions.
for (int i = i1; i > -1 && i < k; i += i3) {
int kb = imin(ib, k-i);
int ic = 0;
int jc = 0;
int mi = m1;
int ni = n1;
if (side == PlasmaLeft) {
// H or H^T is applied to C(i:m,1:n).
mi = m1 - i;
ic = i;
}
else {
// H or H^T is applied to C(1:m,i:n).
ni = n1 - i;
jc = i;
}
// Apply H or H^T (NOTE: plasma_core_sparfb used to be core_ztsrfb).
plasma_core_sparfb(side, trans, PlasmaForward, PlasmaColumnwise,
mi, ni, m2, n2, kb, 0,
&A1[lda1*jc+ic], lda1,
A2, lda2,
&V[ldv*i], ldv,
&T[ldt*i], ldt,
work, ldwork);
}
return PlasmaSuccess;
}
/******************************************************************************/
/******************************************************************************/
/* OpenMP-task wrapper for plasma_core_stsmqr: declares data dependences on
   the tiles so the runtime can schedule it, picks the per-thread workspace,
   and records failure on the sequence instead of returning an error code. */
void plasma_core_omp_stsmqr(plasma_enum_t side, plasma_enum_t trans,
int m1, int n1, int m2, int n2, int k, int ib,
float *A1, int lda1,
float *A2, int lda2,
const float *V, int ldv,
const float *T, int ldt,
plasma_workspace_t work,
plasma_sequence_t *sequence, plasma_request_t *request)
{
#pragma omp task depend(inout:A1[0:lda1*n1]) \
depend(inout:A2[0:lda2*n2]) \
depend(in:V[0:ldv*k]) \
depend(in:T[0:ib*k])
{
// Skip the kernel entirely if an earlier task already failed the sequence.
if (sequence->status == PlasmaSuccess) {
// Prepare workspaces.
int tid = omp_get_thread_num();
float *W = (float*)work.spaces[tid];
int ldwork = side == PlasmaLeft ? ib : m1; // TODO: float check
// Call the kernel.
int info = plasma_core_stsmqr(side, trans,
m1, n1, m2, n2, k, ib,
A1, lda1,
A2, lda2,
V, ldv,
T, ldt,
W, ldwork);
if (info != PlasmaSuccess) {
plasma_error("core_stsmqr() failed");
plasma_request_fail(sequence, request, PlasmaErrorInternal);
}
}
}
}
|
hist_par.c | /*
NAME:
hist_par: create histograms in parallel
Purpose:
This program will fill an array with pseudo random values, build
a histogram of that array, and then compute statistics. This
can be used as a simple test of the quality of a random number
generator
Usage:
To keep the program as simple as possible, you must edit the file
and change basic parameters. Then compile and run the program.
Algorithm:
As a point of nomenclature, I like to think of a histogram as a sequence
of buckets. I take each item from an array, figure out which bucket it
belongs to, then increment the appropriate bucket counter.
History:
Written by Tim Mattson, 7/2017.
updated with multiple methods 8/2021
*/
#include <stdio.h>
#include <omp.h>
#include <math.h>
#include "random.h"
// uncomment this #define if you want tons of diagnostic output
//#define DEBUG 0
#define num_trials 1000000 // number of x values
#define num_buckets 50 // number of buckets in histogram
static long xlow = 0.0; // low end of x range
static long xhi = 100.0; // High end of x range
/////////////////////////////////////////////////////////////////////////
// Utility Functions
/////////////////////////////////////////////////////////////////////////
// Reset all num_buckets counters of the histogram to zero; always returns 0.
int initHist(long* hist){
int b = 0;
while (b < num_buckets) {
hist[b] = 0;
b++;
}
return 0;
}
// Report summary statistics (mean and a sum-based spread measure) for the
// filled histogram, plus the elapsed time for the run; always returns 0.
int analyzeResults(double time,long *hist)
{
double total = 0.0;
double total_sq = 0.0;
for (int b = 0; b < num_buckets; b++) {
double count = (double) hist[b];
total += count;
total_sq += count * hist[b];
}
double mean = total / num_buckets;
double spread = sqrt(total_sq - total * total / (double) num_buckets);
printf(" histogram for %d buckets of %d values\n",num_buckets, num_trials);
printf(" ave = %f, std_dev = %f\n",(float)mean, (float)spread);
printf(" in %f seconds\n",(float)time);
return 0;
}
/////////////////////////////////////////////////////////////////////////
/* Fill an array with random values, then build its histogram four ways
   (sequential, critical section, per-bucket locks, array reduction) and
   compare the statistics and timing of each method. */
int main ()
{
// NOTE(review): ~8 MB of stack for num_trials doubles — may overflow the
// default stack on some systems; consider heap allocation.
double x[num_trials]; // array used to assign counters in the histogram
int i;
long hist[num_buckets]; // the histogram
double bucket_width; // the width of each bucket in the histogram
double time;
omp_lock_t hist_lcks[num_buckets]; // array of locks, one per bucket
#pragma omp parallel
{
#pragma omp single
printf(" %d threads\n",omp_get_num_threads());
#pragma omp for
for(i= 0; i< num_buckets; i++)
omp_init_lock(&hist_lcks[i]);
}
seed(xlow, xhi); // seed the random number generator over range of x
bucket_width = (xhi-xlow)/(double)num_buckets;
// fill the array
for(int i=0;i<num_trials;i++)
x[i] = drandom();
////////////////////////////////////////////////////////////////
// Assign x values to the right histogram bucket -- sequential
////////////////////////////////////////////////////////////////
printf(" Sequential ");
initHist(hist);
time = omp_get_wtime();
for(int i=0;i<num_trials;i++){
// NOTE(review): the cast binds to (x[i] - xlow), not to the quotient;
// this matches (long)((x[i]-xlow)/bucket_width) only because
// bucket_width is a whole number here — confirm if parameters change.
long ival = (long) (x[i] - xlow)/bucket_width;
hist[ival]++;
#ifdef DEBUG
printf("i = %d, xi = %f, ival = %d\n",i,(float)x[i], ival);
#endif
}
time = omp_get_wtime() - time;
analyzeResults(time,hist);
////////////////////////////////////////////////////////////////
// Assign x values to the right histogram bucket -- critical
// (one global critical section serializes every increment)
////////////////////////////////////////////////////////////////
printf(" par with critical ");
initHist(hist);
time = omp_get_wtime();
#pragma omp parallel for
for(int i=0;i<num_trials;i++){
long ival = (long) (x[i] - xlow)/bucket_width;
#pragma omp critical
hist[ival]++;
#ifdef DEBUG
printf("i = %d, xi = %f, ival = %d\n",i,(float)x[i], ival);
#endif
}
time = omp_get_wtime() - time;
analyzeResults(time,hist);
////////////////////////////////////////////////////////////////
// Assign x values to the right histogram bucket -- par with locks
////////////////////////////////////////////////////////////////
printf(" par with locks ");
initHist(hist);
time = omp_get_wtime();
#pragma omp parallel for
for(int i=0;i<num_trials;i++){
long ival = (long) (x[i] - xlow)/bucket_width;
omp_set_lock(&hist_lcks[ival]); // protect the histogram bucket. Should
hist[ival]++; // have little overhead since the locks
omp_unset_lock(&hist_lcks[ival]); // are mostly uncontended
#ifdef DEBUG
printf("i = %d, xi = %f, ival = %d\n",i,(float)x[i], ival);
#endif
}
time = omp_get_wtime() - time;
analyzeResults(time,hist);
////////////////////////////////////////////////////////////////
// Assign x values to the right histogram bucket -- par reduction
// (OpenMP 4.5 array-section reduction: per-thread private copies
// of hist combined at the end; no synchronization in the loop)
////////////////////////////////////////////////////////////////
printf(" par with reduction ");
initHist(hist);
time = omp_get_wtime();
#pragma omp parallel for reduction(+:hist[0:num_buckets])
for(int i=0;i<num_trials;i++){
long ival = (long) (x[i] - xlow)/bucket_width;
hist[ival]++;
#ifdef DEBUG
printf("i = %d, xi = %f, ival = %d\n",i,(float)x[i], ival);
#endif
}
time = omp_get_wtime() - time;
analyzeResults(time,hist);
// NOTE(review): the per-bucket locks are never destroyed with
// omp_destroy_lock before exit.
return 0;
}
|
DRB050-functionparameter-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Arrays passed as function parameters
*/
/* Write half of each element of c into o1, in parallel.
   o1 and c must each hold at least len elements and must not overlap. */
void foo1(double o1[], double c[], int len)
{
#pragma omp parallel for
  for (int k = 0; k < len; ++k) {
    o1[k] = 0.5 * c[k];
  }
}
double o1[100];
double c[100];

/* Initialize both global arrays in parallel (each iteration touches only
   its own index, so there is no race), run them through foo1(), and print
   the result. Behaviorally identical to the reference version. */
int main()
{
int idx;
#pragma omp parallel for
for (idx = 0; idx < 100; ++idx) {
c[idx] = idx + 1.01;
o1[idx] = idx + 1.01;
}
foo1 (o1, c, 100);
for (idx = 0; idx < 100; ++idx) {
printf("%lf\n", o1[idx]);
}
return 0;
}
|
chlpca.h | /*
#
# File : chlpca.cpp
# ( C++ source file )
#
# Description : Example of use for the CImg plugin 'plugins/chlpca.h'.
# This file is a part of the CImg Library project.
# ( http://cimg.eu )
#
# Copyright : Jerome Boulanger
# ( http://www.irisa.fr/vista/Equipe/People/Jerome.Boulanger.html )
#
#
# License : CeCILL v2.0
# ( http://www.cecill.info/licences/Licence_CeCILL_V2-en.html )
#
# This software is governed by the CeCILL license under French law and
# abiding by the rules of distribution of free software. You can use,
# modify and/ or redistribute the software under the terms of the CeCILL
# license as circulated by CEA, CNRS and INRIA at the following URL
# "http://www.cecill.info".
#
# As a counterpart to the access to the source code and rights to copy,
# modify and redistribute granted by the license, users are provided only
# with a limited warranty and the software's author, the holder of the
# economic rights, and the successive licensors have only limited
# liability.
#
# In this respect, the user's attention is drawn to the risks associated
# with loading, using, modifying and/or developing or reproducing the
# software by the user in light of its specific status of free software,
# that may mean that it is complicated to manipulate, and that also
# therefore means that it is reserved for developers and experienced
# professionals having in-depth computer knowledge. Users are therefore
# encouraged to load and test the software's suitability as regards their
# requirements in conditions enabling the security of their systems and/or
# data to be ensured and, more generally, to use and operate it in the
# same conditions as regards security.
#
# The fact that you are presently reading this means that you have had
# knowledge of the CeCILL license and that you accept its terms.
#
*/
#ifndef cimg_plugin_chlpca
#define cimg_plugin_chlpca
// Define some useful macros.
//! Strided loops: iterate a counter from 0 to bound (exclusive) with a given step.
#define cimg_for_step1(bound,i,step) for (int i = 0; i<(int)(bound); i+=step)
#define cimg_for_stepX(img,x,step) cimg_for_step1((img)._width,x,step)
#define cimg_for_stepY(img,y,step) cimg_for_step1((img)._height,y,step)
#define cimg_for_stepZ(img,z,step) cimg_for_step1((img)._depth,z,step)
#define cimg_for_stepXY(img,x,y,step) cimg_for_stepY(img,y,step) cimg_for_stepX(img,x,step)
// NOTE(review): cimg_for_stepXYZ does not take the depth counter as a parameter;
// it always declares a loop variable literally named 'z' (via cimg_for_stepZ),
// so callers must use 'z' as the depth index -- TODO confirm this is intended.
#define cimg_for_stepXYZ(img,x,y,step) cimg_for_stepZ(img,z,step) cimg_for_stepY(img,y,step) cimg_for_stepX(img,x,step)
//! Loop for point J(xj,yj) in the neighborhood of a point I(xi,yi) of size (2*rx+1,2*ry+1)
/**
   Point J is kept inside the boundaries of the image img.
   Example, summing the pixel values in an 11x11 neighborhood:
   cimg_forXY(img,xi,yi) cimg_forXY_window(img,xi,yi,xj,yj,5,5) dest(xi,yi) += src(xj,yj);
**/
#define cimg_forXY_window(img,xi,yi,xj,yj,rx,ry) \
for (int yi0 = std::max(0,yi-ry), yi1=std::min(yi + ry,(int)img.height() - 1), yj=yi0;yj<=yi1;++yj) \
for (int xi0 = std::max(0,xi-rx), xi1=std::min(xi + rx,(int)img.width() - 1), xj=xi0;xj<=xi1;++xj)
// 3D variant of cimg_forXY_window: J(xj,yj,zj) spans the clipped
// (2*rx+1)x(2*ry+1)x(2*rz+1) window around I(xi,yi,zi).
#define cimg_forXYZ_window(img,xi,yi,zi,xj,yj,zj,rx,ry,rz) \
for (int zi0 = std::max(0,zi-rz), zi1=std::min(zi + rz,(int)img.depth() - 1) , zj=zi0;zj<=zi1;++zj) \
for (int yi0 = std::max(0,yi-ry), yi1=std::min(yi + ry,(int)img.height() - 1), yj=yi0;yj<=yi1;++yj) \
for (int xi0 = std::max(0,xi-rx), xi1=std::min(xi + rx,(int)img.width() - 1) , xj=xi0;xj<=xi1;++xj)
//! Crop a patch in the image around position x,y,z and return it as a column vector
/**
   \param x x-coordinate of the center of the patch
   \param y y-coordinate of the center of the patch
   \param z z-coordinate of the center of the patch (ignored for 2D images)
   \param px the patch half width
   \param py the patch half height
   \param pz the patch half depth (ignored for 2D images)
   \return the cropped patch unrolled along the 'y' axis
**/
CImg<T> get_patch(int x, int y, int z,
                  int px, int py, int pz) const {
  const int x0 = x - px, x1 = x + px;
  const int y0 = y - py, y1 = y + py;
  if (depth() == 1) return get_crop(x0, y0, x1, y1).unroll('y');
  const int z0 = z - pz, z1 = z + pz;
  return get_crop(x0, y0, z0, x1, y1, z1).unroll('y');
}
//! Extract a local patch dictionnary around point xi,yi,zi
/**
   Builds a matrix whose columns are the patches (half sizes px,py,pz) centered
   on every point of the (2*wx+1)x(2*wy+1)x(2*wz+1) training window around
   (xi,yi,zi); window points falling outside the image are clipped away.
   \param idc (output) column index of the patch centered exactly at (xi,yi,zi)
   \return the dictionnary matrix, one patch per column
**/
CImg<T> get_patch_dictionnary(const int xi, const int yi, const int zi,
const int px, const int py, const int pz,
const int wx, const int wy, const int wz,
int & idc) const {
const int
n = (2*wx + 1) * (2*wy + 1) * (2 * (depth()==1?0:wz) + 1),
// FIX: the depth extent of a patch is governed by pz, not px. The previous
// code used px here, which was only correct for cubic patches (px == pz).
d = (2*px + 1) * (2*py + 1) * (2 * (depth()==1?0:pz) + 1) * spectrum();
// FIX: use CImg<T> rather than CImg<> (== CImg<float>) so no precision is
// lost in the intermediate when T is double.
CImg<T> S(n, d);
int idx = 0;
if (depth() == 1) {
cimg_forXY_window((*this), xi, yi, xj, yj, wx, wy){
CImg<T> patch = get_patch(xj, yj, 0, px, py, 1);
cimg_forY(S,y) S(idx,y) = patch(y);
if (xj==xi && yj==yi) idc = idx;
idx++;
}
} else {
cimg_forXYZ_window((*this), xi,yi,zi,xj,yj,zj,wx,wy,wz){
CImg<T> patch = get_patch(xj, yj, zj, px, py, pz);
cimg_forY(S,y) S(idx,y) = patch(y);
if (xj==xi && yj==yi && zj==zi) idc = idx;
idx++;
}
}
// keep only the columns actually filled (window may have been clipped)
S.columns(0, idx - 1);
return S;
}
//! Write a patch (given as a 1D column vector) back into the image
/**
   \param xi x-coordinate of the center of the patch
   \param yi y-coordinate of the center of the patch
   \param zi z-coordinate of the center of the patch (ignored for 2D images)
   \param patch the patch as a 1D column vector
   \param px the patch half width
   \param py the patch half height
   \param pz the patch half depth (ignored for 2D images)
   \note draws with opacity -1; presumably this accumulates into the image
         (see CImg draw_image semantics) -- used together with a count image.
**/
CImg<T> & add_patch(const int xi, const int yi, const int zi,
                    const CImg<T> & patch,
                    const int px, const int py, const int pz) {
  const bool flat = (depth() == 1);
  const int
    x0 = xi - px,
    y0 = yi - py,
    z0 = flat ? 0 : zi - pz,
    sx = 2*px + 1,
    sy = 2*py + 1,
    sz = flat ? 1 : 2*pz + 1;
  draw_image(x0, y0, z0, 0, patch.get_resize(sx, sy, sz, spectrum(), -1), -1);
  return (*this);
}
//! Write a constant patch-shaped region into the image
/**
   \param xi x-coordinate of the center of the patch
   \param yi y-coordinate of the center of the patch
   \param zi z-coordinate of the center of the patch (ignored for 2D images)
   \param value the constant value written over the patch region
   \param px the patch half width
   \param py the patch half height
   \param pz the patch half depth (ignored for 2D images)
   \note draws with opacity -1; presumably this accumulates into the image
         (see CImg draw_rectangle semantics) -- used as a per-pixel counter.
**/
CImg<T> & add_patch(const int xi, const int yi, const int zi, const T value,
                    const int px, const int py, const int pz) {
  const bool flat = (depth() == 1);
  const int z0 = flat ? 0 : zi - pz, z1 = flat ? 0 : zi + pz;
  draw_rectangle(xi - px, yi - py, z0, 0,
                 xi + px, yi + py, z1, spectrum() - 1, value, -1);
  return (*this);
}
//! CHLPCA denoising from the PhD thesis of Hu Haijuan
/**
\param px the patch half width
\param py the patch half height
\param pz the patch half depth
\param wx the training region half width
\param wy the training region half height
\param wz the training region half depth
\param nstep the subsampling of the image domain
\param nsim the number of patches used for training as a factor of the patch size
\param lambda_min the threshold on the eigen values of the PCA for dimension reduction
\param threshold the threshold on the value of the coefficients
\param pca_use_svd if true use the svd approach to perform the pca otherwise use the covariance method
\note please cite the PhD thesis of Hu Haijuan http://www.univ-ubs.fr/soutenance-de-these-hu-haijuan-337653.kjsp?RH=1318498222799
**/
CImg<T> get_chlpca(const int px, const int py, const int pz,
const int wx, const int wy, const int wz,
const int nstep, const float nsim,
const float lambda_min, const float threshold,
const float noise_std, const bool pca_use_svd) const {
// nd: length of one unrolled patch; K: number of training patches to keep.
const int
nd = (2*px + 1) * (2*py + 1) * (depth()==1?1:2*pz + 1) * spectrum(),
K = (int)(nsim * nd);
#ifdef DEBUG
fprintf(stderr,"chlpca: p:%dx%dx%d,w:%dx%dx%d,nd:%d,K:%d\n",
2*px + 1,2*py + 1,2*pz + 1,2*wx + 1,2*wy + 1,2*wz + 1,nd,K);
#endif
// Noise standard deviation: estimated from the image unless supplied (>= 0).
float sigma;
if (noise_std<0) sigma = (float)std::sqrt(variance_noise());
else sigma = noise_std;
// dest accumulates denoised patches, count the per-pixel number of contributions.
CImg<T> dest(*this), count(*this);
dest.fill(0);
count.fill(0);
cimg_for_stepZ(*this,zi,(depth()==1||pz==0)?1:nstep){
// NOTE(review): threads write overlapping patches into 'dest'/'count' via
// add_patch below; this looks like a data race under OpenMP -- TODO confirm.
#ifdef cimg_use_openmp
#pragma omp parallel for
#endif
cimg_for_stepXY((*this),xi,yi,nstep){
// extract the training region X
int idc = 0;
CImg<T> S = get_patch_dictionnary(xi,yi,zi,px,py,pz,wx,wy,wz,idc);
// select the K most similar patches within the training set
CImg<T> Sk(S);
CImg<unsigned int> index(S.width());
if (K < Sk.width() - 1){
// rank all columns by MSE distance to the center patch (column idc)
CImg<T> mse(S.width());
CImg<unsigned int> perms;
cimg_forX(S,x) { mse(x) = (T)S.get_column(idc).MSE(S.get_column(x)); }
mse.sort(perms,true);
cimg_foroff(perms,i) {
cimg_forY(S,j) Sk(i,j) = S(perms(i),j);
index(perms(i)) = i;
}
Sk.columns(0, K);
perms.threshold(K);
} else {
// fewer candidates than K: keep everything, identity index map
cimg_foroff(index,i) index(i)=i;
}
// centering the patches (subtract the mean patch M)
CImg<T> M(1, Sk.height(), 1, 1, 0);
cimg_forXY(Sk,x,y) { M(y) += Sk(x,y); }
M /= (T)Sk.width();
cimg_forXY(Sk,x,y) { Sk(x,y) -= M(y); }
// compute the principal component of the training set S
CImg<T> P, lambda;
if (pca_use_svd) {
CImg<T> V;
Sk.get_transpose().SVD(V,lambda,P,true,100);
} else {
(Sk * Sk.get_transpose()).symmetric_eigen(lambda, P);
lambda.sqrt();
}
// dimension reduction: keep components with eigenvalue above tx
int s = 0;
const T tx = (T)(std::sqrt((double)Sk.width()-1.0) * lambda_min * sigma);
while((lambda(s) > tx) && (s < ((int)lambda.size() - 1))) { s++; }
P.columns(0,s);
// project all the patches on the basis (compute scalar product)
Sk = P.get_transpose() * Sk;
// threshold the coefficients
if (threshold > 0) { Sk.threshold(threshold, 1); }
// project back to pixel space
Sk = P * Sk;
// recenter the patches (add the mean patch back)
cimg_forXY(Sk,x,y) { Sk(x,y) += M(y); }
// scatter the kept (denoised) patches back into dest and bump count
int j = 0;
cimg_forXYZ_window((*this),xi,yi,zi,xj,yj,zj,wx,wy,wz){
const int id = index(j);
if (id < Sk.width()) {
dest.add_patch(xj, yj, zj, Sk.get_column(id), px, py, pz);
count.add_patch(xj, yj, zj, (T)1, px, py, pz);
}
j++;
}
}
}
// normalize by the number of contributions; pixels never covered keep
// their original value
cimg_foroff(dest, i) {
if(count(i) != 0) { dest(i) /= count(i); }
else { dest(i) = (*this)(i); }
}
return dest;
}
//! CHLPCA denoising, in-place version
/**
   In-place counterpart of get_chlpca(): replaces the image content by its
   denoised version and returns a reference to *this. See the const
   get_chlpca() overload for the meaning of all parameters.
   \note please cite the PhD thesis of Hu Haijuan http://www.univ-ubs.fr/soutenance-de-these-hu-haijuan-337653.kjsp?RH=1318498222799
**/
CImg<T> & chlpca(const int px, const int py, const int pz,
                 const int wx, const int wy, const int wz,
                 const int nstep, const float nsim,
                 const float lambda_min, const float threshold,
                 const float noise_std, const bool pca_use_svd) {
  return ((*this) = get_chlpca(px, py, pz, wx, wy, wz, nstep, nsim,
                               lambda_min, threshold, noise_std, pca_use_svd));
}
//! CHLPCA denoising with isotropic patch and training-window sizes
/**
   \param p the patch half size (applied to every spatial dimension)
   \param w the training region half size (applied to every spatial dimension)
   \param nstep the subsampling of the image domain
   \param nsim the number of patches used for training as a factor of the patch size
   \param lambda_min the threshold on the eigen values of the PCA for dimension reduction
   \param threshold the threshold on the value of the coefficients
   \param pca_use_svd if true use the svd approach to perform the pca otherwise use the covariance method
   \note please cite the PhD thesis of Hu Haijuan http://www.univ-ubs.fr/soutenance-de-these-hu-haijuan-337653.kjsp?RH=1318498222799
**/
CImg<T> get_chlpca(const int p=3, const int w=10,
                   const int nstep=5, const float nsim=10,
                   const float lambda_min=2, const float threshold = -1,
                   const float noise_std=-1, const bool pca_use_svd=true) const {
  // 2D images get a zero half-depth for both the patch and the window.
  const bool is2d = (depth() == 1);
  const int pz = is2d ? 0 : p, wz = is2d ? 0 : w;
  return get_chlpca(p, p, pz, w, w, wz, nstep, nsim, lambda_min,
                    threshold, noise_std, pca_use_svd);
}
//! CHLPCA denoising, in-place version with isotropic sizes
/**
   In-place counterpart of the defaulted get_chlpca(); see that overload for
   the meaning of all parameters.
   FIX: now returns CImg<T>& like the other in-place chlpca() overload; the
   previous by-value return made a needless deep copy of the whole image and
   was inconsistent with the sibling overload (callers binding the result by
   value continue to work unchanged).
**/
CImg<T> & chlpca(const int p=3, const int w=10,
                 const int nstep=5, const float nsim=10,
                 const float lambda_min=2, const float threshold = -1,
                 const float noise_std=-1, const bool pca_use_svd=true) {
  (*this) = get_chlpca(p, w, nstep, nsim, lambda_min,
                       threshold, noise_std, pca_use_svd);
  return (*this);
}
#endif /* cimg_plugin_chlpca */
|
edgebased_levelset_substep.h | // Kratos Multi-Physics
//
// Copyright (c) 2015, Pooyan Dadvand, Riccardo Rossi, CIMNE (International Center for Numerical Methods in Engineering)
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
//
// - Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
// - Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the distribution.
// - All advertising materials mentioning features or use of this software must display the following acknowledgement:
// This product includes Kratos Multi-Physics technology.
// - Neither the name of the CIMNE nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// HOLDERS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED ANDON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
// THE USE OF THISSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Project Name: Kratos
// Last Modified by: $Author: antonia $
// Date: $Date: 2009-01-14 16:24:38 $
// Revision: $Revision: 1.11 $
//
//
#if !defined(KRATOS_EDGEBASED_LEVELSET_SUBSTEP_FLUID_SOLVER_H_INCLUDED)
#define KRATOS_EDGEBASED_LEVELSET_SUBSTEP_FLUID_SOLVER_H_INCLUDED
// #define DEBUG_OUTPUT
//#define SPLIT_OSS
// #define SYMM_PRESS
// System includes
#include <string>
#include <iostream>
#include <algorithm>
// #include <omp.h>
// External includes
// Project includes
#include "includes/define.h"
#include "includes/model_part.h"
#include "includes/deprecated_variables.h"
#include "includes/node.h"
//#include "geometries/geometry.h"
#include "utilities/geometry_utilities.h"
#include "free_surface_application.h"
#include "custom_utilities/edge_data_c2c.h"
namespace Kratos
{
template<unsigned int TDim, class MatrixContainer, class TSparseSpace, class TLinearSolver>
class EdgeBasedLevelSetSubstep
{
public:
//name for the self defined structure
typedef EdgesStructureTypeC2C<TDim> CSR_Tuple;
typedef vector<CSR_Tuple> EdgesVectorType;
//name for row start and column index vectors
typedef vector<unsigned int> IndicesVectorType;
//defining matrix type for test calculations
typedef vector< array_1d<double, TDim> > CalcVectorType;
//defining type for local storage of nodal values
typedef vector<double> ValuesVectorType;
//defining types for matrix operations
typedef typename TSparseSpace::MatrixType TSystemMatrixType;
typedef typename TSparseSpace::VectorType TSystemVectorType;
typedef std::size_t SizeType;
//constructor and destructor
// Constructor: keeps references to the edge-data container and the model part,
// stores the stabilization parameters, seeds VISCOSITY on every node with the
// given value, and sets hard-coded solver defaults (substeps, shock
// coefficient, corner/edge coefficients).
EdgeBasedLevelSetSubstep (MatrixContainer& mr_matrix_container,
ModelPart& mr_model_part,
const double viscosity,
const double density,
const Vector body_force,
bool use_mass_correction,
double edge_detection_angle,
double stabdt_pressure_factor,
double stabdt_convection_factor,
double tau2_factor,
bool assume_constant_dp
)
: mr_matrix_container (mr_matrix_container),
mr_model_part (mr_model_part),
mstabdt_pressure_factor (stabdt_pressure_factor),
mstabdt_convection_factor (stabdt_convection_factor),
medge_detection_angle (edge_detection_angle),
mtau2_factor (tau2_factor),
massume_constant_dp (assume_constant_dp)
{
// write the (uniform) molecular viscosity onto every node of the model part
for (ModelPart::NodesContainerType::iterator it=mr_model_part.NodesBegin(); it!=mr_model_part.NodesEnd(); it++)
it->FastGetSolutionStepValue (VISCOSITY) = viscosity;
mMolecularViscosity = viscosity;
// mViscosity = viscosity;
noalias (mBodyForce) = body_force;
mRho = density;
// defaults; most of these are overwritten later (e.g. by ComputeTimeStep)
mdelta_t_avg = 1000.0;
max_dt = 1.0;
muse_mass_correction = use_mass_correction;
mshock_coeff = 0.7;
mWallLawIsActive = false;
mnumsubsteps=5;
mmax_dt = 0.0;
mcorner_coefficient = 30.0; //50.0;
medge_coefficient = 2.0; //30.0; //10.0;
// for (unsigned int i = 0; i < TDim; i++) mBodyForce[i] = 0;
// mBodyForce[1] = -9.81;
//
// mRho = 1000.0;
std::cout << "Edge based level set substep solver is created" << std::endl;
};
// Destructor: nothing to release explicitly; all members clean up themselves.
~EdgeBasedLevelSetSubstep()
{
};
// Replace the body-force (e.g. gravity) vector used by the solver and echo
// the new value for verification.
void SetBodyForce( const Vector& body_force)
{
noalias(mBodyForce) = body_force;
KRATOS_WATCH(mBodyForce);
}
//***********************************
//function to initialize fluid solver
// Allocates and zeroes every nodal/edge work vector, pulls the initial
// solution from the Kratos database, classifies boundary nodes
// (fixed-velocity / pressure-outlet / fixed-distance), builds the sparsity
// pattern of the pressure matrix mL, computes edge lengths and sanity-checks
// the mesh. Must be called once before any solution step.
void Initialize (
)
{
KRATOS_TRY
//get number of nodes
unsigned int n_nodes = mr_model_part.Nodes().size();
unsigned int n_edges = mr_matrix_container.GetNumberEdges();
//size data vectors (each resized to one entry per node and zeroed)
mViscosity.resize (n_nodes);
mr_matrix_container.SetToZero (mViscosity);
mWork.resize (n_nodes);
mr_matrix_container.SetToZero (mWork);
mvel_n.resize (n_nodes);
mr_matrix_container.SetToZero (mvel_n);
mvel_n1.resize (n_nodes);
mr_matrix_container.SetToZero (mvel_n1);
mPn.resize (n_nodes);
mr_matrix_container.SetToZero (mPn);
mPn1.resize (n_nodes);
mr_matrix_container.SetToZero (mPn1);
mHmin.resize (n_nodes);
mr_matrix_container.SetToZero (mHmin);
mHavg.resize (n_nodes);
mr_matrix_container.SetToZero (mHavg);
mNodalFlag.resize (n_nodes);
mr_matrix_container.SetToZero (mNodalFlag);
mdistances.resize (n_nodes);
mr_matrix_container.SetToZero (mdistances);
mTauPressure.resize (n_nodes);
mr_matrix_container.SetToZero (mTauPressure);
mTauConvection.resize (n_nodes);
mr_matrix_container.SetToZero (mTauConvection);
mTau2.resize (n_nodes);
mr_matrix_container.SetToZero (mTau2);
mPi.resize (n_nodes);
mr_matrix_container.SetToZero (mPi);
mXi.resize (n_nodes);
mr_matrix_container.SetToZero (mXi);
mx.resize (n_nodes);
mr_matrix_container.SetToZero (mx);
mEdgeDimensions.resize (n_edges);
mr_matrix_container.SetToZero (mEdgeDimensions);
//convection variables
mBeta.resize (n_nodes);
mr_matrix_container.SetToZero (mBeta);
mPiConvection.resize (n_nodes);
mr_matrix_container.SetToZero (mPiConvection);
mphi_n.resize (n_nodes);
mr_matrix_container.SetToZero (mphi_n);
mphi_n1.resize (n_nodes);
mr_matrix_container.SetToZero (mphi_n1);
mEps.resize (n_nodes);
mr_matrix_container.SetToZero (mEps);
// mD.resize(n_nodes);
// mr_matrix_container.SetToZero(mD);
mA.resize (n_nodes);
mr_matrix_container.SetToZero (mA);
mB.resize (n_nodes);
mr_matrix_container.SetToZero (mB);
mdiv_error.resize (n_nodes);
mr_matrix_container.SetToZero (mdiv_error);
mWallReductionFactor.resize (n_nodes);
mr_matrix_container.SetToZero (mWallReductionFactor);
mdiag_stiffness.resize (n_nodes);
mr_matrix_container.SetToZero (mdiag_stiffness);
mis_slip.resize (n_nodes);
mis_visited.resize (n_nodes);
macc.resize (n_nodes);
mr_matrix_container.SetToZero (macc);
// ValuesVectorType external_pressure;
// external_pressure.resize(n_nodes);
//read velocity and pressure data from Kratos
mr_matrix_container.FillScalarFromDatabase (VISCOSITY, mViscosity, mr_model_part.Nodes() );
mr_matrix_container.FillVectorFromDatabase (VELOCITY, mvel_n1, mr_model_part.Nodes() );
mr_matrix_container.FillScalarFromDatabase (PRESSURE, mPn1, mr_model_part.Nodes() );
mr_matrix_container.FillOldScalarFromDatabase (PRESSURE, mPn, mr_model_part.Nodes() );
mr_matrix_container.FillOldVectorFromDatabase (VELOCITY, mvel_n, mr_model_part.Nodes() );
mr_matrix_container.FillCoordinatesFromDatabase (mx, mr_model_part.Nodes() );
//set flag for first time step
mFirstStep = true;
//loop to categorize boundary nodes
std::vector< unsigned int> tempFixedVelocities;
std::vector< array_1d<double,TDim> > tempFixedVelocitiesValues;
std::vector< unsigned int> tempPressureOutletList;
std::vector< unsigned int> tempDistanceList;
for (ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin();
inode != mr_model_part.NodesEnd();
inode++)
{
// AUX_INDEX holds the node's position in the edge-based data structures
int index = inode->FastGetSolutionStepValue (AUX_INDEX);
if (inode->IsFixed (VELOCITY_X) ) //note that the variables can be either all fixed or no one fixed
{
if (inode->IsFixed (VELOCITY_Y) == false || inode->IsFixed (VELOCITY_Z) == false)
{
std::cout << "error found on the fixity of node " << inode->Id() << std::endl;
KRATOS_THROW_ERROR (std::logic_error, "velocities can be either all fixed or none fixed", "")
}
tempFixedVelocities.push_back (index);
tempFixedVelocitiesValues.push_back (mvel_n1[index]);
}
if (inode->IsFixed (DISTANCE) )
tempDistanceList.push_back (index);
if (inode->IsFixed (PRESSURE) )
{
tempPressureOutletList.push_back (index);
// mPressureOutlet.push_back(external_pressure[index]);
}
}
// copy the temporary lists into the persistent member containers
mFixedVelocities.resize (tempFixedVelocities.size(),false);
mFixedVelocitiesValues.resize (tempFixedVelocitiesValues.size(),false);
mPressureOutletList.resize (tempPressureOutletList.size(),false);
mDistanceBoundaryList.resize (tempDistanceList.size(),false);
mDistanceValuesList.resize (tempDistanceList.size(),false);
#pragma omp parallel for
for (int i=0; i<static_cast<int> (tempFixedVelocities.size() ); i++)
{
mFixedVelocities[i] = tempFixedVelocities[i];
mFixedVelocitiesValues[i] = tempFixedVelocitiesValues[i];
}
#pragma omp parallel for
for (int i=0; i<static_cast<int> (tempPressureOutletList.size() ); i++)
{
mPressureOutletList[i] = tempPressureOutletList[i];
}
for (int i=0; i<static_cast<int> (tempDistanceList.size() ); i++)
{
mDistanceBoundaryList[i] = tempDistanceList[i];
}
//compute slip normals and fill SlipList
CalculateNormals (mr_model_part.Conditions() );
mr_matrix_container.WriteVectorToDatabase (NORMAL, mSlipNormal, mr_model_part.Nodes() );
if (TDim == 3)
DetectEdges3D (mr_model_part.Conditions() );
//print number of nodes corresponding to the different types of boundary conditions
// KRATOS_WATCH(mFixedVelocities.size())
// KRATOS_WATCH(mPressureOutletList.size())
// KRATOS_WATCH(mSlipBoundaryList.size())
//determine number of edges and entries
unsigned int n_nonzero_entries = 2 * n_edges + n_nodes;
//allocate memory for variables
mL.resize (n_nodes, n_nodes, n_nonzero_entries);
// Build the sparsity pattern of mL row by row, one thread per row block.
// NOTE(review): row_partition[k + 1] is read below, so DivideInPartitions is
// presumably expected to resize the vector to number_of_threads + 1 entries
// -- TODO confirm against OpenMPUtils.
int number_of_threads= OpenMPUtils::GetNumThreads();
std::vector<int> row_partition (number_of_threads);
OpenMPUtils::DivideInPartitions (n_nodes,number_of_threads,row_partition);
for (int k = 0; k < number_of_threads; k++)
{
#pragma omp parallel
if (OpenMPUtils::ThisThread() == k)
{
for (int i_node = static_cast<int> (row_partition[k]); i_node < static_cast<int> (row_partition[k + 1]); i_node++)
{
//loop over all nodes
// for (unsigned int i_node = 0; i_node < n_nodes; i_node++) {
//flag for considering diagonal matrix elements
bool flag = 0;
//loop over all neighbours
for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex() [i_node]; csr_index != mr_matrix_container.GetRowStartIndex() [i_node + 1]; csr_index++)
{
//get global index of neighbouring node j
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex() [csr_index];
//define matrix structure row by row (the order does matter!)
if ( (static_cast<int> (j_neighbour) > i_node) && (flag == 0) )
{
//add diagonal/nodal contribution
mL.push_back (i_node, i_node, 0.0);
flag = 1;
}
//add non-diagonal/edge contribution
mL.push_back (i_node, j_neighbour, 0.0);
}
//if diagonal element is the last non-zero element of the row
if (flag == 0)
mL.push_back (i_node, i_node, 0.0);
}
}
}
//compute minimum length of the surrounding edges
CalculateEdgeLengths (mr_model_part.Nodes() );
//set the pressure projection to the body force value
array_1d<double,3> temp = mRho * mBodyForce;
for (ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin();
inode != mr_model_part.NodesEnd();
inode++)
inode->FastGetSolutionStepValue (PRESS_PROJ) = temp;
mr_matrix_container.FillScalarFromDatabase (POROSITY, mEps, mr_model_part.Nodes() );
//verify that neither h_min nor havg are 0 (or absurdly large)
for (unsigned int i_node=0; i_node<mHmin.size(); i_node++)
{
if (mHmin[i_node] < 1e-20) KRATOS_THROW_ERROR ( std::logic_error,"hmin too small on node ",i_node+1)
if (mHavg[i_node] < 1e-20) KRATOS_THROW_ERROR ( std::logic_error,"havg too small on node ",i_node+1)
if (mHmin[i_node] > 1e20) KRATOS_THROW_ERROR ( std::logic_error,"hmin too big on node ",i_node+1)
if (mHavg[i_node] > 1e20) KRATOS_THROW_ERROR ( std::logic_error,"havg too big on node ",i_node+1)
}
// sanity check: valid element ids and strictly positive areas/volumes
for (ModelPart::ElementsContainerType::iterator it=mr_model_part.ElementsBegin(); it!=mr_model_part.ElementsEnd(); it++)
{
if (it->Id() < 1)
{
KRATOS_THROW_ERROR (std::logic_error, "Element found with Id 0 or negative","")
}
double elem_vol = 0.0;
if (TDim == 2)
elem_vol = it->GetGeometry().Area();
else
elem_vol = it->GetGeometry().Volume();
if (elem_vol <= 0)
{
std::cout << "error on element -> " << it->Id() << std::endl;
KRATOS_THROW_ERROR (std::logic_error, "Area can not be lesser than 0","")
}
}
KRATOS_CATCH ("")
}
// Override the shock-capturing coefficient (constructor default is 0.7).
void SetShockCapturingCoefficient (double coeff)
{
mshock_coeff = coeff;
}
// Refresh all cached nodal vectors (viscosity, porosity, pressure, distance,
// velocity, pressure projection -- current and previous step) from the Kratos
// nodal database.
void GatherValues()
{
KRATOS_TRY
mr_matrix_container.FillScalarFromDatabase (VISCOSITY, mViscosity, mr_model_part.Nodes() );
mr_matrix_container.FillScalarFromDatabase (POROSITY, mEps, mr_model_part.Nodes() );
mr_matrix_container.FillScalarFromDatabase (PRESSURE, mPn1, mr_model_part.Nodes() );
mr_matrix_container.FillScalarFromDatabase (DISTANCE, mdistances, mr_model_part.Nodes() );
mr_matrix_container.FillVectorFromDatabase (VELOCITY, mvel_n1, mr_model_part.Nodes() );
mr_matrix_container.FillVectorFromDatabase(PRESS_PROJ, mXi, mr_model_part.Nodes());
// previous-step (old) values
mr_matrix_container.FillOldVectorFromDatabase (VELOCITY, mvel_n, mr_model_part.Nodes() );
mr_matrix_container.FillOldScalarFromDatabase (PRESSURE, mPn, mr_model_part.Nodes() );
KRATOS_CATCH("")
}
//***************************************
//function to set adequate time step size
double ComputeTimeStep (const double CFLNumber, const double MaxDt)
{
KRATOS_TRY
//save the maximum time step
max_dt = MaxDt;
//local variable for time step size
//getting value of current velocity and of viscosity
// mr_matrix_container.FillScalarFromDatabase (VISCOSITY, mViscosity, mr_model_part.Nodes() );
// mr_matrix_container.FillScalarFromDatabase (POROSITY, mEps, mr_model_part.Nodes() );
// mr_matrix_container.FillScalarFromDatabase (PRESSURE, mPn1, mr_model_part.Nodes() );
// mr_matrix_container.FillScalarFromDatabase (DISTANCE, mdistances, mr_model_part.Nodes() );
// mr_matrix_container.FillVectorFromDatabase (VELOCITY, mvel_n1, mr_model_part.Nodes() );
// mr_matrix_container.FillVectorFromDatabase(PRESS_PROJ, mXi, mr_model_part.Nodes());
//
// mr_matrix_container.FillOldVectorFromDatabase (VELOCITY, mvel_n, mr_model_part.Nodes() );
// mr_matrix_container.FillOldScalarFromDatabase (PRESSURE, mPn, mr_model_part.Nodes() );
// mr_matrix_container.FillScalarFromDatabase(DIAMETER, mD, mr_model_part.Nodes());
// mr_matrix_container.FillScalarFromDatabase (LIN_DARCY_COEF, mA, mr_model_part.Nodes() );
// mr_matrix_container.FillScalarFromDatabase (NONLIN_DARCY_COEF, mB, mr_model_part.Nodes() );
// double delta_t_i = delta_t;
//*******************
//loop over all nodes
int n_nodes = static_cast<int>(mvel_n1.size());
unsigned int n_proc = OpenMPUtils::GetNumThreads();
Vector dt_avg_vec(n_proc,1e10);
Vector dt_vec(n_proc,1e10);
Vector dt_avg_novisc_vec(n_proc,1e10);
#pragma omp parallel for firstprivate(n_nodes)
for (int i_node = 0; i_node < n_nodes; i_node++)
{
unsigned int my_id = OpenMPUtils::ThisThread();
double& delta_t = dt_vec[my_id];
double& mdelta_t_avg = dt_avg_vec[my_id];
double& delta_t_avg_novisc = dt_avg_novisc_vec[my_id];
const array_1d<double, TDim>& v_i = mvel_n1[i_node];
const double havg_i = mHavg[i_node];
const double hmin_i = mHmin[i_node];
const double eps_i = mEps[i_node];
//const double d_i = mD[i_node];
double nu = mViscosity[i_node];
// const double lindarcy_i = mA[i_node];
// const double nonlindarcy_i = mB[i_node];
double vel_norm = norm_2 (v_i);
//double porosity_coefficient = ComputePorosityCoefficient(nu, vel_norm, eps_i, d_i);
// double porosity_coefficient = ComputePorosityCoefficient( vel_norm, eps_i, lindarcy_i, nonlindarcy_i);
vel_norm /= eps_i;
//use CFL condition to compute time step size
double delta_t_i = 1.0 / (vel_norm /hmin_i + nu / (hmin_i * hmin_i) /*+ porosity_coefficient*/);
double delta_t_i_avg = 1.0 / (vel_norm /havg_i + nu / (havg_i * havg_i) /*+ porosity_coefficient*/);
double delta_t_i_avg_novisc = 1.0 / (2.0 * vel_norm /havg_i );
//considering the most restrictive case of neighbor's velocities with similar direction but opposite sense.
//loop over all neighbours
for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex() [i_node]; csr_index != mr_matrix_container.GetRowStartIndex() [i_node + 1]; csr_index++)
{
//get global index of neighbouring node j
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex() [csr_index];
const array_1d<double, TDim>& v_j = mvel_n1[j_neighbour];
double v_diff_norm = 0.0;
for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
{
double temp = v_i[l_comp] - v_j[l_comp];
v_diff_norm += temp*temp;
}
v_diff_norm = sqrt (v_diff_norm);
v_diff_norm /= eps_i;
double delta_t_j = 1.0 / (v_diff_norm /havg_i + 4.0 * nu / (havg_i * havg_i) );
// double delta_t_j = 1.0 / (2.0 * v_diff_norm /hmin_i + 4.0 * nu / (hmin_i * hmin_i) );
double delta_t_j_avg_novisc = 1.0 / (2.0 * v_diff_norm /havg_i );
if (delta_t_j < delta_t_i)
delta_t_i = delta_t_j;
if (delta_t_j_avg_novisc < delta_t_i_avg_novisc)
delta_t_i_avg_novisc = delta_t_j_avg_novisc;
// if ((v_i_par >= 0.0 && v_j_par <= 0.0) || (v_i_par <= 0.0 && v_j_par >= 0.0))
// {
// double delta_t_j = CFLNumber * 1.0 / (2.0 * norm_2(v_diff) /hmin_i + 4.0 * mViscosity / (hmin_i * hmin_i));
//// double delta_t_j = CFLNumber / ((fabs(v_i_par) + fabs(v_j_par)) / mHmin[i_node] + 2.0 * mViscosity / (mHmin[i_node] * mHmin[i_node]));
// // KRATOS_WATCH(delta_t_j);
// // KRATOS_WATCH(delta_t_i);
// if (delta_t_j < delta_t_i)
// delta_t_i = delta_t_j;
// }
}
//choose the overall minimum of delta_t_i
if (delta_t_i < delta_t)
delta_t = delta_t_i;
if (delta_t_i_avg < mdelta_t_avg)
mdelta_t_avg = delta_t_i_avg;
if (delta_t_i_avg_novisc < delta_t_avg_novisc)
delta_t_avg_novisc = delta_t_i_avg_novisc;
}
//finalizing parallel computations
double delta_t = dt_vec[0];
mdelta_t_avg = dt_avg_vec[0];
double delta_t_avg_novisc = dt_avg_novisc_vec[0];
for(unsigned int i=1; i<dt_vec.size(); i++)
{
if(delta_t > dt_vec[i]) delta_t = dt_vec[i];
if(mdelta_t_avg > dt_vec[i]) mdelta_t_avg = dt_avg_vec[i];
if(delta_t_avg_novisc > dt_vec[i]) delta_t_avg_novisc = dt_avg_novisc_vec[i];
}
//take into account wall law in the estimation
// int slip_size = mSlipBoundaryList.size();
// for (int i_slip = 0; i_slip < slip_size; i_slip++)
// {
// unsigned int i_node = mSlipBoundaryList[i_slip];
// double nu = mViscosity[i_node];
//
// double delta_t_i = 0.25*mY_wall*mY_wall/nu;
//
// // Reducing wall friction for the large element near wall. Pooyan.
// double reducing_factor = 1.00;
// double h_min = mHavg[i_node];
// if(mY_wall < h_min)
// reducing_factor = mY_wall / h_min;
// delta_t_i /= reducing_factor;
//
// if (delta_t_i < delta_t)
// delta_t = delta_t_i;
// }
// mdelta_t_avg = delta_t; //this should not be done ... remove it or decide what to do...
delta_t_avg_novisc *= CFLNumber;
//
mnumsubsteps = ceil (delta_t_avg_novisc/delta_t);
// mnumsubsteps += 1; //this is for security
// delta_t *= CFLNumber;
if (mnumsubsteps <= 1)
{
mnumsubsteps=1;
delta_t_avg_novisc = delta_t;
}
//std::cout << "mdelta_t_avg =" << mdelta_t_avg <<std::endl;
//std::cout << "delta_t =" << delta_t <<std::endl;
//std::cout << "mnumsubsteps =" << mnumsubsteps <<std::endl;
delta_t = delta_t_avg_novisc;
// delta_t *= CFLNumber;
//*******************
//perform MPI synchronization of the dt (minimum should be kept)
return delta_t;
KRATOS_CATCH ("")
}
/// Activate the Smagorinsky turbulence model on top of the molecular viscosity.
/// A zero Smagorinsky constant Cs disables the model entirely (no-op).
/// Only the 3D implementation exists; requesting it in 2D is an error.
void ApplySmagorinsky (double MolecularViscosity, double Cs)
{
    if (Cs == 0)
        return; // model switched off

    if (TDim != 3)
        KRATOS_THROW_ERROR (std::logic_error,"smagorinsky not yet implemented in 2D","");

    ApplySmagorinsky3D (MolecularViscosity, Cs);
}
void UpdateFixedVelocityValues()
{
KRATOS_TRY
//read velocity and pressure data from Kratos
// ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes();
// mr_matrix_container.FillVectorFromDatabase (VELOCITY, mvel_n1, rNodes);
int fixed_size = mFixedVelocities.size();
#pragma omp parallel for firstprivate(fixed_size)
for (int i_velocity = 0; i_velocity < fixed_size; i_velocity++)
{
unsigned int i_node = mFixedVelocities[i_velocity];
array_1d<double, TDim>& u_i_fix = mFixedVelocitiesValues[i_velocity];
const array_1d<double, TDim>& u_i = mvel_n1[i_node];
for (unsigned int comp = 0; comp < TDim; comp++)
u_i_fix[comp] = u_i[comp];
}
KRATOS_CATCH ("");
}
//**********************************************************************************
//function to solve fluid equations - fractional step 1: compute fractional momentum
void SolveStep1()
{
KRATOS_TRY
// Fractional step 1: advance the momentum equation over one time step with an
// explicit 4th-order Runge-Kutta scheme, producing the fractional (intermediate)
// velocity in mvel_n1. The step is subdivided into substeps; if the kinetic
// energy of the fluid grows by more than 50% the whole step is repeated once
// with 10x as many substeps (see the while loop below).
//PREREQUISITES
//variables for node based data handling
ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes();
int n_nodes = rNodes.size();
//storage of nodal values in local variables
CalcVectorType rhs;
rhs.resize (n_nodes);
//read velocity and pressure data from Kratos
// mr_matrix_container.FillVectorFromDatabase (VELOCITY, mvel_n1, rNodes);
// mr_matrix_container.FillOldVectorFromDatabase (VELOCITY, mvel_n, rNodes);
// mr_matrix_container.FillScalarFromDatabase (VISCOSITY, mViscosity, rNodes);
// mr_matrix_container.FillScalarFromDatabase (PRESSURE, mPn1, rNodes);
// mr_matrix_container.FillOldScalarFromDatabase (PRESSURE, mPn, rNodes);
mr_matrix_container.FillScalarFromDatabase (DISTANCE, mdistances, mr_model_part.Nodes() );
// mr_matrix_container.FillScalarFromDatabase(DIAMETER, mD, mr_model_part.Nodes());
mr_matrix_container.FillScalarFromDatabase (POROSITY, mEps, mr_model_part.Nodes() );
mr_matrix_container.FillScalarFromDatabase (LIN_DARCY_COEF, mA, mr_model_part.Nodes() );
mr_matrix_container.FillScalarFromDatabase (NONLIN_DARCY_COEF, mB, mr_model_part.Nodes() );
//read time step size from Kratos
ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
double delta_t = CurrentProcessInfo[DELTA_TIME];
//compute intrinsic time
double time_inv_avg = 1.0/mdelta_t_avg;
// if(mmax_dt < mdelta_t_avg) mmax_dt = mdelta_t_avg;
// double time_inv_avg = 1.0/mmax_dt;
double stabdt_pressure_factor = mstabdt_pressure_factor;
double stabdt_convection_factor = mstabdt_convection_factor;
//double tau2_factor = mtau2_factor;
// Compute per-node stabilization parameters: mTauPressure for the pressure
// equation and mTauConvection for the convective stabilization. Velocities are
// divided by the porosity eps to obtain the intrinsic (interstitial) velocity.
#pragma omp parallel for firstprivate(time_inv_avg,stabdt_pressure_factor,stabdt_convection_factor)
for (int i_node = 0; i_node < n_nodes; i_node++)
{
// double& h_i = mHavg[i_node];
double& h_avg_i = mHavg[i_node];
double& h_min_i = mHmin[i_node];
array_1d<double, TDim>& a_i = mvel_n1[i_node];
const double nu_i = mViscosity[i_node];
const double eps_i = mEps[i_node];
//const double d_i = mD[i_node];
const double lindarcy_i = mA[i_node];
const double nonlindarcy_i = mB[i_node];
double vel_norm = norm_2 (a_i);
//double porosity_coefficient = ComputePorosityCoefficient(nu_i, vel_norm, eps_i, d_i);
double porosity_coefficient = ComputePorosityCoefficient (vel_norm, eps_i, lindarcy_i, nonlindarcy_i);
vel_norm /= eps_i;
// tau combines convective, transient, viscous and Darcy (porosity) limits
double tau = 1.0 / (2.0 * vel_norm / h_min_i + stabdt_pressure_factor*time_inv_avg + (4.0*nu_i) / (h_avg_i * h_avg_i) + porosity_coefficient);
double tau_conv = 1.0 / (2.0 * vel_norm / h_min_i + stabdt_convection_factor*time_inv_avg );
mTauPressure[i_node] = tau;
mTauConvection[i_node] = tau_conv;
// mTau2[i_node] = (nu_i + h_avg_i*vel_norm*0.5) *tau2_factor;
}
// //smoothen the tau press - mTau2 used as temp var
// #pragma omp parallel for
// for (int i_node = 0; i_node < n_nodes; i_node++)
// {
// double& tau = mTau2[i_node]; //******************
// tau = mTauPressure[i_node];
// double counter = 1.0;
// //const double& p_i = pressure[i_node];
// for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex() [i_node]; csr_index != mr_matrix_container.GetRowStartIndex() [i_node + 1]; csr_index++)
// {
// unsigned int j_neighbour = mr_matrix_container.GetColumnIndex() [csr_index];
// tau += mTauPressure[j_neighbour];
// counter+=1.0;
// }
// tau/=counter;
// }
//
// mTauPressure = mTau2;
//calculating the convective projection
// mPi accumulates, per node, the edge contributions of the convective term
// (used later as the "high order" term of the stabilization).
#pragma omp parallel for
for (int i_node = 0; i_node < n_nodes; i_node++)
{
array_1d<double, TDim>& pi_i = mPi[i_node]; //******************
//setting to zero
for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
pi_i[l_comp] = 0.0;
array_1d<double, TDim> a_i = mvel_n1[i_node];
const array_1d<double, TDim>& U_i = mvel_n1[i_node];
const double& eps_i = mEps[i_node];
// convective velocity is the intrinsic one (divided by porosity)
a_i /= eps_i;
//const double& p_i = pressure[i_node];
for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex() [i_node]; csr_index != mr_matrix_container.GetRowStartIndex() [i_node + 1]; csr_index++)
{
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex() [csr_index];
array_1d<double, TDim> a_j = mvel_n1[j_neighbour];
const array_1d<double, TDim>& U_j = mvel_n1[j_neighbour];
const double& eps_j = mEps[j_neighbour];
a_j /= eps_j;
CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues() [csr_index];
edge_ij.Add_ConvectiveContribution (pi_i, a_i, U_i, a_j, U_j);
// edge_ij.Add_grad_p(pi_i, p_i, p_j);
}
// const double m_inv = mr_matrix_container.GetInvertedMass() [i_node];
// for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
// pi_i[l_comp] *= m_inv;
}
// Add the inlet/outlet boundary contribution (U.n)*U to the projection.
// NOTE(review): this serial loop writes into mPi before the mass scaling below.
int inout_size = mInOutBoundaryList.size();
//#pragma omp parallel for firstprivate(slip_size)
for (int i = 0; i < inout_size; i++)
{
unsigned int i_node = mInOutBoundaryList[i];
// double dist = mdistances[i_node];
// if (dist <= 0.0)
// {
const array_1d<double, TDim>& U_i = mvel_n1[i_node];
const array_1d<double, TDim>& an_i = mInOutNormal[i_node];
double projection_length = 0.0;
//double Ain = 0.0;
for (unsigned int comp = 0; comp < TDim; comp++)
{
projection_length += U_i[comp] * an_i[comp];
}
array_1d<double, TDim>& pi_i = mPi[i_node];
for (unsigned int comp = 0; comp < TDim; comp++)
pi_i[comp] += projection_length * U_i[comp] ;
// }
}
// Finalize the projection by multiplying with the inverse lumped mass.
#pragma omp parallel for
for (int i_node = 0; i_node < n_nodes; i_node++)
{
array_1d<double, TDim>& pi_i = mPi[i_node];
const double m_inv = mr_matrix_container.GetInvertedMass() [i_node];
for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
pi_i[l_comp] *= m_inv;
}
// //completing with boundary integrals
// //loop over all faces
// for (ModelPart::ConditionsContainerType::iterator cond_it = mr_model_part.ConditionsBegin(); cond_it != mr_model_part.ConditionsEnd(); cond_it++)
// {
// //get geometry data of the face
// Geometry<Node < 3 > >& face_geometry = cond_it->GetGeometry();
//
// //reference for area normal of the face
// array_1d<double, 3 > & face_normal = cond_it->GetValue(NORMAL);
// double A = norm_2(face_normal);
//
// unsigned int i_node0 = static_cast<unsigned int> (face_geometry[0].FastGetSolutionStepValue(AUX_INDEX));
// unsigned int i_node1 = static_cast<unsigned int> (face_geometry[1].FastGetSolutionStepValue(AUX_INDEX));
// unsigned int i_node2 = static_cast<unsigned int> (face_geometry[2].FastGetSolutionStepValue(AUX_INDEX));
//
// if(face_geometry[0].IsFixed(VELOCITY_X) && face_geometry[1].IsFixed(VELOCITY_X) && face_geometry[2].IsFixed(VELOCITY_X))
// {
//
// //KRATOS_WATCH(cond_it->Id());
// // if (static_cast<bool>(cond_it->GetValue(IS_STRUCTURE)) == false)
// //{
// const array_1d<double,TDim>& v_0 = mvel_n1[i_node0];
// const array_1d<double,TDim>& v_1 = mvel_n1[i_node1];
// const array_1d<double,TDim>& v_2 = mvel_n1[i_node2];
// double An0 = inner_prod(v_0,face_normal) / (A*mEps[i_node0]);
// double An1 = inner_prod(v_1,face_normal) / (A*mEps[i_node1]);
// double An2 = inner_prod(v_2,face_normal) / (A*mEps[i_node2]);
// //KRATOS_WATCH(face_normal);
// mPi[i_node0] -= ((2.0*An0+An1+An2)*0.5*0.333333333333333333333333333333*0.5)*face_normal;
// mPi[i_node1] -= ((An0+2.0*An1+An2)*0.5*0.333333333333333333333333333333*0.5)*face_normal;
// mPi[i_node2] -= ((An0+An1+2.0*An2)*0.5*0.333333333333333333333333333333*0.5)*face_normal;
// }
// //}
// }
//
//
// //calculating the convective projection
// #pragma omp parallel for
// for (int i_node = 0; i_node < n_nodes; i_node++)
// {
// array_1d<double, TDim>& pi_i = mPi[i_node]; //******************
// const double m_inv = mr_matrix_container.GetInvertedMass() [i_node];
// for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
// pi_i[l_comp] *= m_inv;
// }
//
//
// KRATOS_WATCH("step1 before rk loop")
// KRATOS_WATCH(mnumsubsteps)
// KRATOS_WATCH(mPn)
// KRATOS_WATCH(mPn1)
// KRATOS_WATCH(mPi)
// KRATOS_WATCH(mvel_n1)
// KRATOS_WATCH(mvel_n)
#ifdef DEBUG_OUTPUT
KRATOS_WATCH("before RK of step1 - new")
double aux_v=0.0;
for (int i_node = 0; i_node < mvel_n1.size(); i_node++)
aux_v += inner_prod(mvel_n1[i_node],mvel_n1[i_node]);
double aux_oldv=0.0;
for (int i_node = 0; i_node < mvel_n1.size(); i_node++)
aux_oldv += inner_prod(mvel_n[i_node],mvel_n[i_node]);
double aux_pi=0.0;
for (int i_node = 0; i_node < mvel_n1.size(); i_node++)
aux_pi += inner_prod(mPi[i_node],mPi[i_node]);
KRATOS_WATCH(inner_prod(mPn,mPn));
KRATOS_WATCH(aux_v);
KRATOS_WATCH(aux_oldv);
KRATOS_WATCH(aux_pi);
KRATOS_WATCH(inner_prod(mdistances,mdistances));
KRATOS_WATCH(inner_prod(mViscosity,mViscosity));
#endif
// Save the step-n velocity so it can be restored after each attempt.
CalcVectorType auxn = mvel_n;
// NOTE(review): n_substeps is a double used as substep count / loop bound.
double n_substeps = mnumsubsteps+1;
double reduced_it = 0;
double energy_initial = 0.0;
double energy_final = 1.0;
//compute initial kinetic energy
#pragma omp parallel for firstprivate(n_nodes) reduction(+:energy_initial)
for (int i_node = 0; i_node < n_nodes; i_node++)
if (mdistances[i_node] <= 0.0)
energy_initial += mr_matrix_container.GetLumpedMass()[i_node] * inner_prod(mvel_n[i_node],mvel_n[i_node]);
//KRATOS_WATCH(energy_initial)
// KRATOS_WATCH(n_substeps)
// At most two attempts: the second one (with 10x substeps) runs only if the
// energy check at the bottom of the loop fails on the first attempt.
while(reduced_it++ < 2 )
{
double delta_t_substep = delta_t/n_substeps;
for (unsigned int substep = 0; substep<n_substeps; substep++)
{
//std::cout << "substep " << substep+1 << " of " << n_substeps << std::endl;
mr_matrix_container.AssignVectorToVector (mvel_n, mWork); //mWork = mvel_n
//first step of Runge Kutta
mr_matrix_container.AssignVectorToVector (mvel_n, mvel_n1); //mvel_n1 = mvel_n
mr_matrix_container.SetToZero (rhs);
CalculateRHS (mvel_n1, mPn, mvel_n1, rhs,mdiag_stiffness);
Add_Effective_Inverse_Multiply (mWork, mWork, delta_t_substep / 6.0, mr_matrix_container.GetLumpedMass(),mdiag_stiffness,rhs);
Add_Effective_Inverse_Multiply (mvel_n1, mvel_n, 0.5 * delta_t_substep, mr_matrix_container.GetLumpedMass(),mdiag_stiffness, rhs);
ApplyVelocityBC (mvel_n1);
//second step
mr_matrix_container.SetToZero (rhs);
CalculateRHS (mvel_n1, mPn, mvel_n1, rhs,mdiag_stiffness);
Add_Effective_Inverse_Multiply (mWork, mWork, delta_t_substep / 3.0, mr_matrix_container.GetLumpedMass(),mdiag_stiffness, rhs);
Add_Effective_Inverse_Multiply (mvel_n1, mvel_n, 0.5 * delta_t_substep, mr_matrix_container.GetLumpedMass(),mdiag_stiffness, rhs);
ApplyVelocityBC (mvel_n1);
//third step
mr_matrix_container.SetToZero (rhs);
CalculateRHS (mvel_n1, mPn, mvel_n1, rhs,mdiag_stiffness);
Add_Effective_Inverse_Multiply (mWork, mWork, delta_t_substep / 3.0, mr_matrix_container.GetLumpedMass(),mdiag_stiffness, rhs);
Add_Effective_Inverse_Multiply (mvel_n1, mvel_n, delta_t_substep, mr_matrix_container.GetLumpedMass(),mdiag_stiffness, rhs);
ApplyVelocityBC (mvel_n1);
//fourth step
mr_matrix_container.SetToZero (rhs);
CalculateRHS (mvel_n1, mPn, mvel_n1, rhs,mdiag_stiffness);
Add_Effective_Inverse_Multiply (mWork, mWork, delta_t_substep / 6.0, mr_matrix_container.GetLumpedMass(),mdiag_stiffness, rhs);
//compute right-hand side
mr_matrix_container.AssignVectorToVector (mWork, mvel_n1);
ApplyVelocityBC (mvel_n1);
//prepare for next step
mr_matrix_container.AssignVectorToVector (mvel_n1, mvel_n);
}
energy_final = 0.0;
//compute final kinetic energy (same measure as energy_initial above)
#pragma omp parallel for firstprivate(n_nodes) reduction(+:energy_final)
for (int i_node = 0; i_node < n_nodes; i_node++)
if (mdistances[i_node] <= 0.0)
energy_final += mr_matrix_container.GetLumpedMass()[i_node] * inner_prod(mvel_n1[i_node],mvel_n1[i_node]);
//put back the original velocity at step n
mr_matrix_container.AssignVectorToVector (auxn, mvel_n);
// accept the result unless the kinetic energy grew by more than 50%
if(energy_final < 1.5*energy_initial) break;
else n_substeps*=10;
if(reduced_it > 1)
{
KRATOS_WATCH(energy_initial)
KRATOS_WATCH(energy_final)
KRATOS_WATCH(n_substeps)
}
}
// mr_matrix_container.WriteVectorToDatabase (VELOCITY, mvel_n1, mr_model_part.Nodes() );
// KRATOS_WATCH("end of step1")
// KRATOS_WATCH(mvel_n1)
// KRATOS_WATCH(mvel_n)
#ifdef DEBUG_OUTPUT
KRATOS_WATCH("end of step1 - new")
aux_v=0.0;
for (int i_node = 0; i_node < mvel_n1.size(); i_node++)
aux_v += inner_prod(mvel_n1[i_node],mvel_n1[i_node]);
double aux_xi=0.0;
for (int i_node = 0; i_node < mvel_n1.size(); i_node++)
aux_xi += inner_prod(mXi[i_node],mXi[i_node]);
KRATOS_WATCH(inner_prod(mPn,mPn));
KRATOS_WATCH(inner_prod(mdistances,mdistances));
KRATOS_WATCH(inner_prod(mViscosity,mViscosity));
KRATOS_WATCH(aux_v);
KRATOS_WATCH(aux_xi);
#endif
KRATOS_CATCH ("")
}
//*********************************************************************
//function to calculate right-hand side of fractional momentum equation
// Assembles the right-hand side of the fractional momentum equation for every
// node inside the fluid (distance <= 0), edge by edge: body force, convection,
// pressure gradient, viscous term and convection stabilization.
// Parameters:
//   vel                 - velocity used for the momentum terms (U)
//   pressure            - nodal pressures used for the gradient term
//   convective_velocity - velocity convecting the flow (divided by porosity below)
//   rhs                 - output: assembled nodal right-hand side
//   diag_stiffness      - output: diagonal Darcy resistance, m_i * porosity_coefficient
void CalculateRHS (
const CalcVectorType& vel,
const ValuesVectorType& pressure,
const CalcVectorType& convective_velocity,
CalcVectorType& rhs,
ValuesVectorType& diag_stiffness)
{
KRATOS_TRY
int n_nodes = vel.size();
//perform MPI synchronization
//calculating the RHS
array_1d<double, TDim> stab_low;
array_1d<double, TDim> stab_high;
double inverse_rho = 1.0 / mRho;
#pragma omp parallel for private(stab_low,stab_high)
for (int i_node = 0; i_node < n_nodes; i_node++)
{
double dist = mdistances[i_node];
if (dist <= 0.0) //node is inside domain ---- if outside do nothing
{
const double nu_i = mViscosity[i_node];
const double nu_j = nu_i;
array_1d<double, TDim>& rhs_i = rhs[i_node];
const array_1d<double, TDim>& f_i = mBodyForce;
array_1d<double, TDim> a_i = convective_velocity[i_node];
const array_1d<double, TDim>& U_i = vel[i_node];
const array_1d<double, TDim>& pi_i = mPi[i_node];
const double& p_i = pressure[i_node];
const double& eps_i = mEps[i_node];
const double lindarcy_i = mA[i_node];
const double nonlindarcy_i = mB[i_node];
double edge_tau = mTauConvection[i_node];
// intrinsic (interstitial) convective velocity
a_i /= eps_i;
//initializing with the external forces (e.g. gravity)
double& m_i = mr_matrix_container.GetLumpedMass() [i_node];
for (unsigned int comp = 0; comp < TDim; comp++)
rhs_i[comp] = m_i * eps_i * f_i[comp] ;
//applying the effect of the porosity
double porosity_coefficient = ComputePorosityCoefficient ( norm_2 (U_i), eps_i, lindarcy_i, nonlindarcy_i);
diag_stiffness[i_node]= m_i * porosity_coefficient;
//std::cout << i_node << "rhs =" << rhs_i << "after adding body force" << std::endl;
//convective term
// loop over the CSR edges of node i, subtracting each edge contribution
for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex() [i_node]; csr_index != mr_matrix_container.GetRowStartIndex() [i_node + 1]; csr_index++)
{
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex() [csr_index];
array_1d<double, TDim> a_j = convective_velocity[j_neighbour];
const array_1d<double, TDim>& U_j = vel[j_neighbour];
const array_1d<double, TDim>& pi_j = mPi[j_neighbour];
const double& p_j = pressure[j_neighbour];
const double& eps_j = mEps[j_neighbour];
// const double& beta_j = mBeta[j_neighbour];
a_j /= eps_j;
CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues() [csr_index];
edge_ij.Sub_ConvectiveContribution (rhs_i, a_i, U_i, a_j, U_j);
//std::cout << i_node << "rhs =" << rhs_i << "after convective contrib" << std::endl;
//take care! we miss including a B.C. for the external pressure
//edge_ij.Add_Gp (rhs_i,p_i*inverse_rho,p_j*inverse_rho);
// pressure gradient scaled by 1/rho and the porosity of node i
edge_ij.Sub_grad_p(rhs_i, p_i*inverse_rho*eps_i, p_j * inverse_rho*eps_i);
// edge_ij.Add_grad_p(rhs_i, p_i*inverse_rho, p_j * inverse_rho);
//std::cout << i_node << "rhs =" << rhs_i << "after Gp" << std::endl;
edge_ij.Sub_ViscousContribution (rhs_i, U_i, nu_i, U_j, nu_j);
// edge_ij.Add_ViscousContribution(rhs_i, U_i, nu_i, U_j, nu_j);
//std::cout << i_node << "rhs =" << rhs_i << "after viscous" << std::endl;
//add stabilization
edge_ij.CalculateConvectionStabilization_LOW (stab_low, a_i, U_i, a_j, U_j);
// edge_ij.CalculateConvectionStabilization_LOW(stab_low, a_i, U_i,p_i, a_j, U_j,p_j);
edge_ij.CalculateConvectionStabilization_HIGH (stab_high, a_i, pi_i, a_j, pi_j);
// double beta = 1.0;
// double beta = beta_i;
// if(beta_j > beta)
// beta = beta_j;
// beta = 1.0;
// edge_ij.Sub_StabContribution(rhs_i, edge_tau*beta, 1.0, stab_low, stab_high);
// edge_ij.Sub_StabContribution(rhs_i, edge_tau, (1.0-beta), stab_low, stab_high);
edge_ij.Sub_StabContribution (rhs_i, edge_tau, 1.0, stab_low, stab_high);
}
// std::cout << i_node << "rhs =" << rhs_i << std::endl;
}
}
// Inlet/outlet boundary contribution (U.n)*U added to the rhs.
// NOTE(review): this serial loop always uses mvel_n1, not the 'vel' argument.
int inout_size = mInOutBoundaryList.size();
//#pragma omp parallel for firstprivate(slip_size)
for (int i = 0; i < inout_size; i++)
{
unsigned int i_node = mInOutBoundaryList[i];
// double dist = mdistances[i_node];
// if (dist <= 0.0)
// {
const array_1d<double, TDim>& U_i = mvel_n1[i_node];
const array_1d<double, TDim>& an_i = mInOutNormal[i_node];
double projection_length = 0.0;
// NOTE(review): Ain (squared normal length) is computed but never used here.
double Ain = 0.0;
for (unsigned int comp = 0; comp < TDim; comp++)
{
projection_length += U_i[comp] * an_i[comp];
Ain += an_i[comp]*an_i[comp];
}
array_1d<double, TDim>& rhs_i = rhs[i_node];
for (unsigned int comp = 0; comp < TDim; comp++)
rhs_i[comp] += projection_length * U_i[comp] ;
// }
}
/* for (int i = 0; i < mSlipBoundaryList.size(); i++)
{
int i_node = mSlipBoundaryList[i];
double dist = mdistances[i_node];
if (dist <= 0.0) //node is inside domain ---- if outside do nothing
{
const double& p_i = pressure[i_node];
const array_1d<double,3>& Ani = mSlipNormal[i_node];
array_1d<double, TDim>& rhs_i = rhs[i_node];
array_1d<double, TDim> temp;
for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
temp[l_comp] = 0.0;
for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
{
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
if(mdistances[j_neighbour] <= 0.0 && mis_slip[j_neighbour] == true)
{
//const double& p_j = pressure[j_neighbour];
array_1d<double,3> Anj = mSlipNormal[j_neighbour];
Anj /= norm_2(Anj);
for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
temp[l_comp] += p_i*Anj[l_comp];
}
}
//take out part in the direction of Ani
double Ai = norm_2(Ani);
double aux = 0.0;
for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
aux += temp[l_comp]*Ani[l_comp];
for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
temp[l_comp] -= aux *Ani[l_comp] / (Ai*Ai);
for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
rhs_i[l_comp] -= 0.25*Ai*temp[l_comp];
}
}*/
// KRATOS_WATCH("finished**************************************************") */
/*
//correction to the pressure gradient
//loop over all faces
CalcVectorType press_correction(vel.size());
mr_matrix_container.SetToZero(press_correction);
// mr_matrix_container.SetToZero(slip_area);
for (ModelPart::ConditionsContainerType::iterator cond_it = mr_model_part.ConditionsBegin(); cond_it != mr_model_part.ConditionsEnd(); cond_it++)
{
//get geometry data of the face
Geometry<Node < 3 > >& face_geometry = cond_it->GetGeometry();
//reference for area normal of the face
array_1d<double, 3 > & face_normal = cond_it->GetValue(NORMAL);
double A = norm_2(face_normal);
unsigned int i_node0 = static_cast<unsigned int> (face_geometry[0].FastGetSolutionStepValue(AUX_INDEX));
unsigned int i_node1 = static_cast<unsigned int> (face_geometry[1].FastGetSolutionStepValue(AUX_INDEX));
unsigned int i_node2 = static_cast<unsigned int> (face_geometry[2].FastGetSolutionStepValue(AUX_INDEX));
if (static_cast<bool>(cond_it->GetValue(IS_STRUCTURE)) == true)
{
const double& p_0 = pressure[i_node0];
const double& p_1 = pressure[i_node1];
const double& p_2 = pressure[i_node2];
//TODO: we should only keep the part orthogonal to the external normal on each node!!!!
press_correction[i_node0] -= ((2.0*p_0+p_1+p_2)*0.5*0.333333333333333333333333333333*0.5*inverse_rho)*face_normal;
press_correction[i_node1] -= ((p_0+2.0*p_1+p_2)*0.5*0.333333333333333333333333333333*0.5*inverse_rho)*face_normal;
press_correction[i_node2] -= ((p_0+p_1+2.0*p_2)*0.5*0.333333333333333333333333333333*0.5*inverse_rho)*face_normal;
}
else
{
const array_1d<double,TDim>& v_0 = vel[i_node0];
const array_1d<double,TDim>& v_1 = vel[i_node1];
const array_1d<double,TDim>& v_2 = vel[i_node2];
double An0 = inner_prod(v_0,face_normal) / (A*A);
double An1 = inner_prod(v_1,face_normal) / (A*A);
double An2 = inner_prod(v_2,face_normal) / (A*A);
rhs[i_node0] -= ((2.0*An0+An1+An2)*0.5*0.333333333333333333333333333333*0.5)*face_normal;
rhs[i_node1] -= ((An0+2.0*An1+An2)*0.5*0.333333333333333333333333333333*0.5)*face_normal;
rhs[i_node2] -= ((An0+An1+2.0*An2)*0.5*0.333333333333333333333333333333*0.5)*face_normal;
}
}
//slip condition
int slip_size = mSlipBoundaryList.size();
#pragma omp parallel for firstprivate(slip_size)
for (int i_slip = 0; i_slip < slip_size; i_slip++)
{
unsigned int i_node = mSlipBoundaryList[i_slip];
double dist = mdistances[i_node];
if (dist <= 0.0 && mis_slip[i_node] == true)
{
array_1d<double, TDim>& rhs_i = rhs[i_node];
// array_1d<double, TDim>& an_i = mSlipNormal[i_node];
// double normalization = 0.0;
// for (unsigned int comp = 0; comp < TDim; comp++)
// {
// normalization += an_i[comp] * an_i[comp];
// }
// normalization = sqrt(normalization);
array_1d<double,TDim>& press_corr_i = press_correction[i_node];
for (unsigned int comp = 0; comp < TDim; comp++)
rhs_i[comp] += press_corr_i[comp];
//we should remove here the normal component!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
}
}
*/
//apply wall resistance
if (mWallLawIsActive == true)
ComputeWallResistance (vel,diag_stiffness);
// ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes();
// mr_matrix_container.WriteVectorToDatabase (VELOCITY, mvel_n1, rNodes);
KRATOS_CATCH ("")
}
//*************************************************************************
//function to solve fluid equations - fractional step 2: calculate pressure
int SolveStep2 (typename TLinearSolver::Pointer pLinearSolver)
{
KRATOS_TRY
// typedef Node < 3 > PointType;
// typedef PointerVector<PointType > PointVector;
// typedef PointVector::iterator PointIterator;
#pragma omp parallel for
for ( int i_node = 0; i_node < static_cast<int>(mr_model_part.Nodes().size()); i_node++)
mis_visited[i_node] = 0;
int layer_counter = -1;
boost::numeric::ublas::vector<int> layers(mr_model_part.Nodes().size());
boost::numeric::ublas::vector<int> layer_limits(3);
//Re-generate a container with LAYER 0 and LAYER 1 after convection of the free surface
layer_limits[0] = 0;
#pragma omp parallel for
for (int i_node = 0; i_node < static_cast<int>(mr_model_part.Nodes().size()); i_node++)
{
if(mdistances[i_node] < 0.0)
{
for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
{
//get global index of neighbouring node j
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
if(mdistances[j_neighbour] >= 0.0 && mis_visited[i_node] == 0 )
{
#pragma omp critical
layers[++layer_counter] = i_node;
mis_visited[i_node] = 1;
break;
}
}
}
else
mPn1[i_node] = 0.0;
}
layer_limits[1] = layer_counter;
for(unsigned int i=0; i<static_cast<unsigned int>(layer_limits[1]); i++)
{
unsigned int i_node = layers[i];
for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
{
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
if( mdistances[j_neighbour] >= 0.0 && mis_visited[j_neighbour] == 0)
{
layers[layer_counter++] = j_neighbour;
mis_visited[j_neighbour] = 2;
}
}
}
layer_limits[2] = layer_counter;
int return_value = 0;
//on the first layer outside the pressure is set to a value such that on the free surface the pressure is approx 0
#pragma omp parallel for
for( int iii=static_cast<int>(layer_limits[1]); iii<static_cast<int>(layer_limits[2]); iii++)
{
unsigned int i_node = layers[iii];
array_1d<double, TDim> grad_d;
for (unsigned int comp = 0; comp < TDim; comp++)
grad_d[comp] = 0.0;
double dist_i = mdistances[i_node];
for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex() [i_node]; csr_index != mr_matrix_container.GetRowStartIndex() [i_node + 1]; csr_index++)
{
//get global index of neighbouring node j
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex() [csr_index];
const double& dist_j = mdistances[j_neighbour];
//projection of pressure gradients
CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues() [csr_index];
edge_ij.Add_grad_p (grad_d, dist_i, dist_j);
}
const double& m_inv = mr_matrix_container.GetInvertedMass() [i_node];
for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
grad_d[l_comp] *= m_inv;
double norm_grad = norm_2 (grad_d);
if (norm_grad < 2.0)
{
if(dist_i < 0.01*mHavg[i_node] )
dist_i = 0.0;
else if(dist_i > 2.0*mHavg[i_node] )
{
KRATOS_WATCH("distance is much larger than expected!!")
dist_i = 2.0*mHavg[i_node];
}
if(norm_grad > 0.001)
{
grad_d /= norm_grad; //this is the direction of the gradient of the distances
grad_d *= dist_i; //this is the vector with the distance of node_i from the closest point on the free surface
}
else
{
KRATOS_WATCH("norm grad is very small!!!!")
grad_d *= 0.0;
}
const array_1d<double, TDim>& press_grad = mXi[i_node]; //iii->FastGetSolutionStepValue (PRESS_PROJ);
double pestimate = inner_prod (press_grad,grad_d);
mPn1[i_node] = pestimate;
// KRATOS_WATCH("peastimate step2")
// KRATOS_WATCH(iii->Id())
// KRATOS_WATCH(grad_d)
// KRATOS_WATCH(press_grad)
// KRATOS_WATCH(pestimate)
}
else
{
std::cout << "attention gradient of distance much greater than 1 on node:" << i_node <<std::endl;
return_value = -1;
// return -1;
double avg_number = 0.0;
double pavg = 0.0;
for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
{
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
if( mis_visited[j_neighbour] == 1)
{
pavg += mPn1[j_neighbour];
avg_number += 1.0;
}
}
if (avg_number == 0)
KRATOS_THROW_ERROR (std::logic_error,"can not happen that the extrapolation node has no neighbours","");
mPn1[i_node] = pavg/avg_number;
}
}
//if a node is very close to the free surface (relatively to the element size) fix the pressure on it
// for(ModelPart::NodesContainerType::iterator iii = mr_model_part.NodesBegin(); iii!=mr_model_part.NodesEnd(); iii++)
// {
// unsigned int i_node = iii->FastGetSolutionStepValue(AUX_INDEX);
//
// double dist = mdistances[i_node];
// if(dist > 0.0 && dist < 0.01*mHavg[i_node])
// iii->FastGetSolutionStepValue(PRESSURE) = 0.0;
//
// }
//PREREQUISITES
//allocate memory for variables
ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes();
int n_nodes = rNodes.size();
//unknown and right-hand side vector
TSystemVectorType dp, rhs;
dp.resize (n_nodes);
rhs.resize (n_nodes);
array_1d<double, TDim> dU_i, dU_j, work_array;
//read time step size from Kratos
ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
double delta_t = CurrentProcessInfo[DELTA_TIME];
#ifdef _OPENMP
// double time_inv = 0.0; //1.0/delta_t;
//read the pressure projection from the database
#endif
// mr_matrix_container.FillOldScalarFromDatabase (PRESSURE, mPn, mr_model_part.Nodes() );
// mr_matrix_container.FillScalarFromDatabase (PRESSURE, mPn1, mr_model_part.Nodes() );
// mr_matrix_container.FillVectorFromDatabase (PRESS_PROJ, mXi, rNodes);
// mr_matrix_container.FillVectorFromDatabase (VELOCITY, mvel_n1, rNodes);
//for (int i_node = 0; i_node < n_nodes; i_node++)
// std::cout << mvel_n1[i_node] << std::endl;
//loop over all nodes
// double rho_inv = 1.0 / mRho;
#pragma omp parallel for
for (int i_node = 0; i_node < n_nodes; i_node++)
{
double& rhs_i = rhs[i_node];
rhs_i = 0.0;
const double& p_i = mPn1[i_node];
const double& p_old_i = mPn[i_node];
const array_1d<double, TDim>& U_i_curr = mvel_n1[i_node];
// const double& eps_i = mEps[i_node];
array_1d<double, TDim>& xi_i = mXi[i_node];
double l_ii = 0.0;
// double div_i = 0.0;
//loop over all neighbours
for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex() [i_node]; csr_index != mr_matrix_container.GetRowStartIndex() [i_node + 1]; csr_index++)
{
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex() [csr_index];
const double& p_j = mPn1[j_neighbour];
const double& p_old_j = mPn[j_neighbour];
const array_1d<double, TDim>& U_j_curr = mvel_n1[j_neighbour];
const array_1d<double, TDim>& xi_j = mXi[j_neighbour];
// const double& eps_j = mEps[j_neighbour];
CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues() [csr_index];
#ifdef SYMM_PRESS
double edge_tau = 0.25* (mTauPressure[i_node] + mTauPressure[j_neighbour]);
#else
double edge_tau = 0.5*mTauPressure[i_node];
#endif
// double edge_tau = CalculateEdgeTau(time_inv,h_i,a_i,h_j,a_j);
//
if (edge_tau < delta_t) edge_tau=delta_t;
//compute laplacian operator
double sum_l_ikjk;
edge_ij.CalculateScalarLaplacian (sum_l_ikjk);
// double sum_l_ikjk_onlystab = sum_l_ikjk * (edge_tau);
double sum_l_ikjk_onlydt = sum_l_ikjk * (delta_t);
sum_l_ikjk *= (delta_t + edge_tau);
//assemble right-hand side
//pressure contribution
// rhs_i -= sum_l_ikjk_onlystab * (p_j - p_i);
rhs_i -= sum_l_ikjk * (p_j - p_i);
rhs_i += sum_l_ikjk_onlydt * (p_old_j - p_old_i);
//calculating the divergence of the fract vel
// edge_ij.Sub_D_v(div_i, U_i_curr*mRho*eps_i, U_j_curr * mRho*eps_j);
edge_ij.Sub_D_v (rhs_i, U_i_curr*mRho, U_j_curr * mRho);
// edge_ij.Sub_D_v(rhs_i,a_i*rho_i,a_j*rho_i);
//high order stabilizing term
double temp = 0.0;
// edge_ij.Add_div_v(temp,mTauPressure[i_node]*xi_i,mTauPressure[j_neighbour]*xi_j);
edge_ij.Add_div_v (temp, xi_i, xi_j);
rhs_i += edge_tau * temp;
//assemble laplacian matrix
mL (i_node, j_neighbour) = sum_l_ikjk;
l_ii -= sum_l_ikjk;
}
// //area correction to prevent mass loss
// rhs_i -= mdiv_error[i_node];
// rhs_i += div_i * eps_i;
mL (i_node, i_node) = l_ii;
}
if (muse_mass_correction == true)
{
#pragma omp parallel for
for (int i_node = 0; i_node < n_nodes; i_node++)
{
double& rhs_i = rhs[i_node];
rhs_i -= mdiv_error[i_node];
}
}
//find the max diagonal term
double max_diag = 0.0;
for (int i_node = 0; i_node < n_nodes; i_node++)
{
double L_diag = mL (i_node, i_node);
if (fabs (L_diag) > fabs (max_diag) ) max_diag = L_diag;
}
max_diag *= 1e10;
// if (max_diag < 1e20) max_diag=1e20;
//respect pressure boundary conditions by penalization
// double huge = max_diag * 1e6;
// for (unsigned int i_pressure = 0; i_pressure < mPressureOutletList.size(); i_pressure++) {
// unsigned int i_node = mPressureOutletList[i_pressure];
// mL(i_node, i_node) = huge;
// rhs[i_node] = 0.0;
// }
for (unsigned int i_pressure = 0; i_pressure < mPressureOutletList.size(); i_pressure++)
{
unsigned int i_node = mPressureOutletList[i_pressure];
mL (i_node, i_node) = max_diag;
rhs[i_node] = 0.0;
for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex() [i_node]; csr_index != mr_matrix_container.GetRowStartIndex() [i_node + 1]; csr_index++)
{
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex() [csr_index];
mL (i_node, j_neighbour) = 0.0;
}
}
//modification for level_set
// mr_matrix_container.FillScalarFromDatabase(DISTANCE, mdistances, mr_model_part.Nodes());
// for (unsigned int i_dist = 0; i_dist < mdistances.size(); i_dist++)
// {
// if(mdistances[i_dist] >= 0)
// {
// mL(i_dist, i_dist) = huge;
// rhs[i_dist] = 0.0;
// }
// }
#pragma omp parallel for
for (int i_node = 0; i_node < n_nodes; i_node++)
{
if (mdistances[i_node] >= 0)
{
mL (i_node, i_node) = max_diag;
rhs[i_node] = 0.0;
for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex() [i_node]; csr_index != mr_matrix_container.GetRowStartIndex() [i_node + 1]; csr_index++)
{
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex() [csr_index];
mL (i_node, j_neighbour) = 0.0;
}
}
else
{
for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex() [i_node]; csr_index != mr_matrix_container.GetRowStartIndex() [i_node + 1]; csr_index++)
{
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex() [csr_index];
if (mdistances[j_neighbour] >= 0)
mL (i_node, j_neighbour) = 0.0;
}
}
}
// for (int i_node = 0; i_node < n_nodes; i_node++)
// {
// if( fabs(mL(i_node, i_node)) < 1e-20)
// {
// mL(i_node, i_node)=max_diag;
// rhs[i_node] = 0.0;
// KRATOS_WATCH("arghhhhhhhhhhhhhhhhhhhhhhhhhhhhhh");
// }
// }
//compute row scaling factors
TSystemVectorType scaling_factors (n_nodes);
double* Lvalues = mL.value_data().begin();
SizeType* Lrow_indices = mL.index1_data().begin();
SizeType* Lcol_indices = mL.index2_data().begin();
#pragma omp parallel for
for (int k = 0; k < static_cast< int> (mL.size1() ); k++)
{
double t = 0.0;
SizeType col_begin = Lrow_indices[k];
SizeType col_end = Lrow_indices[k+1];
for (SizeType j=col_begin; j<col_end; j++)
if ( static_cast<int> (Lcol_indices[j]) == k)
{
t = fabs (Lvalues[j]);
break;
}
// t += Lvalues[j]*Lvalues[j];
// t = sqrt(t);
scaling_factors[k] = 1.0/sqrt (t);
}
#pragma omp parallel for
for (int k = 0; k < static_cast<int> (mL.size1() ); k++)
{
SizeType col_begin = Lrow_indices[k];
SizeType col_end = Lrow_indices[k+1];
double k_factor = scaling_factors[k];
rhs[k] *= k_factor;
for (SizeType j=col_begin; j<col_end; j++)
{
Lvalues[j] *= scaling_factors[Lcol_indices[j]] * k_factor;
}
}
//set starting vector for iterative solvers
#pragma omp parallel for
for (int i_node = 0; i_node < n_nodes; i_node++)
dp[i_node] = 0.0;
//KRATOS_WATCH(rhs);
//solve linear equation system L dp = rhs
pLinearSolver->Solve (mL, dp, rhs);
//KRATOS_WATCH(*pLinearSolver)
//update pressure
#pragma omp parallel for
for (int i_node = 0; i_node < n_nodes; i_node++)
mPn1[i_node] += dp[i_node]*scaling_factors[i_node];
// for (unsigned int i_pressure = 0; i_pressure < mPressureOutletList.size(); i_pressure++)
// {
// unsigned int i_node = mPressureOutletList[i_pressure];
// mPn1[i_node] = mPressureOutlet[i_pressure];
// }
//write pressure and density to Kratos
mr_matrix_container.WriteScalarToDatabase (PRESSURE, mPn1, rNodes);
//compute pressure proj for the next step
#pragma omp parallel for private(work_array)
for (int i_node = 0; i_node < n_nodes; i_node++)
{
array_1d<double, TDim>& xi_i = mXi[i_node];
for (unsigned int comp = 0; comp < TDim; comp++)
xi_i[comp] = 0.0;
double dist = mdistances[i_node];
if (dist <= 0.0) //node is inside domain ---- if outside do nothing
{
const double& p_i = mPn1[i_node];
for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex() [i_node]; csr_index != mr_matrix_container.GetRowStartIndex() [i_node + 1]; csr_index++)
{
//get global index of neighbouring node j
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex() [csr_index];
const double& p_j = mPn1[j_neighbour];
//projection of pressure gradients
CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues() [csr_index];
edge_ij.Add_grad_p (xi_i, p_i, p_j);
}
const double& m_inv = mr_matrix_container.GetInvertedMass() [i_node];
for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
xi_i[l_comp] *= m_inv;
}
}
mr_matrix_container.WriteVectorToDatabase (PRESS_PROJ, mXi, rNodes);
// KRATOS_WATCH("end of step2")
// KRATOS_WATCH(mPn)
// KRATOS_WATCH(mPn1)
// KRATOS_WATCH(mXi)
#ifdef DEBUG_OUTPUT
KRATOS_WATCH("end of step2 - new")
double aux_v=0.0;
for (int i_node = 0; i_node < mvel_n1.size(); i_node++)
aux_v += inner_prod(mvel_n1[i_node],mvel_n1[i_node]);
double aux_xi=0.0;
for (int i_node = 0; i_node < mvel_n1.size(); i_node++)
aux_xi += inner_prod(mXi[i_node],mXi[i_node]);
KRATOS_WATCH(inner_prod(mPn1,mPn1));
KRATOS_WATCH(aux_v);
KRATOS_WATCH(aux_xi);
#endif
return return_value;
KRATOS_CATCH ("")
}
//**********************************************************************************
//function to solve fluid equations - fractional step 3: correct fractional momentum
void SolveStep3()
{
KRATOS_TRY
// Fractional step 3: correct the fractional-step momentum using the pressure
// increment (mPn1 - mPn) computed in step 2, re-apply the velocity boundary
// conditions, store the nodal acceleration by finite differences, and
// (optionally) recompute the divergence error used for mass correction.
//get number of nodes
ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes();
int n_nodes = rNodes.size();
//define work array
array_1d<double, TDim> correction;
//read time step size from Kratos
ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
double delta_t = CurrentProcessInfo[DELTA_TIME];
// factor weights the pressure increment in the correction: 0.5 for the
// standard scheme, 1.0 when the pressure increment is assumed constant
double factor = 0.5;
if (massume_constant_dp == true)
factor = 1.0;
//compute end of step momentum
double rho_inv = 1.0 / mRho;
// correction is a per-thread scratch vector, hence private
#pragma omp parallel for private(correction) firstprivate(delta_t,rho_inv,factor)
for (int i_node = 0; i_node < n_nodes; i_node++)
{
double dist = mdistances[i_node];
if (dist < 0.0) //node is inside domain ---- if outside do nothing
{
array_1d<double, TDim>& U_i_curr = mvel_n1[i_node];
// nodal pressure increment scaled by 1/rho and the time-weighting factor
double delta_p_i = (mPn1[i_node] - mPn[i_node]) * rho_inv*factor;
// const double m_inv = mr_matrix_container.GetInvertedMass()[i_node];
//setting to zero
for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
correction[l_comp] = 0.0;
//compute edge contributions dt*M^(-1)Gp
// loop over the CSR row of node i (its edge neighbours)
for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex() [i_node]; csr_index != mr_matrix_container.GetRowStartIndex() [i_node + 1]; csr_index++)
{
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex() [csr_index];
double delta_p_j = (mPn1[j_neighbour] - mPn[j_neighbour]) * rho_inv*factor;
CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues() [csr_index];
// edge_ij.Sub_grad_p(correction,delta_p_i,delta_p_j);
// subtract the edge gradient contribution of the pressure increment
edge_ij.Sub_grad_p(correction, delta_p_i, delta_p_j);
// edge_ij.Add_grad_p(correction, delta_p_i, delta_p_j);
//edge_ij.Add_Gp (correction,delta_p_i,delta_p_j);
// edge_ij.Sub_Gp(correction,delta_p_i,delta_p_j);
}
//compute prefactor
// double coefficient = delta_t * m_inv;
const double m = mr_matrix_container.GetLumpedMass() [i_node];
const double& d = mdiag_stiffness[i_node];
//correct fractional momentum
// the diagonal stiffness d augments the lumped mass in the denominator
for (unsigned int comp = 0; comp < TDim; comp++)
{
U_i_curr[comp] += delta_t / (m + delta_t*d) * correction[comp];
}
}
}
// //imit acceleration
// #pragma omp parallel for
// for(int i_node = 0; i_node < n_nodes; i_node++)
// {
// array_1d<double,TDim>& acc = macc[i_node];
// array_1d<double,TDim>& v1 = mvel_n1[i_node];
// array_1d<double,TDim>& v = mvel_n[i_node];
//
// for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
// acc[l_comp] = (v1[l_comp] - v[l_comp])/delta_t;
//
// //limit accelerations to a maximum=100m/s/2
// const double max_acc = 200;
// double acc_norm = norm_2(acc);
// if(acc_norm > max_acc)
// {
// std::cout << "########################### acc norm " << acc_norm <<std::endl;
//
// acc *= max_acc/acc_norm;
// v1 = v;
// v1 += delta_t*acc;
// }
// }
ApplyVelocityBC (mvel_n1);
//save acceleration
// acceleration by backward finite difference of the corrected velocity
#pragma omp parallel for
for(int i_node = 0; i_node < n_nodes; i_node++)
{
array_1d<double,TDim>& acc = macc[i_node];
array_1d<double,TDim>& v1 = mvel_n1[i_node];
array_1d<double,TDim>& v = mvel_n[i_node];
for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
acc[l_comp] = (v1[l_comp] - v[l_comp])/delta_t;
}
//write velocity of time step n+1 to Kratos
//calculate the error on the divergence
// the divergence of the corrected velocity is stored as mdiv_error and used
// as a mass-correction source term in the next pressure solve
if (muse_mass_correction == true)
{
#pragma omp parallel for private(correction) firstprivate(delta_t,rho_inv)
for (int i_node = 0; i_node < n_nodes; i_node++)
{
const double dist = mdistances[i_node];
double& div_i_err = mdiv_error[i_node];
div_i_err = 0.0;
if (dist < 0.0) //node is inside domain ---- if outside do nothing
{
const array_1d<double, TDim>& U_i_curr = mvel_n1[i_node];
//compute edge contributions dt*M^(-1)Gp
for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex() [i_node]; csr_index != mr_matrix_container.GetRowStartIndex() [i_node + 1]; csr_index++)
{
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex() [csr_index];
array_1d<double, TDim>& U_j_curr = mvel_n1[j_neighbour];
CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues() [csr_index];
edge_ij.Add_D_v (div_i_err, U_i_curr*mRho, U_j_curr * mRho);
}
}
}
}
#ifdef DEBUG_OUTPUT
KRATOS_WATCH("end of step 3")
double aux=0.0;
for (int i_node = 0; i_node < n_nodes; i_node++)
aux += inner_prod(mvel_n1[i_node],mvel_n1[i_node]);
KRATOS_WATCH(inner_prod(mPn1,mPn1));
KRATOS_WATCH(aux);
#endif
mr_matrix_container.WriteVectorToDatabase (VELOCITY, mvel_n1, rNodes);
KRATOS_CATCH ("")
}
void ApplyDistanceBC()
{
KRATOS_TRY
//slip condition
int size = mDistanceBoundaryList.size();
#pragma omp parallel for firstprivate(size)
for (int i_dist = 0; i_dist < size; i_dist++)
{
unsigned int i_node = mDistanceBoundaryList[i_dist];
double& dist = mdistances[i_node];
dist = mDistanceValuesList[i_dist];
}
//fix the distance if velocity goes inwards
// int slip_size = mSlipBoundaryList.size();
// #pragma omp parallel for firstprivate(slip_size)
// for (int i_slip = 0; i_slip < slip_size; i_slip++)
// {
// unsigned int i_node = mSlipBoundaryList[i_slip];
// double dist = mphi_n[i_node];
// // if(dist > 0.0)
// // {
// array_1d<double, TDim>& U_i = mvel_n1[i_node];
// array_1d<double, TDim>& an_i = mSlipNormal[i_node];
// double projection_length = 0.0;
// double normalization = 0.0;
// for (unsigned int comp = 0; comp < TDim; comp++)
// {
// projection_length += U_i[comp] * an_i[comp];
// }
// if(projection_length > 0.0)
// dist = mphi_n[i_node];
// // }
// }
KRATOS_CATCH ("")
}
//************************************
void ApplyVelocityBC (CalcVectorType& VelArray)
{
    KRATOS_TRY
    // Enforce the velocity boundary conditions on VelArray:
    //   1) corner nodes take the average velocity of their wet, non-slip
    //      edge neighbours;
    //   2) slip nodes have the component along the nodal normal removed;
    //   3) nodes with fixed velocities get the prescribed values.

    // --- 1) corner nodes -------------------------------------------------
    int n_corners = mcorner_nodes.size();
    for (int ic = 0; ic < n_corners; ic++)
    {
        int corner_node = mcorner_nodes[ic];
        array_1d<double, TDim>& corner_vel = VelArray[corner_node];

        array_1d<double, TDim> vel_sum;
        for (unsigned int comp = 0; comp < TDim; comp++)
            vel_sum[comp] = 0.0;

        // accumulate the velocities of interior (dist <= 0), non-slip neighbours
        double n_contrib = 0.0;
        for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[corner_node]; csr_index != mr_matrix_container.GetRowStartIndex()[corner_node + 1]; csr_index++)
        {
            unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
            if (mdistances[j_neighbour] <= 0 && mis_slip[j_neighbour] == false)
            {
                const array_1d<double, TDim>& neigh_vel = VelArray[j_neighbour];
                n_contrib += 1.0;
                for (unsigned int comp = 0; comp < TDim; comp++)
                    vel_sum[comp] += neigh_vel[comp];
            }
        }

        // only overwrite the corner velocity if at least one neighbour contributed
        if (n_contrib != 0.0)
        {
            for (unsigned int comp = 0; comp < TDim; comp++)
                corner_vel[comp] = vel_sum[comp] / n_contrib;
        }
    }

    // --- 2) slip condition: subtract the normal projection ---------------
    int slip_size = mSlipBoundaryList.size();
    #pragma omp parallel for firstprivate(slip_size)
    for (int i_slip = 0; i_slip < slip_size; i_slip++)
    {
        unsigned int slip_node = mSlipBoundaryList[i_slip];
        if (mdistances[slip_node] <= 0.0) // only act on wet nodes
        {
            array_1d<double, TDim>& vel = VelArray[slip_node];
            array_1d<double, TDim>& normal = mSlipNormal[slip_node];
            // project the velocity on the (non-normalized) nodal normal
            double vel_dot_n = 0.0;
            double n_dot_n = 0.0;
            for (unsigned int comp = 0; comp < TDim; comp++)
            {
                vel_dot_n += vel[comp] * normal[comp];
                n_dot_n += normal[comp] * normal[comp];
            }
            vel_dot_n /= n_dot_n;
            // keep only the tangential part of the velocity
            for (unsigned int comp = 0; comp < TDim; comp++)
                vel[comp] -= vel_dot_n * normal[comp];
        }
    }

    // --- 3) fixed velocities ---------------------------------------------
    int fixed_size = mFixedVelocities.size();
    #pragma omp parallel for firstprivate(fixed_size)
    for (int i_velocity = 0; i_velocity < fixed_size; i_velocity++)
    {
        unsigned int fixed_node = mFixedVelocities[i_velocity];
        if (mdistances[fixed_node] <= 0.0) // only act on wet nodes
        {
            const array_1d<double, TDim>& prescribed_vel = mFixedVelocitiesValues[i_velocity];
            array_1d<double, TDim>& vel = VelArray[fixed_node];
            for (unsigned int comp = 0; comp < TDim; comp++)
                vel[comp] = prescribed_vel[comp];
        }
    }
    KRATOS_CATCH ("")
}
//********************************
//function to compute coefficients
void ExtrapolateValues (unsigned int extrapolation_layers)
{
KRATOS_TRY
// Extrapolate velocity, pressure and pressure projection from the fluid
// (dist < 0) onto the first `extrapolation_layers` layers of dry nodes
// (dist >= 0), layer by layer, by averaging over already-known neighbours.
// Finally marks (mis_visited) the nodes on which convection is solved.
//ensure that corner nodes are wet if all of the nodes around them have a negative distance
// typedef Node < 3 > PointType;
// typedef PointerVector<PointType > PointVector;
// typedef PointVector::iterator PointIterator;
mr_matrix_container.FillScalarFromDatabase (DISTANCE, mdistances,mr_model_part.Nodes() );
#pragma omp parallel for
for ( int i_node = 0; i_node < static_cast<int>(mr_model_part.Nodes().size()); i_node++)
mis_visited[i_node] = 0.0;
// layers[k] holds the node index of the k-th node found; layer_limits[il]
// delimits the index range of layer il inside `layers`
boost::numeric::ublas::vector<int> layers(mr_model_part.Nodes().size(),-1);
// std::vector<int> layer_color(mr_model_part.Nodes().size(),-1000);
boost::numeric::ublas::vector<int> layer_limits(extrapolation_layers+1);
layer_limits[0] = 0;
int layer_counter = -1;
// first pass: collect wet nodes that touch the interface (a wet node with at
// least one dry neighbour) into layer 0, and zero out all dry nodes
#pragma omp parallel for
for (int i_node = 0; i_node < static_cast<int>( mr_model_part.Nodes().size()); i_node++)
{
if(mdistances[i_node] < 0.0)
{
for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
{
//get global index of neighbouring node j
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
if(mdistances[j_neighbour] >= 0.0 && mis_visited[i_node] == 0)
{
// NOTE(review): the critical section protects only the next statement
// (the shared layer_counter increment/store); mis_visited[i_node] is
// written outside it but is private to this iteration's node
#pragma omp critical
layers[++layer_counter] = i_node;
mis_visited[i_node] = 1;
break;
}
}
}
else
{
mvel_n1[i_node] = ZeroVector (TDim);
mvel_n[i_node] = ZeroVector (TDim);
mPn[i_node] = 0.0;
mPn1[i_node] = 0.0;
mXi[i_node] = ZeroVector (TDim);
}
}
// NOTE(review): layers[++layer_counter] pre-increments, so after the loop
// layer_counter == (number of layer-0 nodes) - 1, while the fill loop below
// uses layers[layer_counter++] (post-increment). This looks like an
// off-by-one (the last layer-0 node is excluded from [limits[0],limits[1])
// and may be overwritten) -- TODO confirm against the reference implementation.
layer_limits[1] = layer_counter;
//fill the following layers by neighbour relationships
//each layer fills the following
for (unsigned int il = 0; il < extrapolation_layers - 1; il++)
{
//parallelization not trivial
for(unsigned int iii = static_cast<unsigned int>(layer_limits[il]); iii<static_cast<unsigned int>(layer_limits[il+1]); iii++)
{
unsigned int i_node = layers[iii];
for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
{
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
// dry and not yet assigned to a layer -> goes into layer il+1
if(mdistances[j_neighbour] >= 0.0 && mis_visited[j_neighbour] == 0)
{
layers[layer_counter++] = j_neighbour;
// mis_visited stores (layer index + 1) during this phase
mis_visited[j_neighbour] = il+2;
}
}
}
layer_limits[il+2] = layer_counter;
}
array_1d<double, TDim > aux, aux_proj;
//ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
//double delta_t = CurrentProcessInfo[DELTA_TIME];
//fill the pressure projection on the first layer inside the fluid
//by extrapolating from the pressure projection on the layer -1 (the first layer completely inside the domain)
#pragma omp parallel for
for(int i=layer_limits[0]; i<layer_limits[1]; i++)
{
unsigned int i_node = layers[i];
noalias (aux_proj) = ZeroVector (TDim);
double avg_number = 0.0;
// average the pressure projection over the completely-internal neighbours
// (mis_visited == 0 here means "wet and not on the interface layer")
for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
{
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
if( mis_visited[j_neighbour] == 0)
{
const array_1d<double, TDim > & inside_press_grad = mXi[j_neighbour];
noalias (aux_proj) += inside_press_grad;
avg_number += 1.0;
}
}
if (avg_number != 0.0) //this case means that it has some neighbours that are completely internal
{
aux_proj /= avg_number;
noalias (mXi[i_node] ) = aux_proj;
}
else //case in which there is not a layer of nodes completely internal
{
// fall back to the hydrostatic estimate rho*(g - acceleration)
array_1d<double,TDim>& xi = mXi[i_node];
noalias ( xi ) = mRho*mBodyForce;
noalias ( xi ) -= mRho*macc[i_node];
}
}
//perform extrapolation layer by layer by making an average
//of the neighbours of lower order
/* KRATOS_WATCH(extrapolation_layers)
for (unsigned int il = 0; il < extrapolation_layers; il++)
std::cout << layer_limits[il] << " ";
std::cout << std::endl;
std::cout << std::endl;
for (unsigned int il = 0; il < extrapolation_layers; il++)
{
std::cout << "level = " << il << " nneighb = " << layer_limits[il+1] - layer_limits[il] << " -- ";
for(unsigned int iii = layer_limits[il]; iii<layer_limits[il+1]; iii++)
std::cout << layers[iii] << " ";
std::cout << std::endl;
}
std::cout << std::endl;
std::cout << " printing is visited " << std::endl;
for (unsigned int i_node = 0; i_node < mr_model_part.Nodes().size(); i_node++)
std::cout << mis_visited[i_node] << std::endl;
std::cout << std::endl;*/
for (int il = 1; il < static_cast<int>(extrapolation_layers); il++)
{
//parallelization of this loop not trivial
for(int iii = layer_limits[il]; iii<layer_limits[il+1]; iii++)
{
unsigned int i_node = layers[iii];
noalias (aux) = ZeroVector (TDim);
noalias (aux_proj) = ZeroVector (TDim);
double avg_number = 0.0;
double pavg = 0.0;
// average over all neighbours that belong to a lower layer (already known)
for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
{
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
if (mis_visited[j_neighbour] < (il + 1) && mis_visited[j_neighbour] != 0)
{
const array_1d<double, TDim >& direction_vec = mEdgeDimensions[csr_index];
// noalias (direction_vec) -= coords_bottom;
const array_1d<double, TDim >& press_grad = mXi[j_neighbour]; //i->FastGetSolutionStepValue (PRESS_PROJ);
// first-order Taylor estimate of the pressure along the edge
double temp = inner_prod (direction_vec, press_grad);
double pestimate = mPn[j_neighbour] + temp;
pavg += pestimate;
noalias (aux_proj) += press_grad;
noalias (aux) += mvel_n1[j_neighbour]; //i->FastGetSolutionStepValue (VELOCITY);
avg_number += 1.0;
}
}
if (avg_number != 0.0)
{
aux /= avg_number;
pavg /= avg_number;
aux_proj /= avg_number;
// KRATOS_WATCH(avg_number);
// KRATOS_WATCH(aux);
// KRATOS_WATCH(pavg);
// KRATOS_WATCH(aux_proj);
}
else
{
KRATOS_THROW_ERROR (std::runtime_error, "error in extrapolation:: no neighbours find on a extrapolation layer -- impossible", "");
// KRATOS_THROW_ERROR(std:logic_error,"error in extrapolation:: no neighbours find on a extrapolation layer -- impossible","");
}
mvel_n1[i_node] = aux;
mvel_n[i_node] = aux;
mPn[i_node] = pavg;
// mPn1[i_node] = pavg;
mXi[i_node] = aux_proj;
}
}
//mark nodes on which we will have to solve for convection
//mark all of internal nodes
#pragma omp parallel for
for ( int i_node = 0; i_node < static_cast<int>(mr_model_part.Nodes().size()); i_node++)
{
if (mdistances[i_node] <= 0.0)
mis_visited[i_node] = 1.0;
else
mis_visited[i_node] = 0.0;
}
//now mark all of the nodes up to the extrapolation layers - 1
for (unsigned int il = 0; il < extrapolation_layers-1; il++)
{
#pragma omp parallel for
for( int iii = static_cast<int>(layer_limits[il]); iii<static_cast<int>(layer_limits[il+1]); iii++)
{
unsigned int i_node = layers[iii];
mis_visited[i_node] = 1.0;
}
}
// re-apply the velocity BCs since extrapolated values may violate them
ApplyVelocityBC (mvel_n1);
// mr_matrix_container.WriteVectorToDatabase (VELOCITY, mvel_n1, mr_model_part.Nodes() );
// KRATOS_WATCH("end of Extrapolate Values ")
// KRATOS_WATCH(mvel_n1)
// KRATOS_WATCH(mPn)
// KRATOS_WATCH(mPn1)
// KRATOS_WATCH(mXi)
// KRATOS_WATCH(mdistances)
#ifdef DEBUG_OUTPUT
KRATOS_WATCH("end of extrapolate values - new")
double aux_v=0.0;
for (int i_node = 0; i_node < mvel_n1.size(); i_node++)
aux_v += inner_prod(mvel_n1[i_node],mvel_n1[i_node]);
double aux_xi=0.0;
for (int i_node = 0; i_node < mvel_n1.size(); i_node++)
aux_xi += inner_prod(mXi[i_node],mXi[i_node]);
KRATOS_WATCH(inner_prod(mPn1,mPn1));
KRATOS_WATCH(aux_v);
KRATOS_WATCH(aux_xi);
#endif
KRATOS_CATCH ("")
}
void ChangeSignToDistance()
{
KRATOS_TRY
for (ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin();
inode != mr_model_part.NodesEnd();
inode++)
{
double dist = inode->FastGetSolutionStepValue (DISTANCE);
inode->FastGetSolutionStepValue (DISTANCE) = -dist;
}
KRATOS_CATCH ("")
}
void MarkNodesByDistance (double min, double max)
{
    KRATOS_TRY
    // Set mis_visited to 1 on every node whose distance lies strictly inside
    // the open interval (min, max), and to 0 everywhere else.
    #pragma omp parallel for
    for ( int i_node = 0; i_node < static_cast<int>(mr_model_part.Nodes().size()); i_node++)
    {
        const double d = mdistances[i_node];
        mis_visited[i_node] = (d > min && d < max) ? 1.0 : 0.0;
    }
    KRATOS_CATCH ("")
}
void SaveScalarVariableToOldStep (Variable<double>& rVar)
{
    KRATOS_TRY
    // Copy the current value of rVar into its previous-time-step slot
    // (history position 1) on every node.
    for (ModelPart::NodesContainerType::iterator node_it = mr_model_part.NodesBegin();
            node_it != mr_model_part.NodesEnd();
            node_it++)
        node_it->FastGetSolutionStepValue (rVar, 1) = node_it->FastGetSolutionStepValue (rVar);
    KRATOS_CATCH ("")
}
void MarkExternalAndMixedNodes()
{
    KRATOS_TRY
    // Mark (mis_visited = 1) every external node (distance > 0) together with
    // all of its edge neighbours; every other node gets 0.
    #pragma omp parallel for
    for ( int i_node = 0; i_node < static_cast<int>(mr_model_part.Nodes().size()); i_node++)
        mis_visited[i_node] = 0;
    // serial: neighbours of different nodes may coincide, so the marking
    // loop is not trivially parallel
    for (unsigned int i_node = 0; i_node < mr_model_part.Nodes().size(); i_node++)
    {
        if(mdistances[i_node] > 0.0)
        {
            mis_visited[i_node] = 1;
            for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
                mis_visited[ mr_matrix_container.GetColumnIndex()[csr_index] ] = 1;
        }
    }
    KRATOS_CATCH ("")
}
void MarkInternalAndMixedNodes()
{
    KRATOS_TRY
    // Mark (mis_visited = 1) every internal node (distance <= 0) together with
    // all of its edge neighbours; every other node gets 0.
    #pragma omp parallel for
    for ( int i_node = 0; i_node < static_cast<int>(mr_model_part.Nodes().size()); i_node++)
        mis_visited[i_node] = 0;
    // serial: neighbours of different nodes may coincide, so the marking
    // loop is not trivially parallel
    for (unsigned int i_node = 0; i_node < mr_model_part.Nodes().size(); i_node++)
    {
        if(mdistances[i_node] <= 0.0)
        {
            mis_visited[i_node] = 1;
            for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
                mis_visited[ mr_matrix_container.GetColumnIndex()[csr_index] ] = 1;
        }
    }
    KRATOS_CATCH ("")
}
void MarkInternalNodes()
{
    KRATOS_TRY
    // Set mis_visited to 1 on internal nodes (distance <= 0) and 0 elsewhere.
    #pragma omp parallel for
    for ( int i_node = 0; i_node < static_cast<int>(mr_model_part.Nodes().size()); i_node++)
        mis_visited[i_node] = (mdistances[i_node] <= 0.0) ? 1 : 0;
    KRATOS_CATCH ("")
}
//**************************************
//function to calculate the area normals
void CalculateNormals (ModelPart::ConditionsContainerType& rConditions)
{
KRATOS_TRY
//calculate area normals face-by-face
array_1d<double, 3 > area_normal;
//2D case
if (TDim == 2)
{
for (ModelPart::ConditionsContainerType::iterator cond_it = rConditions.begin(); cond_it != rConditions.end(); cond_it++)
CalculateNormal2D (cond_it, area_normal);
}//3D case
else if (TDim == 3)
{
//help vectors for cross product
array_1d<double, 3 > v1;
array_1d<double, 3 > v2;
for (ModelPart::ConditionsContainerType::iterator cond_it = rConditions.begin(); cond_it != rConditions.end(); cond_it++)
CalculateNormal3D (cond_it, area_normal, v1, v2);
}
// area_normal *= -1; //CHAPUZA: REMOVE!!!s
//(re)initialize normals
unsigned int n_nodes = mNodalFlag.size();
mInOutNormal.resize (n_nodes);
mSlipNormal.resize (n_nodes);
for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
{
noalias (mSlipNormal[i_node]) = ZeroVector (TDim);
mis_slip[i_node] = false;
noalias (mInOutNormal[i_node]) = ZeroVector (TDim);
}
//loop over all faces
const double node_factor = 1.0 / TDim;
for (ModelPart::ConditionsContainerType::iterator cond_it = rConditions.begin(); cond_it != rConditions.end(); cond_it++)
{
//get geometry data of the face
Geometry<Node < 3 > >& face_geometry = cond_it->GetGeometry();
//reference for area normal of the face
array_1d<double, 3 > & face_normal = cond_it->GetValue (NORMAL);
//slip condition
if (static_cast<bool> (cond_it->GetValue (IS_STRUCTURE) ) == true)
for (unsigned int if_node = 0; if_node < TDim; if_node++)
{
unsigned int i_node = static_cast<unsigned int> (face_geometry[if_node].FastGetSolutionStepValue (AUX_INDEX) );
array_1d<double, TDim>& slip_normal = mSlipNormal[i_node];
mis_slip[i_node] = true;
for (unsigned int comp = 0; comp < TDim; comp++)
{
slip_normal[comp] += node_factor * face_normal[comp];
}
}
}
//fill the list of slip nodes
std::vector< unsigned int> tempmSlipBoundaryList;
for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
{
if (mis_slip[i_node] == true)
tempmSlipBoundaryList.push_back (i_node);
mis_slip[i_node] = false;
}
mSlipBoundaryList.resize (tempmSlipBoundaryList.size(),false);
#pragma omp parallel for
for (int i=0; i<static_cast<int> (tempmSlipBoundaryList.size() ); i++)
mSlipBoundaryList[i] = tempmSlipBoundaryList[i];
//check that all of the normals are not zero
for (int i=0; i<static_cast<int> (mSlipBoundaryList.size() ); i++)
{
unsigned int i_node = mSlipBoundaryList[i];
double tmp = norm_2(mSlipNormal[i_node]);
if(tmp < 1e-20)
KRATOS_THROW_ERROR(std::logic_error,"found a slip node with zero normal on node with id",i_node+1)
}
//loop over all faces to fill inlet outlet
for (ModelPart::ConditionsContainerType::iterator cond_it = rConditions.begin(); cond_it != rConditions.end(); cond_it++)
{
//get geometry data of the face
Geometry<Node < 3 > >& face_geometry = cond_it->GetGeometry();
//reference for area normal of the face
array_1d<double, 3 > & face_normal = cond_it->GetValue (NORMAL);
bool is_inlet_or_outlet = false;
if (cond_it->GetValue (IS_STRUCTURE) == 0) is_inlet_or_outlet = true;
else
{
for (unsigned int if_node = 0; if_node < TDim; if_node++)
if (face_geometry[if_node].IsFixed (VELOCITY_X) )
is_inlet_or_outlet = true;
}
//slip condition
if (is_inlet_or_outlet) //the opposite of the loop before
for (unsigned int if_node = 0; if_node < TDim; if_node++)
{
unsigned int i_node = static_cast<unsigned int> (face_geometry[if_node].FastGetSolutionStepValue (AUX_INDEX) );
array_1d<double, TDim>& inout_normal = mInOutNormal[i_node];
mis_slip[i_node] = true; //reutilize it!
for (unsigned int comp = 0; comp < TDim; comp++)
{
inout_normal[comp] += node_factor * face_normal[comp];
}
}
}
// KRATOS_WATCH( mInOutNormal[7-1] );
// KRATOS_THROW_ERROR(std::logic_error,"remove line 2318 " ,"");
std::vector< unsigned int> tempmInOutBoundaryList;
for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
{
if (mis_slip[i_node] == true)
tempmInOutBoundaryList.push_back (i_node);
}
mInOutBoundaryList.resize (tempmInOutBoundaryList.size(),false);
#pragma omp parallel for
for (int i=0; i<static_cast<int> (tempmInOutBoundaryList.size() ); i++)
mInOutBoundaryList[i] = tempmInOutBoundaryList[i];
//store for future use the list of slip nodes
#pragma omp parallel for
for (int i=0; i<static_cast<int> (mis_slip.size() ); i++)
mis_slip[ i ] = false;
#pragma omp parallel for
for (int i=0; i<static_cast<int> (mSlipBoundaryList.size() ); i++)
mis_slip[ mSlipBoundaryList[i] ] = true;
KRATOS_CATCH ("")
}
//*******************************
//function to free dynamic memory
void Clear()
{
KRATOS_TRY
// Releases every dynamically sized member container so the solver object
// can be re-initialized on a fresh mesh without leaking memory.
// --- nodal scalar fields ---
mViscosity.clear();
mPn.clear();
mPn1.clear();
mHmin.clear();
mHavg.clear();
mNodalFlag.clear();
mTauPressure.clear();
mTauConvection.clear();
mTau2.clear();
mBeta.clear();
mphi_n.clear();
mphi_n1.clear();
mEps.clear();
mA.clear();
mB.clear();
mdiv_error.clear();
mWallReductionFactor.clear();
mdiag_stiffness.clear();
// --- nodal vector fields ---
mWork.clear();
mvel_n.clear();
mvel_n1.clear();
macc.clear();
mSlipNormal.clear();
mPiConvection.clear();
mFixedVelocitiesValues.clear();
// --- boundary bookkeeping ---
mFixedVelocities.clear();
mPressureOutletList.clear();
mSlipBoundaryList.clear();
mis_slip.clear();
mis_visited.clear();
// --- pressure system matrix ---
mL.clear();
KRATOS_CATCH ("")
}
void ConvectDistance()
{
KRATOS_TRY
// Convects the level-set function DISTANCE with the nodal velocity field
// using a classical 4-stage Runge-Kutta scheme, split into mnumsubsteps
// substeps of the global time step. Only nodes flagged in mis_visited
// participate; distance Dirichlet values (mDistanceBoundaryList) are
// re-imposed after every RK stage via ApplyDistanceBC().
//variables for node based data handling
ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes();
int n_nodes = rNodes.size();
//storage of nodal values in local variables
ValuesVectorType rhs, WorkConvection;
rhs.resize (n_nodes);
WorkConvection.resize (n_nodes);
ValuesVectorType active_nodes;
active_nodes.resize (n_nodes);
// mr_matrix_container.FillScalarFromDatabase (POROSITY, mEps, mr_model_part.Nodes() );
//read variables from Kratos
// mr_matrix_container.FillVectorFromDatabase (VELOCITY, mvel_n1, mr_model_part.Nodes() );
// mr_matrix_container.FillOldVectorFromDatabase (VELOCITY, mvel_n, mr_model_part.Nodes() );
mr_matrix_container.FillScalarFromDatabase (DISTANCE, mphi_n1, mr_model_part.Nodes() );
mr_matrix_container.FillOldScalarFromDatabase (DISTANCE, mphi_n, mr_model_part.Nodes() );
//snapshot the current distance values at the Dirichlet nodes so that
//ApplyDistanceBC() can re-impose them after every RK stage
for (unsigned int i=0; i< mDistanceValuesList.size(); i++)
{
mDistanceValuesList[ i ] = mphi_n1[ mDistanceBoundaryList[i] ];
}
//mr_matrix_container.AssignVectorToVector(mphi_n1, mphi_n); //mWork = mphi_n
// //chapuza
// //set the distance to zero when it tries to go out of the pressure boundary
// int pressure_size = mPressureOutletList.size();
// #pragma omp parallel for firstprivate(pressure_size)
// for (int iii = 0; iii < pressure_size; iii++)
// {
// unsigned int i_node = mPressureOutletList[iii];
// mphi_n1[i_node] = fabs(mphi_n1[i_node]);
// mphi_n[i_node] = fabs(mphi_n[i_node]);
// }
//create and fill a vector of nodes for which we want to convect the velocity
for (int i_node = 0; i_node < n_nodes; i_node++)
{
active_nodes[i_node] = mis_visited[i_node];
}
// ComputeConvectiveProjection(mPiConvection,mphi_n1,mEps,mvel_n1);
// ComputeLimitor(mPiConvection,mphi_n1,mBeta,mvel_n1,mEdgeDimensions);
// mr_matrix_container.WriteScalarToDatabase(TEMPERATURE, active_nodes, rNodes);
//read time step size from Kratos
ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
double delta_t = CurrentProcessInfo[DELTA_TIME];
double n_substeps = mnumsubsteps;
// del
double delta_t_substep = delta_t/n_substeps;
//substep loop: each substep performs one full RK4 update of the distance.
//WorkConvection accumulates the weighted stage increments
//(k1/6 + k2/3 + k3/3 + k4/6) while mphi_n1 holds the intermediate stage
//value used to evaluate the next right-hand side.
for (unsigned int substep = 0; substep<n_substeps; substep++)
{
mr_matrix_container.AssignVectorToVector (mphi_n, WorkConvection); //mWork = mphi_n
//first step of Runge Kutta
// mr_matrix_container.AssignVectorToVector(mphi_n,mphi_n1); //mphi_n1 = mphi_n
mr_matrix_container.SetToZero (rhs);
ComputeConvectiveProjection (mPiConvection,mphi_n1,mEps,mvel_n1);
ComputeLimitor (mPiConvection,mphi_n1,mBeta,mvel_n1,mEdgeDimensions);
CalculateRHS_convection (mphi_n1, mvel_n1, rhs, active_nodes);
mr_matrix_container.Add_Minv_value (WorkConvection, WorkConvection, delta_t_substep / 6.0, mr_matrix_container.GetInvertedMass(), rhs);
mr_matrix_container.Add_Minv_value (mphi_n1, mphi_n, 0.5 * delta_t_substep, mr_matrix_container.GetInvertedMass(), rhs);
ApplyDistanceBC();
//second step
mr_matrix_container.SetToZero (rhs);
ComputeConvectiveProjection (mPiConvection,mphi_n1,mEps,mvel_n1);
ComputeLimitor (mPiConvection,mphi_n1,mBeta,mvel_n1,mEdgeDimensions);
CalculateRHS_convection (mphi_n1, mvel_n1, rhs, active_nodes);
mr_matrix_container.Add_Minv_value (WorkConvection, WorkConvection, delta_t_substep / 3.0, mr_matrix_container.GetInvertedMass(), rhs);
mr_matrix_container.Add_Minv_value (mphi_n1, mphi_n, 0.5 * delta_t_substep, mr_matrix_container.GetInvertedMass(), rhs);
ApplyDistanceBC();
//third step
mr_matrix_container.SetToZero (rhs);
ComputeConvectiveProjection (mPiConvection,mphi_n1,mEps,mvel_n1);
ComputeLimitor (mPiConvection,mphi_n1,mBeta,mvel_n1,mEdgeDimensions);
CalculateRHS_convection (mphi_n1, mvel_n1, rhs, active_nodes);
mr_matrix_container.Add_Minv_value (WorkConvection, WorkConvection, delta_t_substep / 3.0, mr_matrix_container.GetInvertedMass(), rhs);
mr_matrix_container.Add_Minv_value (mphi_n1, mphi_n, delta_t_substep, mr_matrix_container.GetInvertedMass(), rhs);
ApplyDistanceBC();
//fourth step
mr_matrix_container.SetToZero (rhs);
ComputeConvectiveProjection (mPiConvection,mphi_n1,mEps,mvel_n1);
ComputeLimitor (mPiConvection,mphi_n1,mBeta,mvel_n1,mEdgeDimensions);
CalculateRHS_convection (mphi_n1, mvel_n1, rhs, active_nodes);
mr_matrix_container.Add_Minv_value (WorkConvection, WorkConvection, delta_t_substep / 6.0, mr_matrix_container.GetInvertedMass(), rhs);
ApplyDistanceBC();
//accept the substep: the accumulated RK result becomes both the new
//mphi_n1 and the starting value mphi_n for the next substep
mr_matrix_container.AssignVectorToVector (WorkConvection, mphi_n1);
mr_matrix_container.AssignVectorToVector (mphi_n1, mphi_n);
}
// // make sure that boundary nodes that are very close to the free surface get wet
// int slip_size = mSlipBoundaryList.size();
// #pragma omp parallel for firstprivate(slip_size)
// for (int i_slip = 0; i_slip < slip_size; i_slip++) {
// unsigned int i_node = mSlipBoundaryList[i_slip];
// const double& h_i = mHmin[i_node];
// double& dist_i = mphi_n1[i_node];
//
// if(dist_i > 0.0 && dist_i < 0.5*h_i)
// {
// //loop to all the edges surrounding node I
// for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
// {
// unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
// if(mphi_n1[j_neighbour] <= 0.0)
// dist_i = -0.01 * h_i;
// }
// }
//
// }
// int fixed_size = mFixedVelocities.size();
// #pragma omp parallel for firstprivate(fixed_size)
// for (int i_velocity = 0; i_velocity < fixed_size; i_velocity++) {
// unsigned int i_node = mFixedVelocities[i_velocity];
// const double& h_i = mHmin[i_node];
// double& dist_i = mphi_n1[i_node];
//
// if(dist_i > 0.0 && dist_i < 0.5*h_i)
// {
// //loop to all the edges surrounding node I
// for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
// {
// unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
// if(mphi_n1[j_neighbour] <= 0.0)
// dist_i = -0.01 * h_i;
// }
// }
// }
//wetten corner nodes if needed: a corner node whose neighbours are all
//inside the fluid (negative distance) inherits the most negative
//neighbouring distance so it does not remain spuriously dry
int corner_size = mcorner_nodes.size();
for (int i = 0; i < corner_size; i++)
{
int i_node = mcorner_nodes[i];
bool to_be_wettened = true;
double min_dist = 0.0;
for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex() [i_node]; csr_index != mr_matrix_container.GetRowStartIndex() [i_node + 1]; csr_index++)
{
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex() [csr_index];
double neighb_dist = mphi_n1[j_neighbour];
if (min_dist > neighb_dist)
min_dist = neighb_dist;
if (neighb_dist >= 0.0)
{
to_be_wettened=false;
}
}
if (to_be_wettened==true)
mphi_n1[i_node] = min_dist;
}
//write the convected distance field back to the Kratos database
mr_matrix_container.WriteScalarToDatabase (DISTANCE, mphi_n1, mr_model_part.Nodes() );
KRATOS_CATCH ("")
}
void ReduceTimeStep (ModelPart& rModelPart, double NewTime)
{
KRATOS_TRY
// Rolls the model part back so the step can be repeated with a smaller dt:
// the nodal database of the previous step (buffer position 1) overwrites
// the current one (position 0) and the process info time is rewound to
// NewTime.
/*
double current_time = rModelPart.GetProcessInfo()[TIME];
double current_delta_time = rModelPart.GetProcessInfo()[DELTA_TIME];
double old_time = current_time - current_delta_time;
double new_reduced_time = NewTtime;
double new_delta_time = new_reduced_time - old_time;
rModelPart.GetProcessInfo()[TIME] = new_reduced_time;
rModelPart.GetProcessInfo()[DELTA_TIME] = new_delta_time;
//now copy the database from the old step on the top of the current step
int step_data_size = ThisModelPart.GetNodalSolutionStepDataSize();
double* current_data = (pnode)->SolutionStepData().Data(0);
double* old_data = (pnode)->SolutionStepData().Data(1);
for (int j = 0; j < step_data_size; j++)
current_data[j] = old_data[j];
*/
rModelPart.OverwriteSolutionStepData (1, 0);
rModelPart.GetProcessInfo().SetCurrentTime (NewTime);
KRATOS_CATCH ("error in reducing the time step")
}
bool CheckDistanceConvection()
{
// Sanity check of the distance convection step: for every node inside the
// fluid (dist <= 0) the nodal gradient of the distance function is
// reconstructed from the edge contributions; a gradient norm well above 1
// (here > 1.5) signals a failed convection, since a signed-distance field
// should have a unit gradient.
// Returns true when no node shows a large gradient, false otherwise.
int n_large_distance_gradient = 0;
array_1d<double, TDim> grad_d;
ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes();
int n_nodes = rNodes.size();
//calculate gradient of distance on the nodes and count occurrences of large gradients (that indicate a failure)
for (int i_node = 0; i_node < n_nodes; i_node++)
{
double dist = mdistances[i_node];
if (dist <= 0.0)
{
//reset the gradient accumulator
for (unsigned int comp = 0; comp < TDim; comp++)
grad_d[comp] = 0.0;
double dist_i = mdistances[i_node];
//accumulate the edge contributions to the nodal gradient
for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex() [i_node]; csr_index != mr_matrix_container.GetRowStartIndex() [i_node + 1]; csr_index++)
{
//get global index of neighbouring node j
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex() [csr_index];
const double& dist_j = mdistances[j_neighbour];
//projection of pressure gradients
CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues() [csr_index];
edge_ij.Add_grad_p (grad_d, dist_i, dist_j);
}
//scale by the inverted lumped mass to obtain the actual gradient
const double& m_inv = mr_matrix_container.GetInvertedMass() [i_node];
for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
grad_d[l_comp] *= m_inv;
double norm_grad = norm_2 (grad_d);
if (norm_grad > 1.5) //large gradient found
n_large_distance_gradient += 1;
}
}
//convection succeeded only if no node exhibited a large gradient
//(replaces the original redundant if/else around a temporary bool)
return n_large_distance_gradient == 0;
}
void ActivateWallResistance (double Ywall)
{
// Activates the wall law and fills mWallReductionFactor for all boundary
// nodes: plain slip nodes get 1.0, edge nodes medge_coefficient and corner
// nodes mcorner_coefficient (later loops override earlier assignments).
mWallLawIsActive = true;
mY_wall = Ywall;
// NOTE(review): the angle-based factor computation was disabled upstream,
// so max_angle_overall always remains 0.0 in the printout below.
double max_angle_overall = 0.0;
// plain slip nodes: no reduction
int slip_size = mSlipBoundaryList.size();
#pragma omp parallel for firstprivate(slip_size)
for (int i_slip = 0; i_slip < slip_size; i_slip++)
{
unsigned int i_node = mSlipBoundaryList[i_slip];
mWallReductionFactor[i_node] = 1.0;
}
std::cout << "max angle between normals found in the model = " << max_angle_overall << std::endl;
// nodes lying on geometric edges
int edge_size = medge_nodes.size();
#pragma omp parallel for firstprivate(edge_size)
for (int i = 0; i < edge_size; i++)
{
int i_node = medge_nodes[i];
mWallReductionFactor[i_node] = medge_coefficient;
}
// corner nodes
int corner_size = mcorner_nodes.size();
for (int i = 0; i < corner_size; i++)
{
int i_node = mcorner_nodes[i];
mWallReductionFactor[i_node] = mcorner_coefficient;
}
}
void ActivateClassicalWallResistance (double Ywall)
{
// Activates the classical wall law: stores the wall distance and resets
// every nodal wall-reduction factor to 1.0 (no reduction anywhere).
mWallLawIsActive = true;
mY_wall = Ywall;
const unsigned int n_entries = mWallReductionFactor.size();
for (unsigned int i_node = 0; i_node < n_entries; ++i_node)
mWallReductionFactor[i_node] = 1.0;
}
double ComputeVolumeVariation()
{
ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
double dt = CurrentProcessInfo[DELTA_TIME];
//slip condition
int inout_size = mInOutBoundaryList.size();
double vol_var = 0.0;
//#pragma omp parallel for firstprivate(slip_size)
for (int i = 0; i < inout_size; i++)
{
unsigned int i_node = mInOutBoundaryList[i];
double dist = mdistances[i_node];
if (dist <= 0.0)
{
const array_1d<double, TDim>& U_i = mvel_n1[i_node];
const array_1d<double, TDim>& an_i = mInOutNormal[i_node];
double projection_length = 0.0;
for (unsigned int comp = 0; comp < TDim; comp++)
{
projection_length += U_i[comp] * an_i[comp];
}
vol_var += projection_length;
}
}
return -vol_var * dt;
}
double ComputeWetVolume()
{
KRATOS_TRY
// Returns the current wet volume: the porosity-scaled lumped mass summed
// over all nodes lying inside the fluid (distance <= 0). The distance
// field is refreshed from the Kratos database first.
mr_matrix_container.FillScalarFromDatabase (DISTANCE, mdistances, mr_model_part.Nodes() );
double wet_volume = 0.0;
const int n_nodes = static_cast<int> (mdistances.size() );
for (int i_node = 0; i_node < n_nodes; ++i_node)
{
if (mdistances[i_node] <= 0.0)
wet_volume += mr_matrix_container.GetLumpedMass() [i_node] / mEps[i_node];
}
return wet_volume;
KRATOS_CATCH ("");
}
double ComputeTotalVolume()
{
KRATOS_TRY
// Returns the total domain volume: the porosity-scaled lumped mass summed
// over ALL nodes, wet or dry. The distance field is refreshed from the
// database for consistency with ComputeWetVolume, although it is not used.
mr_matrix_container.FillScalarFromDatabase (DISTANCE, mdistances, mr_model_part.Nodes() );
double total_volume = 0.0;
const int n_nodes = static_cast<int> (mdistances.size() );
for (int i_node = 0; i_node < n_nodes; ++i_node)
total_volume += mr_matrix_container.GetLumpedMass() [i_node] / mEps[i_node];
return total_volume;
KRATOS_CATCH ("");
}
void DiscreteVolumeCorrection (double expected_volume, double measured_volume)
{
// Compensates volume loss of the level-set method by "wetting" discrete
// nodes: when the measured volume is below the expected one, nodes of the
// first dry layer (dist > 0 with at least one wet neighbour) are switched
// to wet (dist = -Havg) one by one, greedily, as long as adding their
// nodal mass does not overshoot the volume deficit.
double volume_error = expected_volume - measured_volume;
if (measured_volume < expected_volume)
{
double layer_volume = 0.0;
std::vector<unsigned int> first_outside;
int n_nodes = mdistances.size();
// find list of the first nodes outside of the fluid and compute their volume
for (int i_node = 0; i_node < n_nodes; i_node++)
{
double dist = mdistances[i_node];
if (dist > 0.0) //node is outside domain
{
for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex() [i_node]; csr_index != mr_matrix_container.GetRowStartIndex() [i_node + 1]; csr_index++)
{
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex() [csr_index];
if (mdistances[j_neighbour] <= 0.0)
{
//node has a wet neighbour -> candidate of the first dry layer;
//accept it only if its mass still fits in the remaining deficit
const double nodal_mass = 1.0 / mr_matrix_container.GetInvertedMass() [i_node];
if (nodal_mass < volume_error - layer_volume)
{
first_outside.push_back (i_node);
layer_volume += nodal_mass;
break;
}
//const double m_inv = mr_matrix_container.GetInvertedMass()[i_node];
//layer_volume += 1.0/m_inv;
}
}
}
}
// std::cout << ", layer_volume: " << layer_volume << std::endl;
// if (measured_volume + layer_volume <= expected_volume)
{
// mark the nodes in the outside layer with a small negative distance
for (unsigned int i=0; i<first_outside.size(); i++)
{
unsigned int i_node = first_outside[i];
mdistances[i_node] = -mHavg[i_node];
}
}
}
//write the corrected distance field back to the Kratos database
mr_matrix_container.WriteScalarToDatabase (DISTANCE, mdistances, mr_model_part.Nodes() );
//if (measured_volume < expected_volume)
// {
// double layer_volume = 0.0;
// std::vector<unsigned int> first_outside;
// int n_nodes = mdistances.size();
// //find list of the first nodes outside of the fluid and compute their volume
// for (int i_node = 0; i_node < n_nodes; i_node++)
// {
// double dist = mdistances[i_node];
// if (dist > 0.0) //node is outside domain
// {
// for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
// {
// unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
// if(mdistances[j_neighbour] <= 0.0)
// {
// first_outside.push_back(i_node);
// const double m_inv = mr_matrix_container.GetInvertedMass()[i_node];
// layer_volume += 1.0/m_inv;
// }
// }
// }
// }
// if (measured_volume + layer_volume <= expected_volume)
// {
// //mark the nodes in the outside layer with a small negative distance
// for(unsigned int i=0; i<first_outside.size(); i++)
// {
// unsigned int i_node = first_outside[i];
// mdistances[i_node] = -mHavg[i_node];
// }
// }
// }
// mr_matrix_container.WriteScalarToDatabase(DISTANCE, mdistances, mr_model_part.Nodes());
}
// Stores the wall-reduction coefficients that ActivateWallResistance later
// assigns to corner and edge nodes respectively.
void SetWallReductionCoefficients (double corner_coefficient, double edge_coefficient)
{
mcorner_coefficient = corner_coefficient;
medge_coefficient = edge_coefficient;
}
void ContinuousVolumeCorrection (double expected_volume, double measured_volume)
{
// Compensates level-set volume loss by shifting the WHOLE distance field:
// the volume deficit is divided by the volume of the first dry layer, and
// all distances are reduced by (average layer height * ratio), moving the
// free surface slightly outwards. Corrections below 10% of the layer are
// skipped to avoid perpetual small shifts.
double volume_error = expected_volume - measured_volume;
if (volume_error == 0.0)
return ;
if (measured_volume < expected_volume)
{
double layer_volume = 0.0;
std::vector<unsigned int> first_outside;
int n_nodes = mdistances.size();
// find list of the first nodes outside of the fluid and compute their volume
for (int i_node = 0; i_node < n_nodes; i_node++)
{
double dist = mdistances[i_node];
bool is_bubble = true;
bool is_first_outside = false;
if (dist > 0.0) //node is outside domain
{
for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex() [i_node]; csr_index != mr_matrix_container.GetRowStartIndex() [i_node + 1]; csr_index++)
{
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex() [csr_index];
if (mdistances[j_neighbour] <= 0.0)
{
is_first_outside = true;
}
else
is_bubble = false;
}
}
//a dry node with at least one wet neighbour belongs to the first
//layer, unless ALL its neighbours are wet (then it is a bubble node
//and is excluded from the correction)
if (is_first_outside && !is_bubble)
{
const double nodal_mass = 1.0 / mr_matrix_container.GetInvertedMass() [i_node];
first_outside.push_back (i_node);
layer_volume += nodal_mass;
// if(nodal_mass > volume_error - layer_volume)
// {
// extra_volume += nodal_mass;
// }
}
}
// std::cout << ", layer_volume: " << layer_volume << std::endl;
if (layer_volume == 0.00)
return;
//fraction of the first dry layer needed to recover the lost volume
double ratio = volume_error / layer_volume;
if (ratio > 1.0) ratio = 1.0;
// KRATOS_WATCH (ratio);
if (ratio < 0.1) // NO correction for less than 10% error
return;
//average mesh size of the first dry layer
double average_layer_h = 0.0;
for (unsigned int i=0; i<first_outside.size(); i++)
{
unsigned int i_node = first_outside[i];
average_layer_h += mHavg[i_node];
}
average_layer_h /= static_cast<double> (first_outside.size() );
//uniform shift of the whole distance field
for (int i_node = 0; i_node < n_nodes; i_node++)
mdistances[i_node] -= average_layer_h* ratio;
// if((ratio < 1.00))
// {
// // mark the nodes in the outside layer with a small negative distance
// for(unsigned int i=0; i<first_outside.size(); i++)
// {
// unsigned int i_node = first_outside[i];
// mdistances[i_node] -= mHavg[i_node] * ratio;
// }
// }
// else
// {
// // mark the nodes in the outside layer with a small negative distance
// for(unsigned int i=0; i<first_outside.size(); i++)
// {
// unsigned int i_node = first_outside[i];
// mdistances[i_node] = -mHavg[i_node];
// }
// }
}
mr_matrix_container.WriteScalarToDatabase (DISTANCE, mdistances, mr_model_part.Nodes() );
return;
}
// void FindBubbles()
// {
// int n_nodes = mdistances.size();
// ValuesVectorType last_air (n_nodes);
// mr_matrix_container.SetToZero (last_air);
// mr_matrix_container.FillScalarFromDatabase (LAST_AIR, last_air, mr_model_part.Nodes() );
// const int max_bubble_nodes = 12;
// const int min_bubble_nodes = 2;
// #pragma omp parallel for
// for ( int i_node = 0; i_node < static_cast<int>(mr_model_part.Nodes().size()); i_node++)
// mis_visited[i_node] = 0;
//
// // loop over the nodes to find a outside node.
// for (int i_node = 0; i_node < n_nodes; i_node++)
// {
// double dist = mdistances[i_node];
// if ( (mis_visited[i_node] == 0) && (dist > 0.0) ) // node is outside the domain and has not visited yet
// {
// std::vector<int> outside_nodes (n_nodes,0);
// outside_nodes[0] = i_node;
// mis_visited[i_node] = 1;
// int n_outside = 1;
// for (int i = 0 ; i < n_outside ; i++) // loop over founded outside nodes. NOTE: n_outside is increasing inside the loop
// {
// int this_node = outside_nodes[i];
// // loop over neighbours of this node
// for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex() [this_node]; csr_index != mr_matrix_container.GetRowStartIndex() [this_node + 1]; csr_index++)
// {
// unsigned int j_neighbour = mr_matrix_container.GetColumnIndex() [csr_index];
// if ( (mis_visited[j_neighbour] == 0) && (mdistances[j_neighbour] >= 0.0) ) // the neighbour node is outside the fluid and not visited yet
// {
// outside_nodes[n_outside] = j_neighbour;
// n_outside++;
// }
// mis_visited[j_neighbour] = 1;
// }
// }
// //KRATOS_WATCH(i_node);
// //KRATOS_WATCH(n_outside);
// //KRATOS_WATCH(is_first_outside);
// if ( (n_outside <= max_bubble_nodes) && (n_outside >= min_bubble_nodes) )
// {
// //KRATOS_WATCH(i_node);
// //KRATOS_WATCH(n_outside);
// for (int i = 0 ; i < n_outside ; i++)
// last_air[outside_nodes[i]] = 1.00;
// }
// }
// }
// mr_matrix_container.WriteScalarToDatabase (LAST_AIR, last_air, mr_model_part.Nodes() );
// }
//
// void FindColdShots()
// {
// int n_nodes = mdistances.size();
// ValuesVectorType cold_shots(n_nodes);
//
// mr_matrix_container.SetToZero(cold_shots);
//
// mr_matrix_container.FillScalarFromDatabase(LAST_AIR, cold_shots, mr_model_part.Nodes());
//
// std::vector<bool> is_first_outside(n_nodes, 0);
//
// std::vector<unsigned int> first_outside;
//
// // find list of the first nodes outside of the fluid
// for (int i_node = 0; i_node < n_nodes; i_node++)
// {
// double dist = mdistances[i_node];
// if (dist > 0.0) //node is outside domain
// {
// for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
// {
// unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
// if(mdistances[j_neighbour] <= 0.0)
// {
// is_first_outside[i_node] = true;
// first_outside.push_back(i_node);
// break;
// }
// }
// }
// }
//
//
// std::vector<bool> is_cold_shot(is_first_outside);
//
// // Now we check if all the neighbours of the first_outside nodes are first outside or inside and mark it as a possible cold shot
// for(unsigned int i=0; i<first_outside.size(); i++)
// {
// unsigned int i_node = first_outside[i];
// for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
// {
// unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
// if(!is_first_outside[j_neighbour])
// {
// is_cold_shot[i_node] = false;
// break;
// }
// }
// }
//
//
// //Now we have the possible cold shots and is time to check the gradient of convection
// for(unsigned int i=0; i<first_outside.size(); i++)
// {
// unsigned int i_node = first_outside[i];
// if(is_cold_shot[i_node])
// {
// for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
// {
// unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
// if(mdistances[j_neighbour] <= 0.0)
// {
//
// }
// }
// }
// }
//
//
//
// // Adding the founded cold shots to the previous ones.
// for(int i_node = 0; i_node < n_nodes; i_node++)
// if(is_cold_shot[i_node])
// cold_shots[i_node]=1.00;
//
// mr_matrix_container.WriteScalarToDatabase(LAST_AIR, cold_shots, mr_model_part.Nodes());
// }
void CalculatePorousResistanceLaw(unsigned int res_law)
{
// Fills the linear (mA) and non-linear (mB) Darcy resistance coefficients
// of the porous medium.
// res_law == 1 : coefficients are computed per node from the Ergun
//                correlation using POROSITY, DIAMETER and mViscosity.
// otherwise    : a custom law is assumed; user-provided LIN_DARCY_COEF /
//                NONLIN_DARCY_COEF are kept and only zeroed on non-porous
//                nodes (POROSITY == 1).
// In both cases the edge-based arrays mA/mB are refreshed from the Kratos
// nodal database at the end.
//variables for node based data handling
// ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes();
// mr_matrix_container.FillScalarFromDatabase(DIAMETER, mD, mr_model_part.Nodes());
// mr_matrix_container.FillScalarFromDatabase(POROSITY, mEps, mr_model_part.Nodes());
// mr_matrix_container.FillScalarFromDatabase(LIN_DARCY_COEF, mA, mr_model_part.Nodes());
// mr_matrix_container.FillScalarFromDatabase(NONLIN_DARCY_COEF, mB, mr_model_part.Nodes());
// const double nu_i = mViscosity;
if (res_law == 1)
{
// KRATOS_WATCH("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Calculating Ergun Darcy coefficients ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
/* if the chosen resistance law is ERGUN calculate Ergun A and B*/
for (ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin();
inode != mr_model_part.NodesEnd();
inode++)
{
const double eps = inode->FastGetSolutionStepValue (POROSITY);
// KRATOS_WATCH("POROSITY ")
// KRATOS_WATCH(eps)
const double d = inode->FastGetSolutionStepValue (DIAMETER);
// KRATOS_WATCH("DIAMETER ")
// KRATOS_WATCH(d)
//
// KRATOS_WATCH("VISCOSITY ")
// KRATOS_WATCH(mViscosity)
double& a = inode-> FastGetSolutionStepValue (LIN_DARCY_COEF);
double& b = inode-> FastGetSolutionStepValue (NONLIN_DARCY_COEF);
if (eps < 1.0)
{
//Ergun correlation: inverse permeability k_inv and the derived
//linear (viscous) and quadratic (inertial) Darcy coefficients
double k_inv = 150.0 * (1.0 - eps) * (1.0 - eps) / (eps * eps * eps * d * d);
a = mViscosity * k_inv;
b = (1.75 / eps) * sqrt (k_inv / (150.0 * eps) );
// KRATOS_WATCH("PERMEABILITY ")
// KRATOS_WATCH(k_inv)
// KRATOS_WATCH("LIN DARCY COEFFICIENT ")
// KRATOS_WATCH(a)
// KRATOS_WATCH("NONLIN DARCY COEFFICIENT ")
// KRATOS_WATCH(b)
}
else
{
//non-porous node: no resistance
a = 0;
b = 0;
}
}
}
else
{
/* whether it is a Custom Resistance law or NO resistance law is present ---> set to zero A and B for non porous nodes*/
for (ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin();
inode != mr_model_part.NodesEnd();
inode++)
{
const double eps = inode->FastGetSolutionStepValue (POROSITY); /*reading from kratos database*/
double& a = inode-> FastGetSolutionStepValue (LIN_DARCY_COEF); /*changing kratos database*/
double& b = inode-> FastGetSolutionStepValue (NONLIN_DARCY_COEF); /*changing kratos database*/
if (eps == 1.0)
{
a = 0;
b = 0;
}
}
}
mr_matrix_container.FillScalarFromDatabase (LIN_DARCY_COEF, mA, mr_model_part.Nodes() ); /*filling edgebased database reading from kratos database*/
mr_matrix_container.FillScalarFromDatabase (NONLIN_DARCY_COEF, mB, mr_model_part.Nodes() ); /*filling edgebased database reading from kratos database*/
}
private:
//physical and algorithmic scalar parameters
double mMolecularViscosity;
double mcorner_coefficient;
double medge_coefficient;
double mmax_dt;
//references to the edge-based data container and the Kratos model part
MatrixContainer& mr_matrix_container;
ModelPart& mr_model_part;
//number of Runge-Kutta substeps used in ConvectDistance()
int mnumsubsteps;
bool muse_mass_correction;
//parameters controlling the wall law
bool mWallLawIsActive;
double mY_wall;
//parameters for controlling the usage of the delta time in the stabilization
double mstabdt_pressure_factor;
double mstabdt_convection_factor;
double medge_detection_angle;
double mtau2_factor;
bool massume_constant_dp;
//nodal values
ValuesVectorType mViscosity;
//velocity vector U at time steps n and n+1
CalcVectorType mWork, mvel_n, mvel_n1, mx, macc;
//pressure vector p at time steps n and n+1
ValuesVectorType mPn, mPn1;
//coefficients
ValuesVectorType mdistances;
//minimum length of the edges surrounding edges surrounding each nodal point
ValuesVectorType mHmin;
//average element size associated to each node (see CalculateEdgeLengths)
ValuesVectorType mHavg;
CalcVectorType mEdgeDimensions;
//area normal
CalcVectorType mSlipNormal;
CalcVectorType mInOutNormal;
//projection terms
CalcVectorType mPi, mXi;
//flag for first time step
bool mFirstStep;
//flag to differentiate interior and boundary nodes
ValuesVectorType mNodalFlag;
ValuesVectorType mWallReductionFactor;
//lists of nodes with different types of boundary conditions
IndicesVectorType mSlipBoundaryList, mPressureOutletList, mFixedVelocities, mInOutBoundaryList,mDistanceBoundaryList;
ValuesVectorType mDistanceValuesList;
CalcVectorType mFixedVelocitiesValues;
// ValuesVectorType mPressureOutlet;
//intrinsic time step size
ValuesVectorType mTauPressure;
ValuesVectorType mTauConvection;
ValuesVectorType mTau2;
ValuesVectorType mdiv_error;
//per-node flags: slip-boundary membership and visited marker for convection
boost::numeric::ublas::vector<bool> mis_slip;
boost::numeric::ublas::vector<int> mis_visited;
//variables for resolving pressure equation
//laplacian matrix
TSystemMatrixType mL;
//constant variables
double mRho;
array_1d<double, TDim> mBodyForce;
//variables for convection
ValuesVectorType mphi_n;
ValuesVectorType mphi_n1;
CalcVectorType mPiConvection;
ValuesVectorType mBeta;
//variables for edge BCs
IndicesVectorType medge_nodes;
CalcVectorType medge_nodes_direction;
IndicesVectorType mcorner_nodes;
//porosity and Darcy resistance coefficients (see CalculatePorousResistanceLaw)
ValuesVectorType mEps;
ValuesVectorType mdiag_stiffness;
// ValuesVectorType mD;
ValuesVectorType mA;
ValuesVectorType mB;
double mdelta_t_avg;
double max_dt;
double mshock_coeff;
//***********************************************************
//functions to calculate area normals for boundary conditions
void CalculateNormal2D (ModelPart::ConditionsContainerType::iterator cond_it, array_1d<double, 3 > & area_normal)
{
// Computes the length-weighted (not normalized) normal of a 2D boundary
// segment by rotating its edge vector 90 degrees, and stores it both in
// the caller's buffer and in the condition's NORMAL variable.
Geometry<Node < 3 > >& rGeom = (cond_it)->GetGeometry();
const double dx = rGeom[1].X() - rGeom[0].X();
const double dy = rGeom[1].Y() - rGeom[0].Y();
area_normal[0] = dy;
area_normal[1] = -dx;
area_normal[2] = 0.00;
noalias ( (cond_it)->GetValue (NORMAL) ) = area_normal;
}
void CalculateNormal3D (ModelPart::ConditionsContainerType::iterator cond_it, array_1d<double, 3 > & area_normal, array_1d<double, 3 > & v1, array_1d<double, 3 > & v2)
{
// Computes the area-weighted normal of a triangular boundary face as
// -0.5 * (v1 x v2), where v1 and v2 are the triangle's edge vectors from
// node 0. v1/v2 are caller-provided scratch buffers to avoid reallocation.
Geometry<Node < 3 > >& rGeom = (cond_it)->GetGeometry();
// edge vectors of the triangle, both anchored at node 0
v1[0] = rGeom[1].X() - rGeom[0].X();
v1[1] = rGeom[1].Y() - rGeom[0].Y();
v1[2] = rGeom[1].Z() - rGeom[0].Z();
v2[0] = rGeom[2].X() - rGeom[0].X();
v2[1] = rGeom[2].Y() - rGeom[0].Y();
v2[2] = rGeom[2].Z() - rGeom[0].Z();
MathUtils<double>::CrossProduct (area_normal, v1, v2);
area_normal *= -0.5; // triangle area factor with outward orientation
noalias ( (cond_it)->GetValue (NORMAL) ) = area_normal;
}
//*********************************************************
//function to calculate minimum length of surrounding edges
void CalculateEdgeLengths (ModelPart::NodesContainerType& rNodes)
{
KRATOS_TRY
// Gathers nodal coordinates, copies the minimum surrounding-edge length
// from the matrix container into mHmin (aborting on zero entries),
// estimates a per-node average element size mHavg from the lumped mass,
// and stores the edge vectors pos_i - pos_j in mEdgeDimensions.
const unsigned int n_nodes = rNodes.size();
// local copy of all nodal positions, indexed by AUX_INDEX
std::vector< array_1d<double, TDim > > position (n_nodes);
for (typename ModelPart::NodesContainerType::iterator node_it = rNodes.begin(); node_it != rNodes.end(); node_it++)
{
const unsigned int i_node = static_cast<unsigned int> (node_it->FastGetSolutionStepValue (AUX_INDEX) );
noalias (position[i_node]) = node_it->Coordinates();
}
// minimum edge length per node, provided by the matrix container
ValuesVectorType& r_hmin = mr_matrix_container.GetHmin();
for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
{
mHmin[i_node] = r_hmin[i_node];
if (r_hmin[i_node] == 0.0)
KRATOS_THROW_ERROR (std::logic_error,"found a 0 hmin on node",i_node);
}
// average element size from the lumped mass (area in 2D, volume in 3D);
// works on unstructured meshes as well
for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
{
const double m_i = mr_matrix_container.GetLumpedMass() [i_node];
if (TDim == 2)
mHavg[i_node] = sqrt (2.0 * m_i);
else if (TDim == 3)
mHavg[i_node] = pow (6.0 * m_i, 1.0 / 3.0);
}
// edge vectors l_ij = pos_i - pos_j, stored per CSR entry
for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
{
const array_1d<double, TDim > & pos_i = position[i_node];
for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex() [i_node]; csr_index != mr_matrix_container.GetRowStartIndex() [i_node + 1]; csr_index++)
{
const unsigned int j_neighbour = mr_matrix_container.GetColumnIndex() [csr_index];
const array_1d<double, TDim > & pos_j = position[j_neighbour];
array_1d<double, TDim>& l_k = mEdgeDimensions[csr_index];
for (unsigned int comp = 0; comp < TDim; comp++)
l_k[comp] = pos_i[comp] - pos_j[comp];
}
}
KRATOS_CATCH ("")
}
//*********************************************************************
//function to calculate right-hand side of fractional momentum equation
//Assembles the right-hand side of the scalar convection equation, edge by
//edge: Galerkin convection operator + tau-weighted stabilization
//(low/high order) + a limited shock-capturing term scaled by mshock_coeff,
//mBeta and the nodal size mHavg. Only nodes with active_nodes != 0
//contribute. Velocities are divided by the nodal porosity mEps.
void CalculateRHS_convection (
const ValuesVectorType& mphi,
const CalcVectorType& convective_velocity,
ValuesVectorType& rhs,
ValuesVectorType& active_nodes
)
{
KRATOS_TRY
int n_nodes = mphi.size();
// //calculating the convective projection
//#pragma omp parallel for
// for (int i_node = 0; i_node < n_nodes; i_node++)
// {
//
// double& pi_i = mPiConvection[i_node];
// const double& phi_i = mphi[i_node];
//
// //set to zero the projection
// pi_i = 0;
// if (active_nodes[i_node] != 0.0)
// {
//
// const array_1d<double, TDim>& a_i = convective_velocity[i_node];
//
// //loop to all the edges surrounding node I
// for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
// {
// unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
//
// if (active_nodes[j_neighbour] != 0.0)
// {
// const array_1d<double, TDim>& a_j = convective_velocity[j_neighbour];
// const double& phi_j = mphi[j_neighbour];
//
// CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues()[csr_index];
//
// edge_ij.Add_ConvectiveContribution(pi_i, a_i, phi_i, a_j, phi_j);
// }
// }
//
// //apply inverted mass matrix
// const double m_inv = mr_matrix_container.GetInvertedMass()[i_node];
// pi_i *= m_inv;
// }
// // KRATOS_WATCH(pi_i);
// // num = fabs(num);
// // if(num > norm_vI*0.0001)
// // mBeta[i_node] = 1.0 - num/denom;
// // else
// // mBeta[i_node] = 1.0;
//
// }
//perform MPI synchronization
//calculating the RHS
double stab_low;
double stab_high;
array_1d<double, TDim> a_i;
array_1d<double, TDim> a_j;
#pragma omp parallel for private(stab_low,stab_high,a_i,a_j)
for (int i_node = 0; i_node < n_nodes; i_node++)
{
double& rhs_i = rhs[i_node];
const double& h_i = mHavg[i_node];
const double& phi_i = mphi[i_node];
//porosity-scaled convective velocity at node i
noalias (a_i) = convective_velocity[i_node];
a_i /= mEps[i_node];
const array_1d<double, TDim>& proj_i = mPiConvection[i_node];
// const double& pi_i = mPiConvection[i_node];
//scalar projection: proj_i . a_i
double pi_i = proj_i[0] * a_i[0];
for (unsigned int l_comp = 1; l_comp < TDim; l_comp++)
pi_i += proj_i[l_comp] * a_i[l_comp];
// double beta = mBeta[i_node];
rhs_i = 0.0;
if (active_nodes[i_node] != 0.0)
{
const double& beta = mBeta[i_node];
double norm_a = a_i[0] * a_i[0];
for (unsigned int l_comp = 1; l_comp < TDim; l_comp++)
norm_a += a_i[l_comp] * a_i[l_comp];
norm_a = sqrt (norm_a);
//loop to all the edges surrounding node I
for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex() [i_node]; csr_index != mr_matrix_container.GetRowStartIndex() [i_node + 1]; csr_index++)
{
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex() [csr_index];
if (active_nodes[j_neighbour] != 0.0)
{
//double& rhs_j = rhs[j_neighbour];
const double& phi_j = mphi[j_neighbour];
noalias (a_j) = convective_velocity[j_neighbour];
a_j /= mEps[j_neighbour];
// const double& pi_j = mPiConvection[j_neighbour];
const array_1d<double, TDim>& proj_j = mPiConvection[j_neighbour];
//NOTE(review): pi_j is projected onto a_i (not a_j) -- looks deliberate
//given the symmetric use in the HIGH stabilization call, but confirm.
double pi_j = proj_j[0] * a_i[0];
for (unsigned int l_comp = 1; l_comp < TDim; l_comp++)
pi_j += proj_j[l_comp] * a_i[l_comp];
CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues() [csr_index];
//convection operator
edge_ij.Sub_ConvectiveContribution (rhs_i, a_i, phi_i, a_j, phi_j); //this works
// edge_ij.Sub_D_v(rhs_i, a_i*phi_i, a_i*phi_j);
//calculate stabilization part
edge_ij.CalculateConvectionStabilization_LOW (stab_low, a_i, phi_i, a_j, phi_j);
double edge_tau = mTauConvection[i_node];
edge_ij.CalculateConvectionStabilization_HIGH (stab_high, a_i, pi_i, a_j, pi_j);
edge_ij.Sub_StabContribution (rhs_i, edge_tau, 1.0, stab_low, stab_high);
//shock capturing: compare the full scalar laplacian against its
//streamline part and only dissipate the crosswind difference
double coeff = 0.5 * mshock_coeff; //=0.7*0.5;
double laplacian_ij = 0.0;
edge_ij.CalculateScalarLaplacian (laplacian_ij);
double capturing = laplacian_ij * (phi_j - phi_i);
// rhs_i-= coeff*capturing*beta*norm_a*h_i;
double aaa = 0.0;
for (unsigned int k_comp = 0; k_comp < TDim; k_comp++)
for (unsigned int m_comp = 0; m_comp < TDim; m_comp++)
aaa += a_i[k_comp] * a_i[m_comp] * edge_ij.LaplacianIJ (k_comp, m_comp);
if (norm_a > 1e-10)
{
aaa /= (norm_a * norm_a);
double capturing2 = aaa * (phi_j - phi_i);
if (fabs (capturing) > fabs (capturing2) )
rhs_i -= coeff * (capturing - capturing2) * beta * norm_a * h_i;
}
}
}
}
// KRATOS_WATCH(rhs_i);
}
// int inout_size = mInOutBoundaryList.size();
// //#pragma omp parallel for firstprivate(slip_size)
// for (int i = 0; i < inout_size; i++)
// {
// unsigned int i_node = mInOutBoundaryList[i];
// double dist = mdistances[i_node];
// if (dist <= 0.0)
// {
// const array_1d<double, TDim>& U_i = mvel_n1[i_node];
// const array_1d<double, TDim>& an_i = mInOutNormal[i_node];
// double projection_length = 0.0;
// double Ain = 0.0;
// for (unsigned int comp = 0; comp < TDim; comp++)
// {
// projection_length += U_i[comp] * an_i[comp];
// Ain += an_i[comp]*an_i[comp];
// }
//
// double& rhs_i = rhs[i_node];
//
// rhs_i += projection_length * mphi[i_node];
// }
// }
// int inout_size = mInOutBoundaryList.size();
// double vol_var = 0.0;
// //#pragma omp parallel for firstprivate(slip_size)
// for (int i = 0; i < inout_size; i++)
// {
// unsigned int i_node = mInOutBoundaryList[i];
// double dist = mdistances[i_node];
// // if (dist <= 0.0)
// // {
// const array_1d<double, TDim>& U_i = mvel_n1[i_node];
// const array_1d<double, TDim>& an_i = mInOutNormal[i_node];
// double A = norm_2(an_i);
//
// double projection_length = 0.0;
// for (unsigned int comp = 0; comp < TDim; comp++)
// {
// projection_length += U_i[comp] * an_i[comp];
// }
//
// double& rhs_i = rhs[i_node];
// // if(projection_length > 0) //outlet
// // rhs_i += A;
// // else
// rhs_i -= A;
//
// // }
// }
KRATOS_CATCH ("")
}
//**************************************
//Helper used by DetectEdges3D: given a slip face and one of its neighbour
//faces, decides whether the shared edge (i1,i2) is a geometric edge. If the
//angle between the two face normals exceeds 45 deg, the unit edge direction
//is accumulated (with consistent orientation) into cornern_list and the
//touch-count of both end nodes is incremented in edge_nodes.
//Fixes vs. the original: 3.1 was used as pi (shifting the threshold to
//~44.4 deg, contradicting the comment); sign1 is now computed with
//inner_prod exactly like sign2 instead of a hand-rolled loop.
void CornerDectectionHelper (Geometry< Node < 3 > >& face_geometry,
const array_1d<double, 3 > & face_normal,
const double An,
const WeakPointerVector<Condition>& neighb,
const unsigned int i1,
const unsigned int i2,
const unsigned int neighb_index,
std::vector<unsigned int>& edge_nodes,
CalcVectorType& cornern_list
)
{
    const double acceptable_angle = 45.0 / 180.0 * 3.14159265358979323846; //angles of less than 45 deg will be accepted
    const double acceptable_cos = cos (acceptable_angle);
    if (face_geometry[i1].Id() < face_geometry[i2].Id() ) //process each shared edge once, from the lower-id side
    {
        const array_1d<double, 3 > & neighb_normal = neighb[neighb_index].GetValue (NORMAL);
        double neighb_An = norm_2 (neighb_normal);
        double cos_normal = 1.0 / (An * neighb_An) * inner_prod (face_normal, neighb_normal);
        //if the angle is too big between the two normals then the edge in the middle is a corner
        if (cos_normal < acceptable_cos)
        {
            array_1d<double, 3 > edge = face_geometry[i2].Coordinates() - face_geometry[i1].Coordinates();
            double temp = norm_2 (edge);
            edge /= temp; //unit direction of the detected edge
            int index1 = face_geometry[i1].FastGetSolutionStepValue (AUX_INDEX);
            int index2 = face_geometry[i2].FastGetSolutionStepValue (AUX_INDEX);
            edge_nodes[index1] += 1;
            edge_nodes[index2] += 1;
            //accumulate the direction with a sign chosen so successive
            //contributions reinforce rather than cancel each other
            double sign1 = inner_prod (cornern_list[index1], edge);
            if (sign1 >= 0)
            {
                for (unsigned int i = 0; i < edge.size(); i++)
                    cornern_list[index1][i] += edge[i];
            }
            else
            {
                for (unsigned int i = 0; i < edge.size(); i++)
                    cornern_list[index1][i] -= edge[i];
            }
            double sign2 = inner_prod (cornern_list[index2], edge);
            if (sign2 >= 0)
            {
                for (unsigned int i = 0; i < edge.size(); i++)
                    cornern_list[index2][i] += edge[i];
            }
            else
            {
                for (unsigned int i = 0; i < edge.size(); i++)
                    cornern_list[index2][i] -= edge[i];
            }
        }
    }
}
//function to calculate the area normals
//Scans all slip boundary faces (IS_STRUCTURE == 1), using
//CornerDectectionHelper on each face/neighbour pair to count how many sharp
//edges touch every node. Nodes touched exactly twice become edge nodes
//(with a unit direction stored in medge_nodes_direction); nodes touched more
//than twice become corner nodes. Fills medge_nodes, medge_nodes_direction
//and mcorner_nodes.
void DetectEdges3D (ModelPart::ConditionsContainerType& rConditions)
{
KRATOS_TRY
//calculate area normals face-by-face
array_1d<double, 3 > area_normal;
//(re)initialize normals
unsigned int n_nodes = mNodalFlag.size();
std::vector<unsigned int> temp_edge_nodes (n_nodes);
CalcVectorType temp_cornern_list (n_nodes);
for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
{
temp_edge_nodes[i_node] = 0.0; //note: double literal assigned to an unsigned counter; value is 0
noalias (temp_cornern_list[i_node]) = ZeroVector (TDim);
}
//loop over all faces
// const double node_factor = 1.0 / TDim;
for (ModelPart::ConditionsContainerType::iterator cond_it = rConditions.begin(); cond_it != rConditions.end(); cond_it++)
{
//get geometry data of the face
Geometry<Node < 3 > >& face_geometry = cond_it->GetGeometry();
//reference for area normal of the face
const array_1d<double, 3 > & face_normal = cond_it->GetValue (NORMAL);
double An = norm_2 (face_normal);
unsigned int current_id = cond_it->Id();
//slip condition
if (cond_it->GetValue (IS_STRUCTURE) == 1.0) //this is a slip face --> now look for its neighbours
{
const WeakPointerVector<Condition>& neighb = cond_it->GetValue (NEIGHBOUR_CONDITIONS);
//a neighbour equal to the face itself marks "no neighbour" in this structure
//check for neighbour zero
if (neighb[0].Id() != current_id) //check if the neighbour exists
CornerDectectionHelper (face_geometry, face_normal, An, neighb, 1, 2, 0, temp_edge_nodes, temp_cornern_list);
//check for neighbour one
if (neighb[1].Id() != current_id) //check if the neighbour exists
CornerDectectionHelper (face_geometry, face_normal, An, neighb, 2, 0, 1, temp_edge_nodes, temp_cornern_list);
//check for neighbour two
if (neighb[2].Id() != current_id) //check if the neighbour exists
CornerDectectionHelper (face_geometry, face_normal, An, neighb, 0, 1, 2, temp_edge_nodes, temp_cornern_list);
}
}
// ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes();
// mr_matrix_container.WriteVectorToDatabase(ACCELERATION, temp_cornern_list, rNodes);
//fill the list of edge_nodes
std::vector<unsigned int> tempmedge_nodes;
std::vector< array_1d<double,TDim> > tempmedge_nodes_direction;
std::vector<unsigned int> tempmcorner_nodes;
for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
{
if (temp_edge_nodes[i_node] == 2) //node is a edge_node
{
tempmedge_nodes.push_back (i_node);
array_1d<double, TDim>& node_edge = temp_cornern_list[i_node];
node_edge /= norm_2 (node_edge);
tempmedge_nodes_direction.push_back (node_edge);
}
else if (temp_edge_nodes[i_node] > 2)
tempmcorner_nodes.push_back (i_node);
}
//copy the temporaries into the member containers
medge_nodes.resize (tempmedge_nodes.size(),false);
medge_nodes_direction.resize (tempmedge_nodes_direction.size(),false);
mcorner_nodes.resize (tempmcorner_nodes.size(),false);
#pragma omp parallel for
for (int i = 0; i < static_cast<int> (tempmedge_nodes.size() ); i++)
{
medge_nodes[i] = tempmedge_nodes[i];
medge_nodes_direction[i] = tempmedge_nodes_direction[i];
}
#pragma omp parallel for
for (int i = 0; i < static_cast<int> (tempmcorner_nodes.size() ); i++)
{
mcorner_nodes[i] = tempmcorner_nodes[i];
}
//debug output: print every detected corner node
for (unsigned int i = 0; i < mcorner_nodes.size(); i++)
{
KRATOS_WATCH (mcorner_nodes[i]);
}
KRATOS_CATCH ("")
}
// double ComputePorosityCoefficient(const double& viscosity, const double& vel_norm, const double& eps, const double& d)
// {
// // const double d = 0.01; //to be changed
// double linear;
// double non_linear;
// if (eps < 1.0)
// {
// double k_inv = 150.0 * (1.0 - eps)*(1.0 - eps) / (eps * eps * eps * d * d);
// linear = eps * viscosity * k_inv;
// non_linear = (1.75 * vel_norm) * sqrt(k_inv / (150.0 * eps));
// // double linear = viscosity * k_inv;
// // double non_linear = (1.75 * vel_norm / eps) * sqrt(k_inv / (150.0 * eps));
// }
// else
// {
// linear = 0.0;
// non_linear = 0.0;
// }
// return linear + non_linear;
// }
//Darcy-type porous resistance coefficient: a linear contribution (eps*a)
//plus a velocity-proportional non-linear contribution (eps*b*|v|).
double ComputePorosityCoefficient (const double& vel_norm, const double& eps, const double& a, const double& b)
{
    const double viscous_part = eps * a;
    const double inertial_part = eps * b * vel_norm;
    return viscous_part + inertial_part;
}
//One Jacobi-style smoothing pass: for every node inside the domain
//(mdistances <= 0) subtract the edge-laplacian correction of its value,
//storing intermediates in aux, then copy aux back into to_be_smoothed.
//Nodes outside the domain are copied through unchanged.
void LaplacianSmooth (ValuesVectorType& to_be_smoothed, ValuesVectorType& aux)
{
    ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes();
    int n_nodes = rNodes.size();
    //pass 1: aux = value - laplacian correction
    #pragma omp parallel for
    for (int i_node = 0; i_node < n_nodes; i_node++)
    {
        const double& value_i = to_be_smoothed[i_node];
        double correction = 0.0;
        if (mdistances[i_node] <= 0.0) //only interior nodes are smoothed
        {
            for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex() [i_node]; csr_index != mr_matrix_container.GetRowStartIndex() [i_node + 1]; csr_index++)
            {
                unsigned int j_neighbour = mr_matrix_container.GetColumnIndex() [csr_index];
                CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues() [csr_index];
                double l_ij;
                edge_ij.CalculateScalarLaplacian (l_ij);
                correction += l_ij * (to_be_smoothed[j_neighbour] - value_i);
            }
        }
        aux[i_node] = value_i - correction;
    }
    //pass 2: write the smoothed values back
    #pragma omp parallel for
    for (int i_node = 0; i_node < n_nodes; i_node++)
        to_be_smoothed[i_node] = aux[i_node];
}
//Fills diag_stiffness with a wall-resistance term for every node of the slip
//boundary list: a linear wall law area*nu*|v|/ym (scaled by the per-node
//mWallReductionFactor), active only inside the domain (mdistances <= 0).
//Two earlier variants (an iterative log-law and a fixed log-law estimate)
//are kept below in comments for reference. Throws if the viscosity is zero,
//since a wall law is meaningless then.
void ComputeWallResistance (
const CalcVectorType& vel,
ValuesVectorType& diag_stiffness
// CalcVectorType& rhs
)
{
//parameters:
// double k = 0.41;
// double B = 5.1;
// double density = mRho;
// double toll = 1e-6;
double ym = mY_wall; //0.0825877; //0.0093823
// double y_plus_incercept = 10.9931899;
// unsigned int itmax = 100;
if (mViscosity[0] == 0)
KRATOS_THROW_ERROR (std::logic_error, "it is not possible to use the wall law with 0 viscosity", "");
/* //slip condition
int slip_size = mSlipBoundaryList.size();
#pragma omp parallel for firstprivate(slip_size,B,toll,ym,y_plus_incercept,itmax)
for (int i_slip = 0; i_slip < slip_size; i_slip++)
{
unsigned int i_node = mSlipBoundaryList[i_slip];
double dist = mdistances[i_node];
if (dist <= 0.0)
{
double nu = mViscosity[i_node];
//array_1d<double, TDim>& rhs_i = rhs[i_node];
const array_1d<double, TDim>& U_i = vel[i_node];
const array_1d<double, TDim>& an_i = mSlipNormal[i_node];
//compute the modulus of the velocity
double mod_vel = 0.0;
double area = 0.0;
for (unsigned int comp = 0; comp < TDim; comp++)
{
mod_vel += U_i[comp] * U_i[comp];
area += an_i[comp] * an_i[comp];
}
mod_vel = sqrt(mod_vel);
area = sqrt(area);
//now compute the skin friction
double mod_uthaw = sqrt(mod_vel * nu / ym);
double y_plus = ym * mod_uthaw / nu;
if (y_plus > y_plus_incercept)
{
//begin cicle to calculate the real u_thaw's module:
unsigned int it = 0;
double dx = 1e10;
// KRATOS_WATCH(fabs(dx));
while ( (fabs(dx) > toll * mod_uthaw) && (it < itmax) )
{
double a = 1.0 / k;
double temp = a * log(ym * mod_uthaw / nu) + B;
double y = mod_uthaw * (temp) - mod_vel;
double y1 = temp + a;
dx = y / y1;
mod_uthaw -= dx;
it = it + 1;
}
if (it == itmax)
std::cout << "attention max number of iterations exceeded in wall law computation" << std::endl;
}
double tau = mod_uthaw * mod_uthaw ;
tau *= mWallReductionFactor[i_node];
if (mod_vel > 1e-9)
diag_stiffness[i_node] = tau * area / mod_vel;*/
/* int slip_size = mSlipBoundaryList.size();
#pragma omp parallel for firstprivate(slip_size,B,toll,ym,y_plus_incercept,itmax)
for (int i_slip = 0; i_slip < slip_size; i_slip++)
{
unsigned int i_node = mSlipBoundaryList[i_slip];
double dist = mdistances[i_node];
if (dist <= 0.0)
{
double nu = mViscosity[i_node];
//array_1d<double, TDim>& rhs_i = rhs[i_node];
const array_1d<double, TDim>& U_i = vel[i_node];
const array_1d<double, TDim>& an_i = mSlipNormal[i_node];
//compute the modulus of the velocity
double mod_vel = 0.0;
double area = 0.0;
for (unsigned int comp = 0; comp < TDim; comp++)
{
mod_vel += U_i[comp] * U_i[comp];
area += an_i[comp] * an_i[comp];
}
mod_vel = sqrt (mod_vel);
area = sqrt (area);
diag_stiffness[i_node] = area * mod_vel /pow(1.0/k*log(100) + B,2) * mWallReductionFactor[ i_node ];
}
else
diag_stiffness[i_node] = 0.0;
}*/
//slip condition: active variant -- linear wall resistance
int slip_size = mSlipBoundaryList.size();
#pragma omp parallel for firstprivate(slip_size,ym)
for (int i_slip = 0; i_slip < slip_size; i_slip++)
{
unsigned int i_node = mSlipBoundaryList[i_slip];
double dist = mdistances[i_node];
if (dist <= 0.0)
{
double nu = mMolecularViscosity; //mViscosity[i_node];
//array_1d<double, TDim>& rhs_i = rhs[i_node];
const array_1d<double, TDim>& U_i = vel[i_node];
const array_1d<double, TDim>& an_i = mSlipNormal[i_node];
//compute the modulus of the velocity and the boundary area from the slip normal
double mod_vel = 0.0;
double area = 0.0;
for (unsigned int comp = 0; comp < TDim; comp++)
{
mod_vel += U_i[comp] * U_i[comp];
area += an_i[comp] * an_i[comp];
}
mod_vel = sqrt (mod_vel);
area = sqrt (area);
//the 0.1 is such that the dissipation is as for the linear case for a velocity of 10m/s
diag_stiffness[i_node] = area * nu * mod_vel/ (ym ) * mWallReductionFactor[ i_node ] ;
}
else
{
//outside the domain: no wall resistance
diag_stiffness[i_node] = 0.0 ;
}
}
// //apply higher resistance normally to the edges
// int edge_size = medge_nodes_direction.size();
// #pragma omp parallel for firstprivate(edge_size)
// for (int i = 0; i < edge_size; i++)
// {
// int i_node = medge_nodes[i];
// double dist = mdistances[i_node];
//
// if(dist <= 0.0)
// {
// double nu = mViscosity[i_node];
// const array_1d<double, TDim>& an_i = mSlipNormal[i_node];
//
// //compute the modulus of the velocity
// double area = 0.0;
// for (unsigned int comp = 0; comp < TDim; comp++)
// {
// area += an_i[comp] * an_i[comp];
// }
// area = sqrt (area);
//
// diag_stiffness[i_node] += area * nu / (ym ) ;
//
// }
// }
//
// int corner_size = mcorner_nodes.size();
// for (int i = 0; i < corner_size; i++)
// {
// int i_node = mcorner_nodes[i];
// double nu = mViscosity[i_node];
// mWallReductionFactor[i_node] = mcorner_coefficient; //50.0;
// const double m = mr_matrix_container.GetLumpedMass()[i_node];
// diag_stiffness[i_node] += 100.0*m * nu / (ym ) ;
// }
}
//Computes a nodal Smagorinsky-type turbulent viscosity from the velocity
//gradient (assembled edge-by-edge from mvel_n1) and stores the total
//viscosity (molecular + Cs*h^2*sqrt(aux)) in mViscosity and in the
//VISCOSITY database. Fixes vs. the original: all accesses to velocity/
//gradient component [2] are now consistently guarded with TDim > 2 (the
//original guarded some but not all, which is out-of-bounds if the template
//is ever instantiated with TDim == 2); the unused local `stab_high` was
//removed.
void ApplySmagorinsky3D (double MolecularViscosity, double Cs)
{
    KRATOS_TRY
    ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes();
    array_1d<double, TDim> grad_vx;
    array_1d<double, TDim> grad_vy;
    array_1d<double, TDim> grad_vz;
    int n_nodes = rNodes.size();
    mr_matrix_container.FillVectorFromDatabase (VELOCITY, mvel_n1, rNodes);
    #pragma omp parallel for private(grad_vx,grad_vy,grad_vz)
    for (int i_node = 0; i_node < n_nodes; i_node++)
    {
        //set to zero the gradients
        for (unsigned int comp = 0; comp < TDim; comp++)
        {
            grad_vx[comp] = 0.0 ;
            grad_vy[comp] = 0.0 ;
            grad_vz[comp] = 0.0 ;
        }
        //compute node by node the gradients via edge contributions
        const array_1d<double, TDim>& U_i = mvel_n1[i_node];
        const double h = mHmin[i_node];
        const double m_inv = mr_matrix_container.GetInvertedMass() [i_node];
        for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex() [i_node]; csr_index != mr_matrix_container.GetRowStartIndex() [i_node + 1]; csr_index++)
        {
            unsigned int j_neighbour = mr_matrix_container.GetColumnIndex() [csr_index];
            const array_1d<double, TDim>& U_j = mvel_n1[j_neighbour];
            CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues() [csr_index];
            edge_ij.Add_grad_p (grad_vx, U_i[0], U_j[0]);
            edge_ij.Add_grad_p (grad_vy, U_i[1], U_j[1]);
            if (TDim > 2) //z velocity component exists only in 3D
                edge_ij.Add_grad_p (grad_vz, U_i[2], U_j[2]);
        }
        //finalize computation of the gradients: apply the inverted mass matrix
        for (unsigned int comp = 0; comp < TDim; comp++)
        {
            grad_vx[comp] *= m_inv ;
            grad_vy[comp] *= m_inv ;
            grad_vz[comp] *= m_inv ;
        }
        //symmetrize and multiply by 2
        //NOTE(review): the statement order below reuses already-updated
        //off-diagonal entries (e.g. grad_vy[0] += grad_vx[1] runs after
        //grad_vx[1] was itself incremented); preserved exactly as in the
        //original -- confirm against the intended 2*sym(grad) definition.
        grad_vx[0] *= 2.0;
        grad_vy[1] *= 2.0;
        if (TDim > 2)
            grad_vz[2] *= 2.0;
        grad_vx[1] += grad_vy[0];
        if (TDim > 2)
            grad_vx[2] += grad_vz[0];
        if (TDim > 2)
            grad_vy[2] += grad_vz[1];
        grad_vy[0] += grad_vx[1];
        if (TDim > 2) //these two touched component [2] unconditionally before
        {
            grad_vz[0] += grad_vx[2];
            grad_vz[1] += grad_vy[2];
        }
        //compute smagorinsky term: aux = 0.5 * sum of squared entries
        double aux = 0.0;
        for (unsigned int comp = 0; comp < TDim; comp++)
        {
            aux += grad_vx[comp] * grad_vx[comp] ;
            aux += grad_vy[comp] * grad_vy[comp] ;
            aux += grad_vz[comp] * grad_vz[comp] ;
        }
        aux *= 0.5;
        if (aux < 0.0 ) aux=0.0; //guard sqrt against round-off
        double turbulent_viscosity = Cs*h*h*sqrt (aux) /**MolecularViscosity*/;
        mViscosity[i_node] = turbulent_viscosity + MolecularViscosity;
    }
    mr_matrix_container.WriteScalarToDatabase (VISCOSITY, mViscosity, rNodes);
    KRATOS_CATCH ("");
}
//Node-by-node effective inverse-mass multiply:
//  destination = value/(M + value*D) * (M/value * origin1 + origin)
//where M is the lumped mass, D the diagonal stiffness and `value` the
//time-step-like scalar. Runs in parallel over all nodes.
void Add_Effective_Inverse_Multiply (
CalcVectorType& destination,
const CalcVectorType& origin1,
const double value,
const ValuesVectorType& mass,
const ValuesVectorType& diag_stiffness,
const CalcVectorType& origin
)
{
    KRATOS_TRY
    const int n_entries = destination.size();
    #pragma omp parallel for
    for (int i_node = 0; i_node < n_entries; i_node++)
    {
        const double m = mass[i_node];
        const double d = diag_stiffness[i_node];
        const array_1d<double, TDim>& v1 = origin1[i_node];
        const array_1d<double, TDim>& v2 = origin[i_node];
        array_1d<double, TDim>& out = destination[i_node];
        //scalar factor is identical for every component: hoist it
        const double factor = value / (m + value * d);
        for (unsigned int comp = 0; comp < TDim; comp++)
            out[comp] = factor * ( m / value * v1[comp] + v2[comp] );
    }
    KRATOS_CATCH ("")
}
//Computes the nodal projection of the convective term: for every node,
//accumulates gradient contributions of mphi_n1 along all surrounding edges
//and applies the inverted mass matrix. Velocities are divided by the nodal
//porosity mEps (the porosity-scaled a_i/a_j are computed but only the
//gradient of phi enters Add_grad_p here).
void ComputeConvectiveProjection (
CalcVectorType& mPiConvection,
const ValuesVectorType& mphi_n1,
const ValuesVectorType& mEps,
const CalcVectorType& mvel_n1
)
{
int n_nodes = mPiConvection.size();
//calculating the convective projection
array_1d<double, TDim> a_i;
array_1d<double, TDim> a_j;
#pragma omp parallel for private(a_i,a_j)
for (int i_node = 0; i_node < n_nodes; i_node++)
{
array_1d<double, TDim>& pi_i = mPiConvection[i_node];
// setting to zero the projection
for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
pi_i[l_comp] = 0.0;
/* if (active_nodes[i_node] != 0.0)
{*/
const double& phi_i = mphi_n1[i_node];
noalias (a_i) = mvel_n1[i_node];
a_i /= mEps[i_node];
// loop to all the edges surrounding node I
for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex() [i_node]; csr_index != mr_matrix_container.GetRowStartIndex() [i_node + 1]; csr_index++)
{
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex() [csr_index];
noalias (a_j) = mvel_n1[j_neighbour];
a_j /= mEps[j_neighbour];
const double& phi_j = mphi_n1[j_neighbour];
CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues() [csr_index];
//accumulate the edge gradient contribution of phi into pi_i
edge_ij.Add_grad_p (pi_i, phi_i, phi_j);
// if(i_node == 3255)
// {
// KRATOS_WATCH(j_neighbour)
// KRATOS_WATCH(pi_i)
// KRATOS_WATCH(mEps[i_node])
// KRATOS_WATCH(mEps[j_neighbour])
// KRATOS_WATCH(phi_i)
// KRATOS_WATCH(phi_j)
// KRATOS_WATCH(a_i)
// KRATOS_WATCH(a_j)
// KRATOS_WATCH(mr_matrix_container.GetInvertedMass()[i_node])
// KRATOS_WATCH(edge_ij.Ni_DNj)
//
// }
}
// apply inverted mass matrix
const double m_inv = mr_matrix_container.GetInvertedMass() [i_node];
for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
pi_i[l_comp] *= m_inv;
// std::cout << i_node << " " << pi_i << " " << mvel_n1[i_node] << " " << phi_i <<std::endl;
// for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
// if(std::isnan(pi_i[l_comp]))
// KRATOS_WATCH(m_inv);
// }
}
}
//Computes the per-node limiter coefficient mBeta in [0,1]: for every edge
//the midpoint of the two projected gradients is compared against the actual
//jump of mphi_n1 along the edge; the averaged relative mismatch, clamped to
//1, becomes the node's beta. mvel_n1 is part of the established signature
//but is not used by this routine.
void ComputeLimitor (
CalcVectorType& mPiConvection,
const ValuesVectorType& mphi_n1,
ValuesVectorType& mBeta,
const CalcVectorType& mvel_n1,
const CalcVectorType& mEdgeDimensions
)
{
    const int n_nodes = mPiConvection.size();
    #pragma omp parallel for
    for (int i_node = 0; i_node < n_nodes; i_node++)
    {
        const array_1d<double, TDim>& proj_node_i = mPiConvection[i_node];
        const double& phi_i = mphi_n1[i_node];
        double& beta_i = mBeta[i_node];
        beta_i = 0.0;
        double edge_count = 0.0;
        for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex() [i_node]; csr_index != mr_matrix_container.GetRowStartIndex() [i_node + 1]; csr_index++)
        {
            unsigned int j_neighbour = mr_matrix_container.GetColumnIndex() [csr_index];
            const double& phi_j = mphi_n1[j_neighbour];
            const array_1d<double, TDim>& edge_vec = mEdgeDimensions[csr_index];
            const array_1d<double, TDim>& proj_node_j = mPiConvection[j_neighbour];
            //estimated jump along the edge: midpoint rule on the two projections
            double estimated_jump = 0.0;
            for (unsigned int comp = 0; comp < TDim; comp++)
                estimated_jump += 0.5 * edge_vec[comp]* (proj_node_i[comp] + proj_node_j[comp]);
            //relative mismatch between estimated and actual jump of phi
            double numerator = fabs (fabs (phi_j - phi_i) - fabs (estimated_jump) );
            double denom = fabs (fabs (phi_j - phi_i) + 1e-6);
            beta_i += numerator / denom;
            edge_count += 1.0;
        }
        //average over the surrounding edges and clamp to 1
        beta_i /= edge_count;
        if (beta_i > 1.0)
            beta_i = 1.0;
    }
}
};
} //namespace Kratos
#undef SYMM_PRESS
#endif //KRATOS_EDGEBASED_LEVELSET_SUBSTEP_FLUID_SOLVER_H_INCLUDED defined
|
3d25pt.c | /*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
/* Parenthesize the branch arguments too (CERT PRE01-C): the original
 * expanded bare `a`/`b` inside the ternary, which mis-parses for
 * comma/assignment expressions passed as arguments. */
#define MAX(a,b) ((a) > (b) ? (a) : (b))
#define MIN(a,b) ((a) < (b) ? (a) : (b))
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for `struct timeval` values.
 * Note: *y is used as scratch space and is modified by the call.
 * Returns 1 if the difference is negative, otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Normalize y so the microsecond fields subtract without borrowing. */
    if (x->tv_usec < y->tv_usec)
    {
        int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * carry;
        y->tv_sec += carry;
    }
    if (x->tv_usec - y->tv_usec > 1000000)
    {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }
    /* tv_usec is now guaranteed non-negative. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    /* Negative difference iff the (normalized) seconds compare this way. */
    return x->tv_sec < y->tv_sec;
}
/* Driver for the order-4 (25-point) 3D wave stencil: allocates two
 * ping-pong time buffers plus a coefficient array, runs Nt timesteps
 * TESTS times and reports the best wall time.
 * Fixes vs. the original: Nx..Nt were read uninitialized when arguments
 * were missing (UB) -- now validated; a 1-element malloc assigned to roc2
 * was immediately leaked; the init loops started at index 1 although the
 * stencil reads index 0; A[1] was never initialized although it is read
 * on the right-hand side at t = 0; tile_size was never freed. */
int main(int argc, char *argv[])
{
    int t, i, j, k, test;
    int Nx, Ny, Nz, Nt;
    if (argc < 5) {
        fprintf(stderr, "Usage: %s Nx Ny Nz Nt\n", argv[0]);
        return 1;
    }
    /* +8 accounts for the 4-point halo on each side of every dimension */
    Nx = atoi(argv[1])+8;
    Ny = atoi(argv[2])+8;
    Nz = atoi(argv[3])+8;
    Nt = atoi(argv[4]);
    double ****A = (double ****) malloc(sizeof(double***)*2);
    double ***roc2 = (double ***) malloc(sizeof(double**)*Nz);
    A[0] = (double ***) malloc(sizeof(double**)*Nz);
    A[1] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
        A[0][i] = (double**) malloc(sizeof(double*)*Ny);
        A[1][i] = (double**) malloc(sizeof(double*)*Ny);
        roc2[i] = (double**) malloc(sizeof(double*)*Ny);
        for(j=0;j<Ny;j++){
            A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
            A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
            roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
        }
    }
    // tile size information, including extra element to decide the list length
    int *tile_size = (int*) malloc(sizeof(int));
    tile_size[0] = -1;
    // The list is modified here before source-to-source transformations
    tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
    tile_size[0] = 4;
    tile_size[1] = 4;
    tile_size[2] = 16;
    tile_size[3] = 32;
    tile_size[4] = -1;
    // for timekeeping
    int ts_return = -1;
    struct timeval start, end, result;
    double tdiff = 0.0, min_tdiff=1.e100;
    const int BASE = 1024;
    // initialize the full arrays (including index 0, which the stencil reads)
    srand(42);
    for (i = 0; i < Nz; i++) {
        for (j = 0; j < Ny; j++) {
            for (k = 0; k < Nx; k++) {
                A[0][i][j][k] = 1.0 * (rand() % BASE);
                A[1][i][j][k] = 0.0; /* read on the RHS at t = 0 */
                roc2[i][j][k] = 2.0 * (rand() % BASE);
            }
        }
    }
#ifdef LIKWID_PERFMON
    LIKWID_MARKER_INIT;
    #pragma omp parallel
    {
        LIKWID_MARKER_THREADINIT;
        #pragma omp barrier
        LIKWID_MARKER_START("calc");
    }
#endif
    int num_threads = 1;
#if defined(_OPENMP)
    num_threads = omp_get_max_threads();
#endif
    const double coef0 = -0.28472;
    const double coef1 = 0.16000;
    const double coef2 = -0.02000;
    const double coef3 = 0.00254;
    const double coef4 = -0.00018;
    for(test=0; test<TESTS; test++){
        gettimeofday(&start, 0);
        // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
        for (t = 0; t < Nt; t++) {
            for (i = 4; i < Nz-4; i++) {
                for (j = 4; j < Ny-4; j++) {
                    for (k = 4; k < Nx-4; k++) {
                        A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k] + roc2[i][j][k]*(
                            coef0* A[t%2][i  ][j  ][k  ] +
                            coef1*(A[t%2][i-1][j  ][k  ] + A[t%2][i+1][j  ][k  ] +
                                   A[t%2][i  ][j-1][k  ] + A[t%2][i  ][j+1][k  ] +
                                   A[t%2][i  ][j  ][k-1] + A[t%2][i  ][j  ][k+1]) +
                            coef2*(A[t%2][i-2][j  ][k  ] + A[t%2][i+2][j  ][k  ] +
                                   A[t%2][i  ][j-2][k  ] + A[t%2][i  ][j+2][k  ] +
                                   A[t%2][i  ][j  ][k-2] + A[t%2][i  ][j  ][k+2]) +
                            coef3*(A[t%2][i-3][j  ][k  ] + A[t%2][i+3][j  ][k  ] +
                                   A[t%2][i  ][j-3][k  ] + A[t%2][i  ][j+3][k  ] +
                                   A[t%2][i  ][j  ][k-3] + A[t%2][i  ][j  ][k+3]) +
                            coef4*(A[t%2][i-4][j  ][k  ] + A[t%2][i+4][j  ][k  ] +
                                   A[t%2][i  ][j-4][k  ] + A[t%2][i  ][j+4][k  ] +
                                   A[t%2][i  ][j  ][k-4] + A[t%2][i  ][j  ][k+4]) );
                    }
                }
            }
        }
#pragma endscop
        gettimeofday(&end, 0);
        ts_return = timeval_subtract(&result, &end, &start);
        (void) ts_return; /* sign of the difference is not needed here */
        tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
        min_tdiff = MIN(min_tdiff, tdiff);
        printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
    }
    PRINT_RESULTS(4, "constant")
#ifdef LIKWID_PERFMON
    #pragma omp parallel
    {
        LIKWID_MARKER_STOP("calc");
    }
    LIKWID_MARKER_CLOSE;
#endif
    // Free allocated arrays
    for(i=0; i<Nz; i++){
        for(j=0;j<Ny;j++){
            free(A[0][i][j]);
            free(A[1][i][j]);
            free(roc2[i][j]);
        }
        free(A[0][i]);
        free(A[1][i]);
        free(roc2[i]);
    }
    free(A[0]);
    free(A[1]);
    free(roc2);
    free(A);
    free(tile_size);
    return 0;
}
|
sixs_runs.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include "sixs_runs.h"
/* Spectral response tables for the SIXS_NB_BANDS sensor bands: number of
 * samples, lower/upper wavelength bounds and the sampled relative response
 * per band (values appear to be micrometers -- confirm against the 6S docs).
 * NOTE(review): the trailing identifier also defines a file-scope variable
 * named `etm_spectral_function_t` (same spelling as the struct tag); it
 * looks unused in this translation unit -- verify before removing. */
struct etm_spectral_function_t {
int nbvals[SIXS_NB_BANDS];
float wlinf[SIXS_NB_BANDS];
float wlsup[SIXS_NB_BANDS];
float response[SIXS_NB_BANDS][155];
} etm_spectral_function_t;
/*
 * create_6S_tables -- populate the 6S radiative-transfer lookup tables.
 *
 * For every (band, AOT) pair this writes a 6S command script, runs it via
 * system()/bash, and scrapes transmittances, spherical albedos, optical
 * depths and path reflectances out of the textual 6S report into
 * sixs_tables.  `meta` supplies satellite/instrument/WRS identifiers used
 * to build unique scratch filenames.  Returns 0; exits on any failure.
 */
int create_6S_tables(sixs_tables_t *sixs_tables, Input_meta_t *meta) {
char cmd[128],sixs_cmd_filename[1024],sixs_out_filename[1024],line_in[256];
/* char tmp_file[1024], cmd_string[1024]; */
int i,j,k;
FILE *fd;
/* Per-gas transmittances scraped from the 6S report.
 * NOTE(review): these are only assigned when the matching report lines are
 * found; an incomplete 6S output would leave them uninitialized before the
 * T_g_og product below -- confirm 6S always emits all seven lines. */
float tgoz,tgco2,tgo2,tgno2,tgch4,tgco;
int tm_band[SIXS_NB_BANDS]={25,26,27,28,29,30}; /* 6S predefined TM band ids */
char short_name[1024];
char local_granule_id[1024];
char acq_date_string[MAX_DATE_LEN + 1];
const char *sat_names[SAT_MAX] = {"1", "2", "3", "4", "5", "7"};
const char *inst_names[INST_MAX] = {"M", "T", "E"};
const char *wrs_names[WRS_MAX] = {"1", "2"};
struct etm_spectral_function_t etm_spectral_function = {
{54,61,65,81,131,155},
{0.420,0.500,0.580,0.730,1.501,2.0},
{0.550,0.650,0.740,0.930,1.825,2.386},
{
{0.000,0.000,0.000,0.000,0.000,0.000,0.016,0.071,0.287,0.666,0.792,0.857,0.839,0.806,0.779,0.846,0.901,0.900,0.890,0.851,0.875,0.893,0.884,0.930,0.958,0.954,0.980,0.975,0.965,0.962,0.995,0.990,0.990,0.979,0.983,0.969,0.960,0.768,0.293,0.054,0.009,0.000,0.000,0.000,0.000,0.000,0.000,0.000,0.000,0.000,0.000,0.000,0.000,0.000},
{0.001,0.002,0.003,0.012,0.026,0.074,0.174,0.348,0.552,0.696,0.759,0.785,0.822,0.870,0.905,0.929,0.947,0.952,0.952,0.951,0.953,0.950,0.954,0.967,0.959,0.941,0.933,0.938,0.951,0.956,0.955,0.956,0.973,0.992,1.000,0.976,0.942,0.930,0.912,0.799,0.574,0.340,0.185,0.105,0.062,0.038,0.021,0.011,0.005,0.002,0.000,0.000,0.000,0.000,0.000,0.000,0.000,0.000,0.000,0.000,0.000},
{0.000,0.000,0.000,0.000,0.000,0.000,0.000,0.000,0.000,0.000,0.000,0.000,0.000,0.000,0.000,0.001,0.002,0.010,0.047,0.174,0.419,0.731,0.921,0.942,0.937,0.937,0.949,0.965,0.973,0.970,0.958,0.955,0.962,0.980,0.993,0.998,1.000,0.995,0.992,0.988,0.977,0.954,0.932,0.880,0.729,0.444,0.183,0.066,0.025,0.012,0.000,0.000,0.000,0.000,0.000,0.000,0.000,0.000,0.000,0.000,0.000,0.000,0.000,0.000,0.000},
{0.000,0.000,0.000,0.000,0.000,0.002,0.004,0.002,0.001,0.020,0.032,0.052,0.069,0.110,0.175,0.271,0.402,0.556,0.705,0.812,0.871,0.896,0.908,0.918,0.926,0.928,0.930,0.926,0.925,0.928,0.923,0.916,0.908,0.903,0.909,0.924,0.946,0.954,0.971,0.969,0.967,0.965,0.967,0.961,0.949,0.931,0.925,0.929,0.943,0.961,0.985,0.992,0.998,0.992,0.994,0.997,0.998,1.000,0.991,0.988,0.969,0.926,0.868,0.817,0.819,0.880,0.854,0.572,0.256,0.104,0.044,0.022,0.011,0.007,0.000,0.000,0.000,0.000,0.000,0.000,0.000},
{0.000,0.003,0.000,0.001,0.007,0.008,0.008,0.012,0.012,0.028,0.041,0.062,0.087,0.114,0.176,0.230,0.306,0.410,0.481,0.543,0.598,0.642,0.686,0.719,0.750,0.785,0.817,0.845,0.867,0.881,0.902,0.900,0.896,0.892,0.899,0.882,0.872,0.872,0.872,0.878,0.868,0.860,0.877,0.884,0.897,0.895,0.898,0.912,0.921,0.927,0.937,0.947,0.948,0.954,0.961,0.962,0.962,0.964,0.969,0.956,0.952,0.951,0.952,0.953,0.939,0.934,0.928,0.943,0.945,0.935,0.944,0.947,0.944,0.949,0.960,0.966,0.971,0.978,0.993,0.998,0.996,0.996,0.997,0.986,0.990,0.988,0.992,0.985,0.982,0.978,0.970,0.966,0.952,0.927,0.883,0.832,0.751,0.656,0.577,0.483,0.393,0.310,0.239,0.184,0.142,0.104,0.080,0.063,0.049,0.041,0.036,0.023,0.021,0.019,0.012,0.006,0.008,0.000,0.000,0.000,0.000,0.000,0.000,0.000,0.000,0.000,0.000,0.000,0.000,0.000,0.000},
{0.004,0.001,0.003,0.000,0.002,0.001,0.002,0.002,0.012,0.008,0.009,0.018,0.017,0.031,0.037,0.046,0.058,0.076,0.088,0.110,0.149,0.196,0.242,0.303,0.367,0.437,0.519,0.610,0.677,0.718,0.756,0.774,0.784,0.775,0.789,0.782,0.778,0.766,0.762,0.768,0.775,0.769,0.788,0.808,0.794,0.823,0.811,0.819,0.836,0.837,0.836,0.851,0.859,0.855,0.871,0.873,0.875,0.859,0.872,0.859,0.872,0.863,0.865,0.868,0.877,0.873,0.869,0.876,0.868,0.879,0.873,0.876,0.880,0.874,0.870,0.858,0.863,0.859,0.844,0.859,0.854,0.863,0.868,0.856,0.847,0.861,0.851,0.852,0.838,0.847,0.840,0.831,0.836,0.838,0.822,0.838,0.839,0.842,0.854,0.862,0.873,0.868,0.879,0.891,0.898,0.919,0.920,0.926,0.928,0.934,0.936,0.953,0.954,0.952,0.960,0.973,0.985,0.972,0.970,0.994,0.989,0.975,1.000,0.991,0.968,0.966,0.956,0.929,0.929,0.926,0.903,0.924,0.929,0.928,0.920,0.853,0.775,0.659,0.531,0.403,0.275,0.218,0.131,0.104,0.075,0.052,0.029,0.028,0.014,0.019,0.013,0.007,0.015,0.000,0.004}
}
};
/* Fixed AOT(550nm) grid indexed by j below (SIXS_NB_AOT entries). */
sixs_tables->aot[0]=0.01;
sixs_tables->aot[1]=0.05;
sixs_tables->aot[2]=0.10;
sixs_tables->aot[3]=0.15;
sixs_tables->aot[4]=0.20;
sixs_tables->aot[5]=0.30;
sixs_tables->aot[6]=0.40;
sixs_tables->aot[7]=0.60;
sixs_tables->aot[8]=0.80;
sixs_tables->aot[9]=1.00;
sixs_tables->aot[10]=1.20;
sixs_tables->aot[11]=1.40;
sixs_tables->aot[12]=1.60;
sixs_tables->aot[13]=1.80;
sixs_tables->aot[14]=2.00;
/* Determine the 6s command and output filenames */
if (sprintf(short_name, "L%s%s%s", sat_names[meta->sat],
inst_names[meta->inst], "SR") < 0) {
fprintf(stderr, "ERROR:creating short name\n");
exit(-1);
}
if (!FormatDate(&meta->acq_date, DATE_FORMAT_DATEB, acq_date_string)) {
fprintf(stderr, "ERROR:formatting acquisition date\n");
exit(-1);
}
acq_date_string[4] = '\0'; /* split the date string into year / day-of-year parts */
sprintf(local_granule_id, "%s.a%4s%3s.w%1sp%03dr%03d",
short_name, acq_date_string, &acq_date_string[5],
wrs_names[meta->wrs_sys], meta->ipath, meta->irow);
/* Run 6s */
/* Iterations are independent: scratch filenames embed (i+1, j+1), so each
 * thread reads/writes only its own command and output files. */
#ifdef _OPENMP
#pragma omp parallel for private (i, j, k, sixs_cmd_filename, sixs_out_filename, fd, cmd, line_in, tgoz, tgco2, tgo2, tgno2, tgch4, tgco)
#endif
for (i=0;i<SIXS_NB_BANDS;i++) {
for (j=0;j<SIXS_NB_AOT;j++) {
sprintf (sixs_cmd_filename, "sixs_cmd_%s_%d_%d", local_granule_id, i+1, j+1);
sprintf (sixs_out_filename, "sixs_output_%s_%d_%d", local_granule_id, i+1, j+1);
printf("Processing 6S for band %d AOT %2d\r",i+1,j+1);
fflush(stdout);
/* Emit the 6S control-card deck as a here-document shell script. */
if ((fd=fopen(sixs_cmd_filename,"w"))==NULL) {
fprintf(stderr,"ERROR: creating temporary file %s\n",sixs_cmd_filename);
exit(-1);
}
fprintf(fd,"%s <<+ >%s\n",SIXS_APP,sixs_out_filename);
fprintf(fd,"0 (user defined)\n");
fprintf(fd,"%.2f %.2f %.2f %.2f %d %d (geometrical conditions sza saz vza vaz month day)\n",sixs_tables->sza,sixs_tables->phi,sixs_tables->vza,0.,sixs_tables->month,sixs_tables->day);
fprintf(fd,"8 (option for water vapor and ozone)\n");
fprintf(fd,"%.2f %.2f (water vapor and ozone)\n",sixs_tables->uwv,sixs_tables->uoz);
fprintf(fd,"1 (continental model)\n");
fprintf(fd,"0 (option for optical thickness at 550 nm)\n");
fprintf(fd,"%.3f (value of aot550\n",sixs_tables->aot[j]);
fprintf(fd,"%f (target level)\n",sixs_tables->target_alt);
fprintf(fd,"-1000 (sensor level : -1000=satellite level)\n");
switch (sixs_tables->Inst) {
case SIXS_INST_TM:
fprintf(fd,"%d (predefined band)\n",tm_band[i]);
break;
case SIXS_INST_ETM:
/* ETM+ has no predefined 6S band: supply the filter function, ten values per line. */
fprintf(fd,"1 (user defined filter function)\n");
fprintf(fd,"%05.3f %05.3f (wlinf wlsup)\n",etm_spectral_function.wlinf[i],etm_spectral_function.wlsup[i]);
for (k=0;k<etm_spectral_function.nbvals[i];k++) {
fprintf(fd,"%05.3f ",etm_spectral_function.response[i][k]);
if (!((k+1)%10))
fprintf(fd,"\n");
}
if (k%10)
fprintf(fd,"\n");
break;
default:
fprintf(stderr,"ERROR: Unknown Instrument in six_run parameters\n");
exit(-1);
}
fprintf(fd,"0 (homogeneous surface)\n");
fprintf(fd,"0 (no directional effects)\n");
fprintf(fd,"0 (constant value for rho)\n");
fprintf(fd,"%.3f (value of rho)\n",sixs_tables->srefl);
fprintf(fd,"-1 (no atmospheric correction)\n");
fprintf(fd,"0\n");
fprintf(fd,"+\n");
fclose(fd);
/* Modified 9/26/2014 to run bash shell vs. sh */
/* NOTE(review): cmd is 128 bytes but sixs_cmd_filename can be much longer
 * (granule id is built in a 1024-byte buffer) -- confirm lengths or bound this. */
sprintf(cmd,"bash %s",sixs_cmd_filename);
if (system(cmd)) {
fprintf(stderr,"ERROR: Can't run 6S \n");
exit(-1);
}
if ((fd=fopen(sixs_out_filename,"r"))==NULL) {
fprintf(stderr,"ERROR: reading temporary file %s\n",sixs_out_filename);
exit(-1);
}
/* Scrape the 6S textual report.  Fields sit in fixed-position columns, so
 * parsing is: skip the 27-char label, then alternate skipping blanks and
 * tokens to reach each numeric column. */
while (fgets(line_in,256,fd)) {
line_in[strlen(line_in)-1]='\0';
if (j==0) {
/* Rayleigh-only and gaseous quantities do not depend on AOT: parse them once. */
if (!strncmp(line_in,"* rayl. sca. trans. :",27)) {
k=27;
while (line_in[k]==' ')
k++;
sscanf(&line_in[k],"%f",&sixs_tables->T_r_down[i]);
while (line_in[k]!=' ') /* downward */
k++;
while (line_in[k]==' ') /* blank */
k++;
sscanf(&line_in[k],"%f",&sixs_tables->T_r_up[i]);
while (line_in[k]!=' ') /* upward */
k++;
while (line_in[k]==' ') /* blank */
k++;
sscanf(&line_in[k],"%f",&sixs_tables->T_r[i]);
}
if (!strncmp(line_in,"* water \" \" :",27)) {
k=27;
while (line_in[k]==' ')
k++;
while (line_in[k]!=' ') /* downward */
k++;
while (line_in[k]==' ') /* blank */
k++;
while (line_in[k]!=' ') /* upward */
k++;
while (line_in[k]==' ') /* blank */
k++;
sscanf(&line_in[k],"%f",&sixs_tables->T_g_wv[i]);
}
if (!strncmp(line_in,"* ozone \" \" :",27)) {
k=27;
while (line_in[k]==' ')
k++;
while (line_in[k]!=' ') /* downward */
k++;
while (line_in[k]==' ') /* blank */
k++;
while (line_in[k]!=' ') /* upward */
k++;
while (line_in[k]==' ') /* blank */
k++;
sscanf(&line_in[k],"%f",&tgoz);
}
if (!strncmp(line_in,"* co2 \" \" :",27)) {
k=27;
while (line_in[k]==' ')
k++;
while (line_in[k]!=' ') /* downward */
k++;
while (line_in[k]==' ') /* blank */
k++;
while (line_in[k]!=' ') /* upward */
k++;
while (line_in[k]==' ') /* blank */
k++;
sscanf(&line_in[k],"%f",&tgco2);
}
if (!strncmp(line_in,"* oxyg \" \" :",27)) {
k=27;
while (line_in[k]==' ')
k++;
while (line_in[k]!=' ') /* downward */
k++;
while (line_in[k]==' ') /* blank */
k++;
while (line_in[k]!=' ') /* upward */
k++;
while (line_in[k]==' ') /* blank */
k++;
sscanf(&line_in[k],"%f",&tgo2);
}
if (!strncmp(line_in,"* no2 \" \" :",27)) {
k=27;
while (line_in[k]==' ')
k++;
while (line_in[k]!=' ') /* downward */
k++;
while (line_in[k]==' ') /* blank */
k++;
while (line_in[k]!=' ') /* upward */
k++;
while (line_in[k]==' ') /* blank */
k++;
sscanf(&line_in[k],"%f",&tgno2);
}
if (!strncmp(line_in,"* ch4 \" \" :",27)) {
k=27;
while (line_in[k]==' ')
k++;
while (line_in[k]!=' ') /* downward */
k++;
while (line_in[k]==' ') /* blank */
k++;
while (line_in[k]!=' ') /* upward */
k++;
while (line_in[k]==' ') /* blank */
k++;
sscanf(&line_in[k],"%f",&tgch4);
}
if (!strncmp(line_in,"* co \" \" :",27)) {
k=27;
while (line_in[k]==' ')
k++;
while (line_in[k]!=' ') /* downward */
k++;
while (line_in[k]==' ') /* blank */
k++;
while (line_in[k]!=' ') /* upward */
k++;
while (line_in[k]==' ') /* blank */
k++;
sscanf(&line_in[k],"%f",&tgco);
}
/* Combined "other gases" transmittance.
 * NOTE(review): tgno2 is multiplied twice and this is recomputed for every
 * report line while j==0 -- confirm the duplicated factor is intentional. */
sixs_tables->T_g_og[i]=tgoz*tgco2*tgo2*tgno2*tgno2*tgch4*tgco;
}
if (!strncmp(line_in,"* spherical albedo :",27)) {
k=27;
while (line_in[k]==' ') /* blank */
k++;
if (j==0)
sscanf(&line_in[k],"%f",&sixs_tables->S_r[i]);
while (line_in[k]!=' ') /* Rayleigh */
k++;
while (line_in[k]==' ') /* blank */
k++;
while (line_in[k]!=' ') /* Aerosol */
k++;
while (line_in[k]==' ') /* blank */
k++;
sscanf(&line_in[k],"%f",&sixs_tables->S_ra[i][j]);
}
if (!strncmp(line_in,"* optical depth total:",27)) {
k=27;
while (line_in[k]==' ') /* blank */
k++;
while (line_in[k]!=' ') /* Rayleigh */
k++;
while (line_in[k]==' ') /* blank */
k++;
sscanf(&line_in[k],"%f",&sixs_tables->aot_wavelength[i][j]);
}
if (!strncmp(line_in,"* aeros. sca. \" :",27)) {
k=27;
while (line_in[k]==' ')
k++;
sscanf(&line_in[k],"%f",&sixs_tables->T_a_down[i][j]);
while (line_in[k]!=' ') /* downward */
k++;
while (line_in[k]==' ') /* blank */
k++;
sscanf(&line_in[k],"%f",&sixs_tables->T_a_up[i][j]);
while (line_in[k]!=' ') /* upward */
k++;
while (line_in[k]==' ') /* blank */
k++;
sscanf(&line_in[k],"%f",&sixs_tables->T_a[i][j]);
}
if (!strncmp(line_in,"* total sca. \" :",27)) {
k=27;
while (line_in[k]==' ')
k++;
sscanf(&line_in[k],"%f",&sixs_tables->T_ra_down[i][j]);
while (line_in[k]!=' ') /* downward */
k++;
while (line_in[k]==' ') /* blank */
k++;
sscanf(&line_in[k],"%f",&sixs_tables->T_ra_up[i][j]);
while (line_in[k]!=' ') /* upward */
k++;
while (line_in[k]==' ') /* blank */
k++;
sscanf(&line_in[k],"%f",&sixs_tables->T_ra[i][j]);
}
if (!strncmp(line_in,"* reflectance I :",27)) {
k=27;
while (line_in[k]==' ') /* blank */
k++;
if (j==0)
sscanf(&line_in[k],"%f",&sixs_tables->rho_r[i]);
while (line_in[k]!=' ') /* rayleigh */
k++;
while (line_in[k]==' ') /* blank */
k++;
sscanf(&line_in[k],"%f",&sixs_tables->rho_a[i][j]);
while (line_in[k]!=' ') /* aerosols */
k++;
while (line_in[k]==' ') /* blank */
k++;
sscanf(&line_in[k],"%f",&sixs_tables->rho_ra[i][j]);
}
}
fclose(fd);
/* For OZONE debugging:
sprintf (tmp_file, "%s_b%d_aot%02d", sixs_cmd_filename, i, j);
sprintf (cmd_string, "cp %s %s", sixs_cmd_filename, tmp_file);
system (cmd_string);
sprintf (tmp_file, "%s_b%d_aot%02d", sixs_out_filename, i, j);
sprintf (cmd_string, "cp %s %s", sixs_out_filename, tmp_file);
system (cmd_string);
*/
unlink(sixs_cmd_filename);
unlink(sixs_out_filename);
} /* for j */
} /* for i */
printf ("\n");
return 0;
}
/* This function is not actually used in lndsr processing */
/*
 * create_6S_tables_water -- water-surface variant of create_6S_tables.
 * Runs 6S with the maritime aerosol model over an ocean BRDF and scrapes
 * the same lookup tables plus rho_toa.  Serial (no OpenMP), and uses
 * tmpnam() for scratch filenames.  Not used in lndsr processing.
 * Returns 0; exits on any failure.
 */
int create_6S_tables_water(sixs_tables_t *sixs_tables) {
char cmd[128],sixs_cmd_filename[128],sixs_out_filename[128],line_in[256];
int i,j,k;
FILE *fd;
/* NOTE(review): only assigned when the matching report lines are found;
 * an incomplete 6S output leaves these uninitialized -- confirm. */
float tgoz,tgco2,tgo2,tgno2,tgch4,tgco;
int tm_band[SIXS_NB_BANDS]={25,26,27,28,29,30}; /* 6S predefined TM band ids */
char *tmpstr; /* NOTE(review): only used to test tmpnam's return; otherwise unused */
struct etm_spectral_function_t etm_spectral_function = {
{54,61,65,81,131,155},
{0.420,0.500,0.580,0.730,1.501,2.0},
{0.550,0.650,0.740,0.930,1.825,2.386},
{
{0.000,0.000,0.000,0.000,0.000,0.000,0.016,0.071,0.287,0.666,0.792,0.857,0.839,0.806,0.779,0.846,0.901,0.900,0.890,0.851,0.875,0.893,0.884,0.930,0.958,0.954,0.980,0.975,0.965,0.962,0.995,0.990,0.990,0.979,0.983,0.969,0.960,0.768,0.293,0.054,0.009,0.000,0.000,0.000,0.000,0.000,0.000,0.000,0.000,0.000,0.000,0.000,0.000,0.000},
{0.001,0.002,0.003,0.012,0.026,0.074,0.174,0.348,0.552,0.696,0.759,0.785,0.822,0.870,0.905,0.929,0.947,0.952,0.952,0.951,0.953,0.950,0.954,0.967,0.959,0.941,0.933,0.938,0.951,0.956,0.955,0.956,0.973,0.992,1.000,0.976,0.942,0.930,0.912,0.799,0.574,0.340,0.185,0.105,0.062,0.038,0.021,0.011,0.005,0.002,0.000,0.000,0.000,0.000,0.000,0.000,0.000,0.000,0.000,0.000,0.000},
{0.000,0.000,0.000,0.000,0.000,0.000,0.000,0.000,0.000,0.000,0.000,0.000,0.000,0.000,0.000,0.001,0.002,0.010,0.047,0.174,0.419,0.731,0.921,0.942,0.937,0.937,0.949,0.965,0.973,0.970,0.958,0.955,0.962,0.980,0.993,0.998,1.000,0.995,0.992,0.988,0.977,0.954,0.932,0.880,0.729,0.444,0.183,0.066,0.025,0.012,0.000,0.000,0.000,0.000,0.000,0.000,0.000,0.000,0.000,0.000,0.000,0.000,0.000,0.000,0.000},
{0.000,0.000,0.000,0.000,0.000,0.002,0.004,0.002,0.001,0.020,0.032,0.052,0.069,0.110,0.175,0.271,0.402,0.556,0.705,0.812,0.871,0.896,0.908,0.918,0.926,0.928,0.930,0.926,0.925,0.928,0.923,0.916,0.908,0.903,0.909,0.924,0.946,0.954,0.971,0.969,0.967,0.965,0.967,0.961,0.949,0.931,0.925,0.929,0.943,0.961,0.985,0.992,0.998,0.992,0.994,0.997,0.998,1.000,0.991,0.988,0.969,0.926,0.868,0.817,0.819,0.880,0.854,0.572,0.256,0.104,0.044,0.022,0.011,0.007,0.000,0.000,0.000,0.000,0.000,0.000,0.000},
{0.000,0.003,0.000,0.001,0.007,0.008,0.008,0.012,0.012,0.028,0.041,0.062,0.087,0.114,0.176,0.230,0.306,0.410,0.481,0.543,0.598,0.642,0.686,0.719,0.750,0.785,0.817,0.845,0.867,0.881,0.902,0.900,0.896,0.892,0.899,0.882,0.872,0.872,0.872,0.878,0.868,0.860,0.877,0.884,0.897,0.895,0.898,0.912,0.921,0.927,0.937,0.947,0.948,0.954,0.961,0.962,0.962,0.964,0.969,0.956,0.952,0.951,0.952,0.953,0.939,0.934,0.928,0.943,0.945,0.935,0.944,0.947,0.944,0.949,0.960,0.966,0.971,0.978,0.993,0.998,0.996,0.996,0.997,0.986,0.990,0.988,0.992,0.985,0.982,0.978,0.970,0.966,0.952,0.927,0.883,0.832,0.751,0.656,0.577,0.483,0.393,0.310,0.239,0.184,0.142,0.104,0.080,0.063,0.049,0.041,0.036,0.023,0.021,0.019,0.012,0.006,0.008,0.000,0.000,0.000,0.000,0.000,0.000,0.000,0.000,0.000,0.000,0.000,0.000,0.000,0.000},
{0.004,0.001,0.003,0.000,0.002,0.001,0.002,0.002,0.012,0.008,0.009,0.018,0.017,0.031,0.037,0.046,0.058,0.076,0.088,0.110,0.149,0.196,0.242,0.303,0.367,0.437,0.519,0.610,0.677,0.718,0.756,0.774,0.784,0.775,0.789,0.782,0.778,0.766,0.762,0.768,0.775,0.769,0.788,0.808,0.794,0.823,0.811,0.819,0.836,0.837,0.836,0.851,0.859,0.855,0.871,0.873,0.875,0.859,0.872,0.859,0.872,0.863,0.865,0.868,0.877,0.873,0.869,0.876,0.868,0.879,0.873,0.876,0.880,0.874,0.870,0.858,0.863,0.859,0.844,0.859,0.854,0.863,0.868,0.856,0.847,0.861,0.851,0.852,0.838,0.847,0.840,0.831,0.836,0.838,0.822,0.838,0.839,0.842,0.854,0.862,0.873,0.868,0.879,0.891,0.898,0.919,0.920,0.926,0.928,0.934,0.936,0.953,0.954,0.952,0.960,0.973,0.985,0.972,0.970,0.994,0.989,0.975,1.000,0.991,0.968,0.966,0.956,0.929,0.929,0.926,0.903,0.924,0.929,0.928,0.920,0.853,0.775,0.659,0.531,0.403,0.275,0.218,0.131,0.104,0.075,0.052,0.029,0.028,0.014,0.019,0.013,0.007,0.015,0.000,0.004}
}
};
/* Fixed AOT(550nm) grid indexed by j below (SIXS_NB_AOT entries). */
sixs_tables->aot[0]=0.01;
sixs_tables->aot[1]=0.05;
sixs_tables->aot[2]=0.10;
sixs_tables->aot[3]=0.15;
sixs_tables->aot[4]=0.20;
sixs_tables->aot[5]=0.30;
sixs_tables->aot[6]=0.40;
sixs_tables->aot[7]=0.60;
sixs_tables->aot[8]=0.80;
sixs_tables->aot[9]=1.00;
sixs_tables->aot[10]=1.20;
sixs_tables->aot[11]=1.40;
sixs_tables->aot[12]=1.60;
sixs_tables->aot[13]=1.80;
sixs_tables->aot[14]=2.00;
printf ("DEBUG: in compute_6S_tables_water -- shouldn't be here!\n");
/* NOTE(review): tmpnam() is race-prone/deprecated; prefer mkstemp() if this
 * path is ever revived. */
if ((tmpstr = tmpnam(sixs_cmd_filename)) == NULL) {
fprintf(stderr,"ERROR: creating temporary file %s\n",sixs_cmd_filename);
exit(-1);
}
if ((tmpstr = tmpnam(sixs_out_filename)) == NULL) {
fprintf(stderr,"ERROR: creating temporary file %s\n",sixs_out_filename);
exit(-1);
}
for (i=0;i<SIXS_NB_BANDS;i++) {
for (j=0;j<SIXS_NB_AOT;j++) {
printf("Processing Band %d AOT %d\n",i+1,j+1);
/* Emit the 6S control-card deck as a here-document shell script. */
if ((fd=fopen(sixs_cmd_filename,"w"))==NULL) {
fprintf(stderr,"ERROR: creating temporary file %s\n",sixs_cmd_filename);
exit(-1);
}
fprintf(fd,"%s <<+ >%s\n",SIXS_APP,sixs_out_filename);
fprintf(fd,"0 (user defined)\n");
fprintf(fd,"%.2f %.2f %.2f %.2f %d %d (geometrical conditions sza saz vza vaz month day)\n",sixs_tables->sza,sixs_tables->phi,sixs_tables->vza,0.,sixs_tables->month,sixs_tables->day);
fprintf(fd,"8 (option for water vapor and ozone)\n");
fprintf(fd,"%.2f %.2f (water vapor and ozone)\n",sixs_tables->uwv,sixs_tables->uoz);
fprintf(fd,"2 (maritime model)\n");
fprintf(fd,"0 (option for optical thickness at 550 nm)\n");
fprintf(fd,"%.3f (value of aot550\n",sixs_tables->aot[j]);
fprintf(fd,"%f (target level)\n",sixs_tables->target_alt);
fprintf(fd,"-1000 (sensor level : -1000=satellite level)\n");
switch (sixs_tables->Inst) {
case SIXS_INST_TM:
fprintf(fd,"%d (predefined band)\n",tm_band[i]);
break;
case SIXS_INST_ETM:
/* ETM+ has no predefined 6S band: supply the filter function, ten values per line. */
fprintf(fd,"1 (user defined filter function)\n");
fprintf(fd,"%05.3f %05.3f (wlinf wlsup)\n",etm_spectral_function.wlinf[i],etm_spectral_function.wlsup[i]);
for (k=0;k<etm_spectral_function.nbvals[i];k++) {
fprintf(fd,"%05.3f ",etm_spectral_function.response[i][k]);
if (!((k+1)%10))
fprintf(fd,"\n");
}
if (k%10)
fprintf(fd,"\n");
break;
default:
fprintf(stderr,"ERROR: Unknown Instrument in six_run parameters\n");
exit(-1);
}
fprintf(fd,"0 (homogeneous surface)\n");
fprintf(fd,"1 (directional effects)\n");
fprintf(fd,"6 (Ocean)\n");
fprintf(fd,"2.0 0.0 0.0 .10 (wind speed(m/s) wind azimuth(deg) salinity(deg) pigment concentration(mg/m3))\n");
fprintf(fd,"%.3f (value of rho)\n",sixs_tables->srefl);
fprintf(fd,"-1 (no atmospheric correction)\n");
fprintf(fd,"+\n");
fclose(fd);
/* Modified 9/26/2014 to run bash shell vs. sh */
sprintf(cmd,"bash %s",sixs_cmd_filename);
if (system(cmd)) {
fprintf(stderr,"ERROR: Can't run 6S \n");
exit(-1);
}
if ((fd=fopen(sixs_out_filename,"r"))==NULL) {
fprintf(stderr,"ERROR: reading temporary file %s\n",sixs_out_filename);
exit(-1);
}
/* Scrape the 6S textual report: skip the fixed-width label, then alternate
 * skipping blanks and tokens to reach each numeric column. */
while (fgets(line_in,256,fd)) {
line_in[strlen(line_in)-1]='\0';
if (j==0) {
/* Rayleigh-only and gaseous quantities do not depend on AOT: parse them once. */
if (!strncmp(line_in,"* rayl. sca. trans. :",27)) {
k=27;
while (line_in[k]==' ')
k++;
sscanf(&line_in[k],"%f",&sixs_tables->T_r_down[i]);
while (line_in[k]!=' ') /* downward */
k++;
while (line_in[k]==' ') /* blank */
k++;
sscanf(&line_in[k],"%f",&sixs_tables->T_r_up[i]);
while (line_in[k]!=' ') /* upward */
k++;
while (line_in[k]==' ') /* blank */
k++;
sscanf(&line_in[k],"%f",&sixs_tables->T_r[i]);
}
if (!strncmp(line_in,"* water \" \" :",27)) {
k=27;
while (line_in[k]==' ')
k++;
while (line_in[k]!=' ') /* downward */
k++;
while (line_in[k]==' ') /* blank */
k++;
while (line_in[k]!=' ') /* upward */
k++;
while (line_in[k]==' ') /* blank */
k++;
sscanf(&line_in[k],"%f",&sixs_tables->T_g_wv[i]);
}
if (!strncmp(line_in,"* ozone \" \" :",27)) {
k=27;
while (line_in[k]==' ')
k++;
while (line_in[k]!=' ') /* downward */
k++;
while (line_in[k]==' ') /* blank */
k++;
while (line_in[k]!=' ') /* upward */
k++;
while (line_in[k]==' ') /* blank */
k++;
sscanf(&line_in[k],"%f",&tgoz);
}
if (!strncmp(line_in,"* co2 \" \" :",27)) {
k=27;
while (line_in[k]==' ')
k++;
while (line_in[k]!=' ') /* downward */
k++;
while (line_in[k]==' ') /* blank */
k++;
while (line_in[k]!=' ') /* upward */
k++;
while (line_in[k]==' ') /* blank */
k++;
sscanf(&line_in[k],"%f",&tgco2);
}
if (!strncmp(line_in,"* oxyg \" \" :",27)) {
k=27;
while (line_in[k]==' ')
k++;
while (line_in[k]!=' ') /* downward */
k++;
while (line_in[k]==' ') /* blank */
k++;
while (line_in[k]!=' ') /* upward */
k++;
while (line_in[k]==' ') /* blank */
k++;
sscanf(&line_in[k],"%f",&tgo2);
}
if (!strncmp(line_in,"* no2 \" \" :",27)) {
k=27;
while (line_in[k]==' ')
k++;
while (line_in[k]!=' ') /* downward */
k++;
while (line_in[k]==' ') /* blank */
k++;
while (line_in[k]!=' ') /* upward */
k++;
while (line_in[k]==' ') /* blank */
k++;
sscanf(&line_in[k],"%f",&tgno2);
}
if (!strncmp(line_in,"* ch4 \" \" :",27)) {
k=27;
while (line_in[k]==' ')
k++;
while (line_in[k]!=' ') /* downward */
k++;
while (line_in[k]==' ') /* blank */
k++;
while (line_in[k]!=' ') /* upward */
k++;
while (line_in[k]==' ') /* blank */
k++;
sscanf(&line_in[k],"%f",&tgch4);
}
if (!strncmp(line_in,"* co \" \" :",27)) {
k=27;
while (line_in[k]==' ')
k++;
while (line_in[k]!=' ') /* downward */
k++;
while (line_in[k]==' ') /* blank */
k++;
while (line_in[k]!=' ') /* upward */
k++;
while (line_in[k]==' ') /* blank */
k++;
sscanf(&line_in[k],"%f",&tgco);
}
/* Combined "other gases" transmittance.
 * NOTE(review): tgno2 appears twice in the product -- confirm intentional. */
sixs_tables->T_g_og[i]=tgoz*tgco2*tgo2*tgno2*tgno2*tgch4*tgco;
}
if (!strncmp(line_in,"* spherical albedo :",27)) {
k=27;
while (line_in[k]==' ') /* blank */
k++;
if (j==0)
sscanf(&line_in[k],"%f",&sixs_tables->S_r[i]);
while (line_in[k]!=' ') /* Rayleigh */
k++;
while (line_in[k]==' ') /* blank */
k++;
while (line_in[k]!=' ') /* Aerosol */
k++;
while (line_in[k]==' ') /* blank */
k++;
sscanf(&line_in[k],"%f",&sixs_tables->S_ra[i][j]);
}
if (!strncmp(line_in,"* optical depth total:",27)) {
k=27;
while (line_in[k]==' ') /* blank */
k++;
while (line_in[k]!=' ') /* Rayleigh */
k++;
while (line_in[k]==' ') /* blank */
k++;
sscanf(&line_in[k],"%f",&sixs_tables->aot_wavelength[i][j]);
}
if (!strncmp(line_in,"* aeros. sca. \" :",27)) {
k=27;
while (line_in[k]==' ')
k++;
sscanf(&line_in[k],"%f",&sixs_tables->T_a_down[i][j]);
while (line_in[k]!=' ') /* downward */
k++;
while (line_in[k]==' ') /* blank */
k++;
sscanf(&line_in[k],"%f",&sixs_tables->T_a_up[i][j]);
while (line_in[k]!=' ') /* upward */
k++;
while (line_in[k]==' ') /* blank */
k++;
sscanf(&line_in[k],"%f",&sixs_tables->T_a[i][j]);
}
if (!strncmp(line_in,"* total sca. \" :",27)) {
k=27;
while (line_in[k]==' ')
k++;
sscanf(&line_in[k],"%f",&sixs_tables->T_ra_down[i][j]);
while (line_in[k]!=' ') /* downward */
k++;
while (line_in[k]==' ') /* blank */
k++;
sscanf(&line_in[k],"%f",&sixs_tables->T_ra_up[i][j]);
while (line_in[k]!=' ') /* upward */
k++;
while (line_in[k]==' ') /* blank */
k++;
sscanf(&line_in[k],"%f",&sixs_tables->T_ra[i][j]);
}
if (!strncmp(line_in,"* reflectance I :",27)) {
k=27;
while (line_in[k]==' ') /* blank */
k++;
if (j==0)
sscanf(&line_in[k],"%f",&sixs_tables->rho_r[i]);
while (line_in[k]!=' ') /* rayleigh */
k++;
while (line_in[k]==' ') /* blank */
k++;
sscanf(&line_in[k],"%f",&sixs_tables->rho_a[i][j]);
while (line_in[k]!=' ') /* aerosols */
k++;
while (line_in[k]==' ') /* blank */
k++;
sscanf(&line_in[k],"%f",&sixs_tables->rho_ra[i][j]);
}
/* Extra field relative to create_6S_tables: top-of-atmosphere reflectance. */
if (!strncmp(line_in,"* apparent reflectance",28)) {
k=28;
while (line_in[k]==' ') /* blank */
k++;
sscanf(&line_in[k],"%f",&sixs_tables->rho_toa[i][j]);
}
}
fclose(fd);
}
}
unlink(sixs_cmd_filename);
unlink(sixs_out_filename);
return 0;
}
/* This function is not actually used in lndsr processing */
/*
 * compute_atmos_params_6S -- single-shot 6S run for one band.
 * Writes a minimal 6S command deck (no explanatory card text), runs it via
 * system()/bash, and parses spherical albedo, Rayleigh/aerosol
 * transmittances, path reflectances and gaseous transmittances into
 * sixs_atmos_params.  Not used in lndsr processing.
 * Returns 0; exits on any failure.
 */
int compute_atmos_params_6S(sixs_atmos_params_t *sixs_atmos_params) {
char cmd[128],sixs_cmd_filename[128],sixs_out_filename[128],line_in[256];
int k;
/* NOTE(review): only assigned when the matching report lines are found;
 * an incomplete 6S output leaves these uninitialized before the T_g_og
 * product at the end -- confirm 6S always emits all seven gas lines. */
float tgoz,tgco2,tgo2,tgno2,tgch4,tgco;
int tm_band[SIXS_NB_BANDS]={25,26,27,28,29,30}; /* 6S predefined TM band ids */
char *tmpstr; /* NOTE(review): only used to test tmpnam's return */
FILE *fd;
printf ("DEBUG: in compute_atmos_params_6S -- shouldn't be here!\n");
/* NOTE(review): tmpnam() is race-prone/deprecated; prefer mkstemp() if this
 * path is ever revived. */
if ((tmpstr = tmpnam(sixs_cmd_filename)) == NULL) {
fprintf(stderr,"ERROR: creating temporary file %s\n",sixs_cmd_filename);
exit(-1);
}
if ((tmpstr = tmpnam(sixs_out_filename)) == NULL) {
fprintf(stderr,"ERROR: creating temporary file %s\n",sixs_out_filename);
exit(-1);
}
/* Emit the 6S control-card deck as a here-document shell script. */
if ((fd=fopen(sixs_cmd_filename,"w"))==NULL) {
fprintf(stderr,"ERROR: creating temporary file %s\n",sixs_cmd_filename);
exit(-1);
}
fprintf(fd,"%s <<+ >%s\n",SIXS_APP,sixs_out_filename);
fprintf(fd,"0\n");
fprintf(fd,"%.2f %.2f %.2f %.2f %d %d\n",sixs_atmos_params->sza,sixs_atmos_params->phi,sixs_atmos_params->vza,0.,sixs_atmos_params->month,sixs_atmos_params->day);
fprintf(fd,"8\n");
fprintf(fd,"%.2f %.2f\n",sixs_atmos_params->uwv,sixs_atmos_params->uoz);
/* Aerosol cards: continental model with explicit AOT if given, otherwise no aerosols. */
if (sixs_atmos_params->aot > 0) {
fprintf(fd,"1\n");
fprintf(fd,"0\n");
fprintf(fd,"%.3f\n",sixs_atmos_params->aot);
} else {
fprintf(fd,"0\n");
fprintf(fd,"-1\n");
}
fprintf(fd,"0\n");
fprintf(fd,"-1000\n");
fprintf(fd,"%d\n",tm_band[sixs_atmos_params->band]);
fprintf(fd,"0\n");
fprintf(fd,"0\n");
fprintf(fd,"0\n");
fprintf(fd,"%.3f\n",sixs_atmos_params->srefl);
fprintf(fd,"-1\n");
fprintf(fd,"0\n");
fprintf(fd,"+\n");
fclose(fd);
sprintf(cmd,"bash %s",sixs_cmd_filename);
if (system(cmd)) {
fprintf(stderr,"ERROR: Can't run 6S \n");
exit(-1);
}
if ((fd=fopen(sixs_out_filename,"r"))==NULL) {
fprintf(stderr,"ERROR: reading temporary file %s\n",sixs_out_filename);
exit(-1);
}
/* Scrape the 6S textual report: skip the fixed-width label, then alternate
 * skipping blanks and tokens to reach each numeric column. */
while (fgets(line_in,256,fd)) {
line_in[strlen(line_in)-1]='\0';
if (!strncmp(line_in,"* spherical albedo :",27)) {
k=27;
while (line_in[k]==' ')
k++;
sscanf(&line_in[k],"%f",&sixs_atmos_params->S_r);
}
if (!strncmp(line_in,"* rayl. sca. trans. :",27)) {
k=27;
while (line_in[k]==' ')
k++;
sscanf(&line_in[k],"%f",&sixs_atmos_params->T_r_down);
while (line_in[k]!=' ') /* downward */
k++;
while (line_in[k]==' ') /* blank */
k++;
sscanf(&line_in[k],"%f",&sixs_atmos_params->T_r_up);
}
if (!strncmp(line_in,"* aeros. sca. \" :",27)) {
k=27;
while (line_in[k]==' ')
k++;
sscanf(&line_in[k],"%f",&sixs_atmos_params->T_a_down);
while (line_in[k]!=' ') /* downward */
k++;
while (line_in[k]==' ') /* blank */
k++;
sscanf(&line_in[k],"%f",&sixs_atmos_params->T_a_up);
}
if (!strncmp(line_in,"* reflectance I :",27)) {
k=27;
while (line_in[k]==' ') /* blank */
k++;
sscanf(&line_in[k],"%f",&sixs_atmos_params->rho_r);
while (line_in[k]!=' ') /* rayleigh */
k++;
while (line_in[k]==' ') /* blank */
k++;
sscanf(&line_in[k],"%f",&sixs_atmos_params->rho_a);
}
if (!strncmp(line_in,"* water \" \" :",27)) {
k=27;
while (line_in[k]==' ')
k++;
while (line_in[k]!=' ') /* downward */
k++;
while (line_in[k]==' ') /* blank */
k++;
while (line_in[k]!=' ') /* upward */
k++;
while (line_in[k]==' ') /* blank */
k++;
sscanf(&line_in[k],"%f",&sixs_atmos_params->T_g_wv);
}
if (!strncmp(line_in,"* ozone \" \" :",27)) {
k=27;
while (line_in[k]==' ')
k++;
while (line_in[k]!=' ') /* downward */
k++;
while (line_in[k]==' ') /* blank */
k++;
while (line_in[k]!=' ') /* upward */
k++;
while (line_in[k]==' ') /* blank */
k++;
sscanf(&line_in[k],"%f",&tgoz);
}
if (!strncmp(line_in,"* co2 \" \" :",27)) {
k=27;
while (line_in[k]==' ')
k++;
while (line_in[k]!=' ') /* downward */
k++;
while (line_in[k]==' ') /* blank */
k++;
while (line_in[k]!=' ') /* upward */
k++;
while (line_in[k]==' ') /* blank */
k++;
sscanf(&line_in[k],"%f",&tgco2);
}
if (!strncmp(line_in,"* oxyg \" \" :",27)) {
k=27;
while (line_in[k]==' ')
k++;
while (line_in[k]!=' ') /* downward */
k++;
while (line_in[k]==' ') /* blank */
k++;
while (line_in[k]!=' ') /* upward */
k++;
while (line_in[k]==' ') /* blank */
k++;
sscanf(&line_in[k],"%f",&tgo2);
}
if (!strncmp(line_in,"* no2 \" \" :",27)) {
k=27;
while (line_in[k]==' ')
k++;
while (line_in[k]!=' ') /* downward */
k++;
while (line_in[k]==' ') /* blank */
k++;
while (line_in[k]!=' ') /* upward */
k++;
while (line_in[k]==' ') /* blank */
k++;
sscanf(&line_in[k],"%f",&tgno2);
}
if (!strncmp(line_in,"* ch4 \" \" :",27)) {
k=27;
while (line_in[k]==' ')
k++;
while (line_in[k]!=' ') /* downward */
k++;
while (line_in[k]==' ') /* blank */
k++;
while (line_in[k]!=' ') /* upward */
k++;
while (line_in[k]==' ') /* blank */
k++;
sscanf(&line_in[k],"%f",&tgch4);
}
if (!strncmp(line_in,"* co \" \" :",27)) {
k=27;
while (line_in[k]==' ')
k++;
while (line_in[k]!=' ') /* downward */
k++;
while (line_in[k]==' ') /* blank */
k++;
while (line_in[k]!=' ') /* upward */
k++;
while (line_in[k]==' ') /* blank */
k++;
sscanf(&line_in[k],"%f",&tgco);
}
}
fclose(fd);
/* Combined "other gases" transmittance.
 * NOTE(review): tgno2 appears twice in the product -- confirm intentional. */
sixs_atmos_params->T_g_og=tgoz*tgco2*tgo2*tgno2*tgno2*tgch4*tgco;
unlink(sixs_cmd_filename);
unlink(sixs_out_filename);
return 0;
}
#ifdef SAVE_6S_RESULTS
/*
 * read_6S_results_from_file -- load cached 6S tables from `filename`.
 * The layout must mirror write_6S_results_to_file exactly.
 * Returns 1 if the file is missing or any field fails to parse (caller
 * should rerun 6S), 0 when the full table set was read successfully.
 */
int read_6S_results_from_file(char *filename,sixs_tables_t *sixs_tables) {
FILE *fd;
int run_sixs,i,j;
if ((fd=fopen(filename,"r"))==NULL)
return 1;
run_sixs=0; /* stays 0 only if every fscanf below matches its full field count */
if (fscanf(fd,"%d %d",&sixs_tables->month,&sixs_tables->day) != 2)
run_sixs=1;
if (fscanf(fd,"%f",&sixs_tables->srefl)!=1)
run_sixs=1;
if (fscanf(fd,"%f %f %f",&sixs_tables->sza,&sixs_tables->vza,&sixs_tables->phi)!=3)
run_sixs=1;
if (fscanf(fd,"%f %f %f",&sixs_tables->uwv,&sixs_tables->uoz,&sixs_tables->target_alt)!=3)
run_sixs=1;
for (i=0;i<SIXS_NB_AOT;i++)
if (fscanf(fd,"%f ",&sixs_tables->aot[i])!=1)
run_sixs=1;
for (i=0;i<SIXS_NB_BANDS;i++) {
/* Per-band Rayleigh/gaseous values, then one line per AOT grid point. */
if (fscanf(fd,"%f %f %f %f %f %f %f",&sixs_tables->S_r[i],&sixs_tables->T_r_up[i],&sixs_tables->T_r_down[i],&sixs_tables->T_r[i],&sixs_tables->T_g_wv[i],&sixs_tables->T_g_og[i],&sixs_tables->rho_r[i])!=7)
run_sixs=1;
for (j=0;j<SIXS_NB_AOT;j++)
if (fscanf(fd,"%f %f %f %f %f %f %f %f %f %f %f",&sixs_tables->aot_wavelength[i][j],&sixs_tables->T_a_up[i][j],&sixs_tables->T_a_down[i][j],&sixs_tables->T_a[i][j],&sixs_tables->rho_ra[i][j],&sixs_tables->rho_a[i][j],&sixs_tables->S_ra[i][j],&sixs_tables->T_ra_up[i][j],&sixs_tables->T_ra_down[i][j],&sixs_tables->T_ra[i][j],&sixs_tables->rho_toa[i][j])!=11)
run_sixs=1;
}
fclose(fd);
return run_sixs;
}
/*
 * write_6S_results_to_file -- cache the computed 6S tables to `filename`
 * in the exact field order expected by read_6S_results_from_file.
 * Returns 0 on success, -1 if the file cannot be opened for writing.
 */
int write_6S_results_to_file(char *filename,sixs_tables_t *sixs_tables) {
FILE *fd;
int i,j;
if ((fd=fopen(filename,"w"))==NULL)
return -1;
fprintf(fd,"%02d %03d\n",sixs_tables->month,sixs_tables->day);
fprintf(fd,"%010.6f\n",sixs_tables->srefl);
fprintf(fd,"%010.6f %010.6f %010.6f\n",sixs_tables->sza,sixs_tables->vza,sixs_tables->phi);
fprintf(fd,"%010.6f %010.6f %010.2f\n",sixs_tables->uwv,sixs_tables->uoz,sixs_tables->target_alt);
for (i=0;i<SIXS_NB_AOT;i++)
fprintf(fd,"%07.4f ",sixs_tables->aot[i]);
fprintf(fd,"\n");
for (i=0;i<SIXS_NB_BANDS;i++) {
/* Per-band Rayleigh/gaseous values, then one line per AOT grid point. */
fprintf(fd,"%010.6f %010.6f %010.6f %010.6f %010.6f %010.6f %010.6f\n",sixs_tables->S_r[i],sixs_tables->T_r_up[i],sixs_tables->T_r_down[i],sixs_tables->T_r[i],sixs_tables->T_g_wv[i],sixs_tables->T_g_og[i],sixs_tables->rho_r[i]);
for (j=0;j<SIXS_NB_AOT;j++)
fprintf(fd,"%07.4f %010.6f %010.6f %010.6f %010.6f %010.6f %010.6f %010.6f %010.6f %010.6f %010.6f\n",sixs_tables->aot_wavelength[i][j],sixs_tables->T_a_up[i][j],sixs_tables->T_a_down[i][j],sixs_tables->T_a[i][j],sixs_tables->rho_ra[i][j],sixs_tables->rho_a[i][j],sixs_tables->S_ra[i][j],sixs_tables->T_ra_up[i][j],sixs_tables->T_ra_down[i][j],sixs_tables->T_ra[i][j],sixs_tables->rho_toa[i][j]);
}
fclose(fd);
return 0;
}
#endif
|
subsref_mex_openmp.c | /* DOES NOT WORK PROPERLY, AS GETTING OPENMP SUPPORT IN MATLAB IS DIFFICULT.
*
* Compile using:
* mex -lmwlapack -lmwblas -largeArrayDims subsref_mex_openmp.c
* calling (do NOT call directly. Only meant to be called through TTeMPS.subsref
* subsref_mex( n, r, transpose(ind), Cores)
*/
/*
* TTeMPS Toolbox.
* Michael Steinlechner, 2013-2014
* Questions and contact: michael.steinlechner@epfl.ch
* BSD 2-clause license, see LICENSE.txt
*/
#include "mex.h"
#include "blas.h"
#include <omp.h>
/*
 * MEX entry point: evaluate a TT/MPS tensor at a batch of index tuples.
 * prhs[0] = n (mode sizes, d values), prhs[1] = r (TT ranks, d+1 values),
 * prhs[2] = d-by-numSubsref index matrix (1-based), prhs[3] = cell array of
 * d core matrices.  plhs[0] = numSubsref-by-1 vector of tensor entries.
 * Each entry is the chained product v_1^T C_2(i_2) ... computed with dgemv;
 * the OpenMP loop parallelizes over requested entries with per-thread
 * scratch buffers.
 */
void mexFunction( int nlhs, mxArray *plhs[],
int nrhs, const mxArray *prhs[] ) {
/* input variables */
double* n_raw;
double* r_raw;
double* ind_raw;
double** C;
/* output variables */
double* result;
/* internal variables */
double* P; /* per-thread dgemv result buffer (maxrank doubles) */
double* current; /* per-thread copy of P, since dgemv is not in-place */
mwSignedIndex* n;
mwSignedIndex* r;
mwSignedIndex* ind;
mwSignedIndex numSubsref;
mwSignedIndex d;
mwSignedIndex i;
mwSignedIndex j;
mwSignedIndex k;
mwSignedIndex maxrank = 1;
/* get sizes */
n_raw = mxGetPr( prhs[0] );
/* get ranks */
r_raw = mxGetPr( prhs[1] );
/* get indices */
ind_raw = mxGetPr( prhs[2] );
d = mxGetM( prhs[2] );
numSubsref = mxGetN( prhs[2] );
/* NOTE(review): mxMalloc results are used unchecked; mxMalloc aborts the
 * MEX call on failure, so this is acceptable in MEX context. */
n = mxMalloc( d*sizeof(mwSignedIndex) );
r = mxMalloc( (d+1)*sizeof(mwSignedIndex) );
ind = mxMalloc( d*numSubsref*sizeof(mwSignedIndex) );
/* Convert index arrays to integer arrays as they get converted
* to double arrays when passing to mex.
* Converting beforehand allows to avoid multiple typecasts inside the inner loop */
for( i = 0; i < d; ++i ) {
n[i] = (mwSignedIndex) n_raw[i];
r[i] = (mwSignedIndex) r_raw[i];
if( r[i] > maxrank )
maxrank = r[i];
}
/* NOTE(review): cast to mwSize here but mwSignedIndex everywhere else --
 * confirm this inconsistency is harmless for the target platform. */
r[d] = (mwSize) r_raw[d];
for( i = 0; i < numSubsref*d; ++i ) {
ind[i] = (mwSignedIndex) ind_raw[i];
}
/* Get pointers to the matrices within the cell array */
C = mxMalloc( d*sizeof(double*) );
for( i = 0; i<d; ++i ) {
C[i] = mxGetPr( mxGetCell( prhs[3], i ) );
}
/* Allocate space for output */
plhs[0] = mxCreateDoubleMatrix( numSubsref, 1, mxREAL);
result = mxGetPr( plhs[0] );
/* helper variables for dgemv call */
char transa = 'T';
mwSignedIndex ONE_i = 1;
double ONE_d = 1.0;
double ZERO_d = 0.0;
#pragma omp parallel shared(n,r,ind,C,result) private(i,j,k,P,current)
{
/* Allocate enough space for internal intermediate results */
/* NOTE(review): plain malloc without a NULL check inside the parallel
 * region -- confirm acceptable for the expected (small) maxrank. */
P = malloc( maxrank*sizeof(double) );
current = malloc( maxrank*sizeof(double) );
#pragma omp for
for( j = 0; j < numSubsref; ++j ) {
/* first two cores */
/* P = C_2(i_2)^T * C_1(i_1): ind is 1-based, hence the -1 offsets. */
dgemv( &transa, &r[1], &r[2], &ONE_d,
&C[1][ (ind[d*j+1]-1)*r[1]*r[2] ],
&r[1],
&C[0][ (ind[d*j]-1)*r[0]*r[1] ],
&ONE_i, &ZERO_d, P, &ONE_i);
/* loop over remaining cores */
for( i = 2; i < d; ++i ) {
/* copy over the previous result to free space at P
* (necessary because dgemv does not work in-place */
for( k = 0; k < r[i]; ++k )
current[k] = P[k];
dgemv( &transa, &r[i], &r[i+1], &ONE_d,
&C[i][ (ind[d*j+i]-1)*r[i]*r[i+1] ],
&r[i],
current,
&ONE_i, &ZERO_d, P, &ONE_i);
}
result[j] = P[0]; /* final rank is 1, so the entry is the single remaining value */
}
free( P );
free( current );
}
mxFree( n );
mxFree( r );
mxFree( ind );
mxFree( C );
}
|
comath.h | #ifndef COMATH
#define COMATH
#include "vmath.h"
#include <vector>
#include <iostream>
namespace vec {
template <class VectorType, class RealType = double>
// Mean absolute difference between elements `dt` apart:
//   (1/(N-dt)) * sum_i |a[i] - a[i+dt]|
// Returns RealType() (zero) when dt >= a.size().
static inline RealType strike_slip(const VectorType &a, const size_t dt) {
  // FIX: with size_t arithmetic, a.size() - dt wraps around when
  // dt > a.size() (huge loop bound, out-of-bounds reads) and the final
  // division is by zero when dt == a.size(). Guard both degenerate cases.
  if (dt >= a.size()) {
    return RealType();
  }
  RealType accum = RealType();
#pragma omp parallel for reduction(+ : accum)
  for (size_t i = 0; i < a.size() - dt; ++i) {
    accum += fabs(a[i] - a[i + dt]);
  }
  return accum / (RealType)(a.size() - dt);
}
template <class VectorType>
// Alter-Johnson statistic vector: entry (lag-1) holds strike_slip(a, lag)
// for every lag in [1, ceil(|a| * part) - 1].
static inline VectorType alter_johnson(const VectorType &a, double part = 0.6) {
  VectorType out(static_cast<size_t>(ceil(a.size() * part) - 1));
  const size_t lags = out.size();
#pragma omp parallel for
  for (size_t lag = 1; lag <= lags; ++lag) {
    out[lag - 1] = strike_slip(a, lag);
  }
  return out;
}
template <class T, class VectorType>
// Stream every element of v followed by a single space, then endl.
// (Template parameter T is unused; kept for interface compatibility.)
static inline void print(const VectorType &v, std::ostream &out = std::cerr) {
  for (typename VectorType::const_iterator it = v.begin(); it != v.end(); ++it) {
    out << *it << ' ';
  }
  out << std::endl;
}
}
#endif // COMATH
|
ast-dump-openmp-begin-declare-variant_7.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -verify -ast-dump %s | FileCheck %s
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -verify -ast-dump %s -x c++| FileCheck %s
// expected-no-diagnostics
int OK_1(void);
#pragma omp begin declare variant match(implementation={vendor(intel)})
int OK_1(void) {
return 1;
}
int OK_2(void) {
return 1;
}
int not_OK(void) {
return 1;
}
int OK_3(void) {
return 1;
}
#pragma omp end declare variant
int OK_3(void);
int test() {
// Should cause an error due to not_OK()
return OK_1() + not_OK() + OK_3();
}
// Make sure:
// - we see a single error for `not_OK`
// - we do not see errors for OK_{1,2,3}
// FIXME: We actually do not see there error here.
// This case is unlikely to happen in practise and hard to diagnose during SEMA.
// We will issue an error during code generation instead. This is similar to the
// diagnosis in other multi-versioning schemes.
// CHECK: |-FunctionDecl [[ADDR_0:0x[a-z0-9]*]] <{{.*}}, col:14> col:5 used OK_1 'int ({{.*}})'
// CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_1:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(intel)}
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_2:0x[a-z0-9]*]] <line:8:1> 'int ({{.*}})' Function [[ADDR_3:0x[a-z0-9]*]] 'OK_1[implementation={vendor(intel)}]' 'int ({{.*}})'
// CHECK-NEXT: |-FunctionDecl [[ADDR_3]] <col:1, line:10:1> line:8:1 OK_1[implementation={vendor(intel)}] 'int ({{.*}})'
// CHECK-NEXT: | `-CompoundStmt [[ADDR_4:0x[a-z0-9]*]] <col:16, line:10:1>
// CHECK-NEXT: | `-ReturnStmt [[ADDR_5:0x[a-z0-9]*]] <line:9:3, col:10>
// CHECK-NEXT: | `-IntegerLiteral [[ADDR_6:0x[a-z0-9]*]] <col:10> 'int' 1
// CHECK-NEXT: |-FunctionDecl [[ADDR_7:0x[a-z0-9]*]] <line:11:1, col:14> col:5 implicit OK_2 'int ({{.*}})'
// CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_8:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(intel)}
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_9:0x[a-z0-9]*]] <col:1> 'int ({{.*}})' Function [[ADDR_10:0x[a-z0-9]*]] 'OK_2[implementation={vendor(intel)}]' 'int ({{.*}})'
// CHECK-NEXT: |-FunctionDecl [[ADDR_10]] <col:1, line:13:1> line:11:1 OK_2[implementation={vendor(intel)}] 'int ({{.*}})'
// CHECK-NEXT: | `-CompoundStmt [[ADDR_11:0x[a-z0-9]*]] <col:16, line:13:1>
// CHECK-NEXT: | `-ReturnStmt [[ADDR_12:0x[a-z0-9]*]] <line:12:3, col:10>
// CHECK-NEXT: | `-IntegerLiteral [[ADDR_13:0x[a-z0-9]*]] <col:10> 'int' 1
// CHECK-NEXT: |-FunctionDecl [[ADDR_14:0x[a-z0-9]*]] <line:14:1, col:16> col:5 implicit used not_OK 'int ({{.*}})'
// CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_15:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(intel)}
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_16:0x[a-z0-9]*]] <col:1> 'int ({{.*}})' Function [[ADDR_17:0x[a-z0-9]*]] 'not_OK[implementation={vendor(intel)}]' 'int ({{.*}})'
// CHECK-NEXT: |-FunctionDecl [[ADDR_17]] <col:1, line:16:1> line:14:1 not_OK[implementation={vendor(intel)}] 'int ({{.*}})'
// CHECK-NEXT: | `-CompoundStmt [[ADDR_18:0x[a-z0-9]*]] <col:18, line:16:1>
// CHECK-NEXT: | `-ReturnStmt [[ADDR_19:0x[a-z0-9]*]] <line:15:3, col:10>
// CHECK-NEXT: | `-IntegerLiteral [[ADDR_20:0x[a-z0-9]*]] <col:10> 'int' 1
// CHECK-NEXT: |-FunctionDecl [[ADDR_21:0x[a-z0-9]*]] <line:17:1, col:14> col:5 implicit used OK_3 'int ({{.*}})'
// CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_22:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(intel)}
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_23:0x[a-z0-9]*]] <col:1> 'int ({{.*}})' Function [[ADDR_24:0x[a-z0-9]*]] 'OK_3[implementation={vendor(intel)}]' 'int ({{.*}})'
// CHECK-NEXT: |-FunctionDecl [[ADDR_24]] <col:1, line:19:1> line:17:1 OK_3[implementation={vendor(intel)}] 'int ({{.*}})'
// CHECK-NEXT: | `-CompoundStmt [[ADDR_25:0x[a-z0-9]*]] <col:16, line:19:1>
// CHECK-NEXT: | `-ReturnStmt [[ADDR_26:0x[a-z0-9]*]] <line:18:3, col:10>
// CHECK-NEXT: | `-IntegerLiteral [[ADDR_27:0x[a-z0-9]*]] <col:10> 'int' 1
// CHECK-NEXT: |-FunctionDecl [[ADDR_28:0x[a-z0-9]*]] prev [[ADDR_21]] <line:22:1, col:14> col:5 used OK_3 'int ({{.*}})'
// CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_29:0x[a-z0-9]*]] <<invalid sloc>> Inherited Implicit implementation={vendor(intel)}
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_23]] <line:17:1> 'int ({{.*}})' Function [[ADDR_24]] 'OK_3[implementation={vendor(intel)}]' 'int ({{.*}})'
// CHECK-NEXT: `-FunctionDecl [[ADDR_30:0x[a-z0-9]*]] <line:24:1, line:27:1> line:24:5 test 'int ({{.*}})'
// CHECK-NEXT: `-CompoundStmt [[ADDR_31:0x[a-z0-9]*]] <col:12, line:27:1>
// CHECK-NEXT: `-ReturnStmt [[ADDR_32:0x[a-z0-9]*]] <line:26:3, col:35>
// CHECK-NEXT: `-BinaryOperator [[ADDR_33:0x[a-z0-9]*]] <col:10, col:35> 'int' '+'
// CHECK-NEXT: |-BinaryOperator [[ADDR_34:0x[a-z0-9]*]] <col:10, col:26> 'int' '+'
// CHECK-NEXT: | |-CallExpr [[ADDR_35:0x[a-z0-9]*]] <col:10, col:15> 'int'
// CHECK-NEXT: | | `-ImplicitCastExpr [[ADDR_36:0x[a-z0-9]*]] <col:10> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | | `-DeclRefExpr [[ADDR_37:0x[a-z0-9]*]] <col:10> 'int ({{.*}})' {{.*}}Function [[ADDR_0]] 'OK_1' 'int ({{.*}})'
// CHECK-NEXT: | `-CallExpr [[ADDR_38:0x[a-z0-9]*]] <col:19, col:26> 'int'
// CHECK-NEXT: | `-ImplicitCastExpr [[ADDR_39:0x[a-z0-9]*]] <col:19> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_40:0x[a-z0-9]*]] <col:19> 'int ({{.*}})' {{.*}}Function [[ADDR_14]] 'not_OK' 'int ({{.*}})'
// CHECK-NEXT: `-CallExpr [[ADDR_41:0x[a-z0-9]*]] <col:30, col:35> 'int'
// CHECK-NEXT: `-ImplicitCastExpr [[ADDR_42:0x[a-z0-9]*]] <col:30> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: `-DeclRefExpr [[ADDR_43:0x[a-z0-9]*]] <col:30> 'int ({{.*}})' {{.*}}Function [[ADDR_28]] 'OK_3' 'int ({{.*}})'
|
uni_compiler_explorer.c | // TODO Mark regions
#include <assert.h>
#include <getopt.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include "../../support/timer.h"
#include <omp.h>
#define T int64_t

/* Benchmark-global state shared between create_test_file() and unique_host(). */
static int pos; /* output write cursor for unique_host() */
static T *A;    /* input array */
static T *B;    /* allocated and zeroed; scratch for other benchmark variants */
static T *C;    /* compacted output of unique_host() */
static T *C2;   /* declared for parity with other variants; never allocated here */

/* Allocate the global arrays and fill A with a run-length-2 pattern
 * (0, 2, 2, 4, 4, 6, ...) so roughly every other element starts a new run.
 * Returns A. Exits on allocation failure.
 * FIX: loop index was a signed int compared against the unsigned count
 * (misbehaves for counts > INT_MAX), and malloc results were unchecked. */
static T *create_test_file(unsigned int nr_elements) {
    A = malloc(nr_elements * sizeof(T));
    B = malloc(nr_elements * sizeof(T));
    C = malloc(nr_elements * sizeof(T));
    if (A == NULL || B == NULL || C == NULL) {
        fprintf(stderr, "Allocation of %u elements failed\n", nr_elements);
        exit(1);
    }
    printf("nr_elements\t%u\t", nr_elements);
    for (unsigned int i = 0; i < nr_elements; i++) {
        A[i] = i % 2 == 0 ? i : i + 1;
        B[i] = 0;
    }
    return A;
}
// Compute output in the host
static int unique_host(int size, int t) {
pos = 0;
C[pos] = A[pos];
omp_set_num_threads(t);
#pragma omp parallel for
for (int my = 1; my < size; my++) {
if (A[my] != A[my - 1]) {
int p;
#pragma omp atomic update
pos++;
p = pos;
C[p] = A[my];
}
}
return pos;
}
// Params: command-line configuration parsed by input_params().
typedef struct Params {
int input_size; // number of input elements (-i), default 16M
int n_warmup; // untimed warm-up iterations (-w)
int n_reps; // timed repetition iterations (-e)
int n_threads; // OpenMP thread count (-t)
} Params;
/* Print the command-line help text to stderr. */
void usage() {
    fputs("\nUsage: ./program [options]"
          "\n"
          "\nGeneral options:"
          "\n -h help"
          "\n -t <T> # of threads (default=8)"
          "\n -w <W> # of untimed warmup iterations (default=1)"
          "\n -e <E> # of timed repetition iterations (default=3)"
          "\n"
          "\nBenchmark-specific options:"
          "\n -i <I> input size (default=8M elements)"
          "\n",
          stderr);
}
struct Params input_params(int argc, char **argv) {
struct Params p;
p.input_size = 16 << 20;
p.n_warmup = 1;
p.n_reps = 3;
p.n_threads = 8;
int opt;
while ((opt = getopt(argc, argv, "hd:i:w:e:t:")) >= 0) {
switch (opt) {
case 'h':
usage();
exit(0);
break;
case 'i':
p.input_size = atoi(optarg);
break;
case 'w':
p.n_warmup = atoi(optarg);
break;
case 'e':
p.n_reps = atoi(optarg);
break;
case 't':
p.n_threads = atoi(optarg);
break;
default:
fprintf(stderr, "\nUnrecognized option!\n");
usage();
exit(0);
}
}
assert(p.n_threads > 0 && "Invalid # of ranks!");
return p;
}
// Main
// Entry point: parse arguments, build the input, then run and time the
// unique-count kernel once.
// FIX: the original signature was `int main()` yet the body used `argc` and
// `argv`, which were never declared — a hard compile error in C.
int main(int argc, char **argv) {
    struct Params p = input_params(argc, argv);
    const unsigned int file_size = p.input_size;
    int total_count;
    // Create an input file with arbitrary data
    create_test_file(file_size);
    Timer timer;
    start(&timer, 0, 0);
    total_count = unique_host(file_size, p.n_threads);
    stop(&timer, 0);
    printf("Total count = %d\t", total_count);
    printf("Kernel ");
    print(&timer, 0, 1);
    printf("\n");
    free(A);
    free(B);
    free(C);
    return 0;
}
|
omp_pi.c | /**
* Shared memory (OpenMP) parallel computation of pi.
*
* @author Akash Pallath
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
#define REFPI 3.1415926535897932384626433
/* Approximate pi with the midpoint rule on the integral of 4/(1+x^2)
 * over [0,1], using nsteps subintervals. The reduction makes the
 * parallel accumulation race-free; compiled without OpenMP the loop
 * simply runs serially with identical results. */
long double pi(long long nsteps) {
    long double width = 1.0 / ((long double)nsteps);
    long double total = 0.0;
#pragma omp parallel
    {
#pragma omp for reduction(+ : total)
        for (long long k = 0; k < nsteps; k++) {
            long double mid = (k + 0.5) * width; /* midpoint of subinterval k */
            total += 4.0 / (1.0 + mid * mid);
        }
    }
    return width * total;
}
int main(int argc, char* argv[]){
long long nsteps;
if(argc < 2){
printf("Required argument: number of steps to compute pi for\n");
exit(-1);
}
nsteps = atoll(argv[1]);
long double comp_pi = pi(nsteps);
long double error = fabs(comp_pi - REFPI);
printf("%lli steps; pi = %.25Lf; error = %.25Lf\n", nsteps, comp_pi, error);
}
|
conv_dw_kernel_arm.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2021, OPEN AI LAB
* Author: haoluo@openailab.com
*/
#include "conv_dw_kernel_arm.h"
#include "conv_dw_k5_k7_kernel_arm.h"
#include "conv_dw_dilation_kernel_arm.h"

/* memcpy/memset are used throughout this file but <string.h> was never
 * included, leaving them implicitly declared (invalid since C99). */
#include <string.h>

#include "graph/tensor.h"
#include "graph/node.h"
#include "graph/graph.h"
#include "module/module.h"
#include "operator/op.h"
#include "utility/sys_port.h"
#include "utility/log.h"
#include "device/cpu/cpu_node.h"
/* Copy an m x n float matrix from src into an m_align x n_align destination,
 * offsetting by (pad_h, pad_w); the caller must have zeroed dst so the
 * untouched border stays zero. If no alignment growth is needed the whole
 * matrix is copied in one shot. */
static void pad_0_align_2D(float* dst, float* src, int m, int n, int m_align, int n_align, int pad_h, int pad_w)
{
    if (m >= m_align && n >= n_align)
    {
        memcpy(dst, src, (size_t)m * n * sizeof(float));
        return;
    }
    for (int row = 0; row < m; ++row)
    {
        float* dst_row = dst + (row + pad_h) * n_align + pad_w;
        const float* src_row = src + row * n;
        memcpy(dst_row, src_row, (size_t)n * sizeof(float));
    }
}
/* Pad each of the c channel slices of a c x m x n volume into a
 * c x m_align x n_align destination (zero border on right/bottom via the
 * caller-zeroed dst), delegating per-slice work to pad_0_align_2D. */
static void pad_0_align_3D(float* dst, float* src, int m, int n, int m_align, int n_align, int c, int pad_h, int pad_w)
{
    if (m >= m_align && n >= n_align)
    {
        memcpy(dst, src, (size_t)c * m * n * sizeof(float));
        return;
    }
    for (int ch = 0; ch < c; ++ch)
    {
        pad_0_align_2D(dst + ch * m_align * n_align, src + ch * m * n,
                       m, n, m_align, n_align, pad_h, pad_w);
    }
}
/* Inverse of pad_0_align_2D: extract the m x n payload that sits at offset
 * (pad_h, pad_w) inside an m_align x n_align padded matrix. */
static void delete_0_2D(float* dst, float* src, int m_align, int n_align, int m, int n, int pad_h, int pad_w)
{
    if (m >= m_align && n >= n_align)
    {
        memcpy(dst, src, (size_t)m * n * sizeof(float));
        return;
    }
    for (int row = 0; row < m; ++row)
    {
        const float* src_row = src + (row + pad_h) * n_align + pad_w;
        memcpy(dst + row * n, src_row, (size_t)n * sizeof(float));
    }
}
/* Inverse of pad_0_align_3D: extract the c x m x n payload from a padded
 * c x m_align x n_align volume, one channel slice at a time. */
static void delete_0_3D(float* dst, float* src, int m_align, int n_align, int m, int n, int c, int pad_h, int pad_w)
{
    if (m >= m_align && n >= n_align)
    {
        memcpy(dst, src, (size_t)c * m * n * sizeof(float));
        return;
    }
    for (int ch = 0; ch < c; ++ch)
    {
        delete_0_2D(dst + ch * m * n, src + ch * m_align * n_align,
                    m_align, n_align, m, n, pad_h, pad_w);
    }
}
#ifdef __aarch64__
void dw_k3s2p0(float* data, int h, int w, float* kernel, float* output, float* bias, int out_w, int act);
void dw_k3s2p0p1(float* data, int h, int w, float* kernel, float* output, float* bias, int out_w, int act);
void dw_k3s1p1_a72(float* data, int h, int w, float* kernel, float* output, float* bias, int act);
void dw_k3s2p1_a72(float* data, int h, int w, float* kernel, float* output, float* bias, int act);
/* aarch64 path: 3x3 depthwise convolution dispatched to hand-written A72
 * assembly kernels, one channel per parallel iteration.
 * Dispatch: stride 1 -> dw_k3s1p1_a72; stride 2 with pad_h0 == 0 ->
 * dw_k3s2p0 / dw_k3s2p0p1 (depending on pad_h1); otherwise dw_k3s2p1_a72.
 * NOTE(review): only pads[0] and pads[2] (top/bottom) are consulted here —
 * presumably the kernels assume symmetric left/right padding; confirm. */
static void DirectConv(float* input_buf, int input_h, int input_w, float* output_buf, int output_h, int output_w,
float* weight_buf, int channel_num, int stride, float* bias, int* pads, int activation,
int num_thread, int cpu_affinity)
{
int channel_size = input_h * input_w;
int channel_size_out = output_h * output_w;
int pad_h0 = pads[0];
int pad_h1 = pads[2];
if (stride == 1)
{
#pragma omp parallel for num_threads(num_thread)
for (int i = 0; i < channel_num; i++)
{
float* cur_input = input_buf + i * channel_size;
float* cur_output = output_buf + i * channel_size_out;
float* bias_tmp = NULL;
if (bias)
bias_tmp = bias + i;
/* 9 = 3x3 kernel weights per channel */
dw_k3s1p1_a72(cur_input, input_h, input_w, weight_buf + i * 9, cur_output, bias_tmp, activation);
}
}
else if (pad_h0 == 0)
{
#pragma omp parallel for num_threads(num_thread)
for (int i = 0; i < channel_num; i++)
{
float* cur_input = input_buf + i * channel_size;
float* cur_output = output_buf + i * channel_size_out;
float* bias_tmp = NULL;
if (bias)
bias_tmp = bias + i;
if (pad_h1 == 0)
dw_k3s2p0(cur_input, input_h, input_w, weight_buf + i * 9, cur_output, bias_tmp, output_w, activation);
else
dw_k3s2p0p1(cur_input, input_h, input_w, weight_buf + i * 9, cur_output, bias_tmp, output_w,
activation);
}
}
else
{
#pragma omp parallel for num_threads(num_thread)
for (int i = 0; i < channel_num; i++)
{
float* cur_input = input_buf + i * channel_size;
float* cur_output = output_buf + i * channel_size_out;
float* bias_tmp = NULL;
if (bias)
bias_tmp = bias + i;
dw_k3s2p1_a72(cur_input, input_h, input_w, weight_buf + i * 9, cur_output, bias_tmp, activation);
}
}
}
#else
void dw_k3s2(float* input, float* kernel, float* output, int channel, int width, int height, float* bias, int pad0);
void dw_k3s2_relu_fused(float* input, float* kernel, float* output, int channel, int width, int height, float* bias,
int pad0);
void dw_k3s2_relu6_fused(float* input, float* kernel, float* output, int channel, int width, int height, float* bias,
int pad0);
void dw_k3s1p1(float* input, float* kernel, float* output, int channel, int width, int height, float* bias);
void dw_k3s1p1_relu_fused(float* input, float* kernel, float* output, int channel, int width, int height, float* bias);
void dw_k3s1p1_relu6_fused(float* input, float* kernel, float* output, int channel, int width, int height, float* bias);
/* Generic (non-aarch64) path: 3x3 depthwise convolution, one channel per
 * parallel iteration. activation encodes the fused op: < 0 none, == 0 ReLU,
 * > 0 ReLU6. NOTE(review): strides other than 1 or 2 fall through and produce
 * no output — presumably ruled out by the caller; confirm upstream checks. */
static void DirectConv(float* input_buf, int input_h, int input_w, float* output_buf, int output_h, int output_w,
float* weight_buf, int channel_num, int stride, float* bias, int* pads, int activation,
int num_thread, int cpu_affinity)
{
int pad_h0 = pads[0];
if (stride == 1)
{
#pragma omp parallel for num_threads(num_thread)
for (int c = 0; c < channel_num; c++)
{
float* cur_input = input_buf + c * input_h * input_w;
float* cur_output = output_buf + c * output_h * output_w;
float* cur_weight = weight_buf + c * 9; /* 3x3 weights per channel */
float* cur_bias = bias ? bias + c : bias;
if (activation >= 0)
{
if (activation == 0)
dw_k3s1p1_relu_fused(cur_input, cur_weight, cur_output, 1, input_w, input_h, cur_bias);
else
dw_k3s1p1_relu6_fused(cur_input, cur_weight, cur_output, 1, input_w, input_h, cur_bias);
}
else
{
dw_k3s1p1(cur_input, cur_weight, cur_output, 1, input_w, input_h, cur_bias);
}
}
}
else if (stride == 2)
{
#pragma omp parallel for num_threads(num_thread)
for (int c = 0; c < channel_num; c++)
{
float* cur_input = input_buf + c * input_h * input_w;
float* cur_output = output_buf + c * output_h * output_w;
float* cur_weight = weight_buf + c * 9;
float* cur_bias = bias ? bias + c : bias;
if (activation >= 0)
{
if (activation == 0)
dw_k3s2_relu_fused(cur_input, cur_weight, cur_output, 1, input_w, input_h, cur_bias, pad_h0);
else
dw_k3s2_relu6_fused(cur_input, cur_weight, cur_output, 1, input_w, input_h, cur_bias, pad_h0);
}
else
{
dw_k3s2(cur_input, cur_weight, cur_output, 1, input_w, input_h, cur_bias, pad_h0);
}
}
}
}
#endif
int conv_dw_prerun(struct tensor* input_tensor, struct tensor* filter_tensor,
struct tensor* output_tensor, struct conv_priv_info* priv_info, struct conv_param* param)
{
int batch = input_tensor->dims[0];
int input_c = input_tensor->dims[1];
int input_h = input_tensor->dims[2];
int input_w = input_tensor->dims[3];
int pad_h0 = param->pad_h0;
int pad_w0 = param->pad_w0;
int pad_h1 = param->pad_h1;
int pad_w1 = param->pad_w1;
int padded_in_h = input_h + pad_h0 + pad_h1;
int padded_in_w = input_w + pad_w0 + pad_w1;
priv_info->input_pad = sys_malloc(batch * input_c * padded_in_h * padded_in_w * sizeof(float));
memset(priv_info->input_pad, 0, batch * input_c * padded_in_h * padded_in_w * sizeof(float));
return 0;
}
/* Run a depthwise convolution, dispatching per batch to the specialized
 * kernel that matches (dilation, kernel size, stride).
 * Returns 0 on success, -1 when stride_h != stride_w (unsupported).
 * NOTE(review): kernel sizes other than 3/5/7 (and the non-matching dilation
 * case) silently produce no output — presumably filtered by the op scheduler;
 * confirm. */
int conv_dw_run(struct tensor* input_tensor, struct tensor* filter_tensor, struct tensor* bias_tensor,
struct tensor* output_tensor, struct conv_priv_info* conv_info, struct conv_param* param, int num_thread, int cpu_affinity)
{
/* param */
int pads[4];
int group = param->group;
int kernel_h = param->kernel_h;
int kernel_w = param->kernel_w;
int stride_h = param->stride_h;
int stride_w = param->stride_w;
int dilation_h = param->dilation_h;
int dilation_w = param->dilation_w;
pads[0] = param->pad_h0;
pads[1] = param->pad_w0;
pads[2] = param->pad_h1;
pads[3] = param->pad_w1;
/* only square strides are supported by the kernels below */
if (stride_h != stride_w)
return -1;
int act_type = param->activation;
int batch = input_tensor->dims[0];
int in_c = input_tensor->dims[1] / group; /* channels per group (1 for pure depthwise) */
int in_h = input_tensor->dims[2];
int in_w = input_tensor->dims[3];
int input_size = in_c * in_h * in_w;
int out_c = output_tensor->dims[1] / group;
int out_h = output_tensor->dims[2];
int out_w = output_tensor->dims[3];
int output_size = out_c * out_h * out_w;
int padded_in_h = in_h + param->pad_h0 + param->pad_h1;
int padded_in_w = in_w + param->pad_w0 + param->pad_w1;
/* buffer addr */
float* input_buf = (float*)input_tensor->data;
float* kernel_buf = (float*)filter_tensor->data;
float* output_buf = (float*)output_tensor->data;
float* biases_buf = NULL;
if (bias_tensor)
biases_buf = (float*)bias_tensor->data;
for (int n = 0; n < batch; n++) // batch size
{
float* cur_input = input_buf + n * input_size * group;
float* cur_output = output_buf + n * output_size * group;
/* dilated path only when dilation matches the top padding exactly */
if (dilation_h != 1 && dilation_w != 1 && dilation_h == pads[0])
{
conv_dw_dilation_run(cur_input, kernel_buf, biases_buf, cur_output, in_h, in_w, group, pads[0], act_type,
num_thread);
}
else if (kernel_h == 3 && kernel_w == 3)
{
DirectConv(cur_input, in_h, in_w, cur_output, out_h, out_w, kernel_buf, group, stride_h, biases_buf, pads,
act_type, num_thread, cpu_affinity);
}
else if (kernel_h == 5 && kernel_w == 5)
{
if (stride_h == 1)
{
/* stride-1 5x5 needs an explicitly zero-padded input copy; the scratch
 * buffer was allocated and zeroed in conv_dw_prerun */
pad_0_align_3D((float*)conv_info->input_pad + n * group * padded_in_h * padded_in_w, cur_input,
in_h, in_w, padded_in_h, padded_in_w, group, param->pad_h0, param->pad_w0);
depthwise_conv_k5s1((float*)conv_info->input_pad, kernel_buf, biases_buf, cur_output, padded_in_h, padded_in_w, group, out_h, out_w,
act_type, num_thread);
}
else if (stride_h == 2)
depthwise_conv_k5s2(cur_input, kernel_buf, biases_buf, cur_output, in_h, in_w, group, out_h, out_w,
act_type, num_thread);
}
else if (kernel_h == 7 && kernel_w == 7)
{
if (stride_h == 1)
depthwise_conv_k7s1(cur_input, kernel_buf, biases_buf, cur_output, in_h, in_w, group, out_h, out_w,
act_type, num_thread);
else if (stride_h == 2)
depthwise_conv_k7s2(cur_input, kernel_buf, biases_buf, cur_output, in_h, in_w, group, out_h, out_w,
act_type, num_thread);
}
}
return 0;
}
/* Release the padded-input scratch buffer allocated by conv_dw_prerun.
 * Safe to call more than once; always returns 0. */
int conv_dw_postrun(struct conv_priv_info* priv_info)
{
    void* pad_buf = priv_info->input_pad;
    if (pad_buf != NULL)
    {
        sys_free(pad_buf);
        priv_info->input_pad = NULL;
    }
    return 0;
}
lotus5_fmt_plug.c | //original work by Jeff Fay
//some optimisations by bartavelle at bandecon.com
/* OpenMP support and further optimizations (including some code rewrites)
* by Solar Designer */
#if FMT_EXTERNS_H
extern struct fmt_main fmt_lotus5;
#elif FMT_REGISTERS_H
john_register_one(&fmt_lotus5);
#else
#include <stdio.h>
#include <string.h>
#include "misc.h"
#include "formats.h"
#include "common.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#include "memdbg.h"
#ifdef __x86_64__
#define LOTUS_N 3
#define LOTUS_N_STR " X3"
#else
#define LOTUS_N 2
#define LOTUS_N_STR " X2"
#endif
/*preprocessor constants that John The Ripper likes*/
#define FORMAT_LABEL "lotus5"
#define FORMAT_NAME "Lotus Notes/Domino 5"
#define ALGORITHM_NAME "8/" ARCH_BITS_STR LOTUS_N_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 16
#define CIPHERTEXT_LENGTH 32
#define BINARY_SIZE 16
#define SALT_SIZE 0
#define BINARY_ALIGN sizeof(ARCH_WORD_32)
#define SALT_ALIGN 1
#define MIN_KEYS_PER_CRYPT LOTUS_N
/* Must be divisible by any LOTUS_N (thus, by 2 and 3) */
#define MAX_KEYS_PER_CRYPT 0x900
/*A struct used for JTR's benchmarks*/
static struct fmt_tests tests[] = {
{"06E0A50B579AD2CD5FFDC48564627EE7", "secret"},
{"355E98E7C7B59BD810ED845AD0FD2FC4", "password"},
{"CD2D90E8E00D8A2A63A81F531EA8A9A3", "lotus"},
{"69D90B46B1AC0912E5CCF858094BBBFC", "dirtydog"},
{NULL}
};
static const unsigned char lotus_magic_table[] = {
0xbd, 0x56, 0xea, 0xf2, 0xa2, 0xf1, 0xac, 0x2a,
0xb0, 0x93, 0xd1, 0x9c, 0x1b, 0x33, 0xfd, 0xd0,
0x30, 0x04, 0xb6, 0xdc, 0x7d, 0xdf, 0x32, 0x4b,
0xf7, 0xcb, 0x45, 0x9b, 0x31, 0xbb, 0x21, 0x5a,
0x41, 0x9f, 0xe1, 0xd9, 0x4a, 0x4d, 0x9e, 0xda,
0xa0, 0x68, 0x2c, 0xc3, 0x27, 0x5f, 0x80, 0x36,
0x3e, 0xee, 0xfb, 0x95, 0x1a, 0xfe, 0xce, 0xa8,
0x34, 0xa9, 0x13, 0xf0, 0xa6, 0x3f, 0xd8, 0x0c,
0x78, 0x24, 0xaf, 0x23, 0x52, 0xc1, 0x67, 0x17,
0xf5, 0x66, 0x90, 0xe7, 0xe8, 0x07, 0xb8, 0x60,
0x48, 0xe6, 0x1e, 0x53, 0xf3, 0x92, 0xa4, 0x72,
0x8c, 0x08, 0x15, 0x6e, 0x86, 0x00, 0x84, 0xfa,
0xf4, 0x7f, 0x8a, 0x42, 0x19, 0xf6, 0xdb, 0xcd,
0x14, 0x8d, 0x50, 0x12, 0xba, 0x3c, 0x06, 0x4e,
0xec, 0xb3, 0x35, 0x11, 0xa1, 0x88, 0x8e, 0x2b,
0x94, 0x99, 0xb7, 0x71, 0x74, 0xd3, 0xe4, 0xbf,
0x3a, 0xde, 0x96, 0x0e, 0xbc, 0x0a, 0xed, 0x77,
0xfc, 0x37, 0x6b, 0x03, 0x79, 0x89, 0x62, 0xc6,
0xd7, 0xc0, 0xd2, 0x7c, 0x6a, 0x8b, 0x22, 0xa3,
0x5b, 0x05, 0x5d, 0x02, 0x75, 0xd5, 0x61, 0xe3,
0x18, 0x8f, 0x55, 0x51, 0xad, 0x1f, 0x0b, 0x5e,
0x85, 0xe5, 0xc2, 0x57, 0x63, 0xca, 0x3d, 0x6c,
0xb4, 0xc5, 0xcc, 0x70, 0xb2, 0x91, 0x59, 0x0d,
0x47, 0x20, 0xc8, 0x4f, 0x58, 0xe0, 0x01, 0xe2,
0x16, 0x38, 0xc4, 0x6f, 0x3b, 0x0f, 0x65, 0x46,
0xbe, 0x7e, 0x2d, 0x7b, 0x82, 0xf9, 0x40, 0xb5,
0x1d, 0x73, 0xf8, 0xeb, 0x26, 0xc7, 0x87, 0x97,
0x25, 0x54, 0xb1, 0x28, 0xaa, 0x98, 0x9d, 0xa5,
0x64, 0x6d, 0x7a, 0xd4, 0x10, 0x81, 0x44, 0xef,
0x49, 0xd6, 0xae, 0x2e, 0xdd, 0x76, 0x5c, 0x2f,
0xa7, 0x1c, 0xc9, 0x09, 0x69, 0x9a, 0x83, 0xcf,
0x29, 0x39, 0xb9, 0xe9, 0x4c, 0xff, 0x43, 0xab,
0xbd, 0x56, 0xea, 0xf2, 0xa2, 0xf1, 0xac, 0x2a,
0xb0, 0x93, 0xd1, 0x9c, 0x1b, 0x33, 0xfd, 0xd0,
0x30, 0x04, 0xb6, 0xdc, 0x7d, 0xdf, 0x32, 0x4b,
0xf7, 0xcb, 0x45, 0x9b, 0x31, 0xbb, 0x21, 0x5a,
0x41, 0x9f, 0xe1, 0xd9, 0x4a, 0x4d, 0x9e, 0xda,
0xa0, 0x68, 0x2c, 0xc3, 0x27, 0x5f, 0x80, 0x36
};
/*Some more JTR variables*/
static ARCH_WORD_32 (*crypt_key)[BINARY_SIZE / 4];
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
/* JtR format init: size the shared key/hash buffers and, under OpenMP, raise
 * min_keys_per_crypt so each thread gets work.
 * NOTE(review): min is set to 2*threads, which need not be a multiple of
 * LOTUS_N (3 on x86_64) even though crypt_all processes keys in groups of
 * LOTUS_N — presumably JtR rounds the actual count up to a safe multiple;
 * confirm against the core's key-batching rules. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
int n = omp_get_max_threads();
if (n < 1)
n = 1;
n *= 2;
if (n > self->params.max_keys_per_crypt)
n = self->params.max_keys_per_crypt;
self->params.min_keys_per_crypt = n;
#endif
/* one allocation holds both arrays: crypt_key first, saved_key right after */
crypt_key = mem_calloc_tiny(
(sizeof(*crypt_key) + sizeof(*saved_key)) *
self->params.max_keys_per_crypt,
MEM_ALIGN_CACHE);
saved_key = (void *)((char *)crypt_key +
sizeof(*crypt_key) * self->params.max_keys_per_crypt);
}
/* Utility function to convert the 32-hex-digit ciphertext to its 16-byte
 * binary form. Returns a pointer to a static buffer (overwritten on each
 * call — standard JtR convention for binary()). Assumes valid() already
 * vetted the input as hex. */
static void * binary (char *ciphertext)
{
static char realcipher[BINARY_SIZE];
int i;
for (i = 0; i < BINARY_SIZE; i++)
/* atoi16 maps a hex digit char to its value; two digits per output byte */
realcipher[i] = atoi16[ARCH_INDEX(ciphertext[i*2])]*16 + atoi16[ARCH_INDEX(ciphertext[i*2+1])];
return ((void *) realcipher);
}
/* JtR validity check: accept only strings that are exactly
 * CIPHERTEXT_LENGTH hexadecimal digits (upper or lower case) followed by
 * the terminating NUL. A short string fails inside the loop because its
 * NUL is not a hex digit; a long string fails the final terminator check. */
static int valid (char *ciphertext, struct fmt_main *self)
{
    int pos = 0;
    while (pos < CIPHERTEXT_LENGTH)
    {
        char ch = ciphertext[pos];
        int is_hex = (ch >= '0' && ch <= '9') ||
                     (ch >= 'a' && ch <= 'f') ||
                     (ch >= 'A' && ch <= 'F');
        if (!is_hex)
            return 0;
        pos++;
    }
    return !ciphertext[pos];
}
/* Stores a candidate password into the shared saved_key slot (truncating to
 * PLAINTEXT_LENGTH); called by the JtR core before crypt_all. */
static void set_key (char *key, int index)
{
strnzcpy (saved_key[index], key, PLAINTEXT_LENGTH + 1);
}
/* Retrieves the stored candidate password; used by JTR for reporting. */
static char * get_key (int index)
{
return saved_key[index];
}
/* Returns 1 if any of the `count` computed hashes matches `binary`. */
static int cmp_all (void *binary, int count)
{
int index;
for (index = 0; index < count; index++)
if (!memcmp(binary, crypt_key[index], BINARY_SIZE))
return 1;
return 0;
}
/* Exact comparison against one computed hash. */
static int cmp_one (void *binary, int index)
{
return !memcmp(binary, crypt_key[index], BINARY_SIZE);
}
/* The full 16-byte digest is already compared in cmp_one, so nothing more
 * to verify here. */
static int cmp_exact (char *source, int index)
{
return 1;
}
/*Beginning of private functions*/
/* Takes the plaintext password and generates the second row of our
 * working matrix for the final call to the mixing function.
 * Processes LOTUS_N (2 or 3) independent password lanes in lockstep so the
 * table lookups of the lanes interleave — a throughput optimization, not a
 * data dependency. Each i*/o* pair is a 16-byte buffer; t* carry the
 * chained previous output byte that is XORed into the next lookup. */
static void MAYBE_INLINE
#if LOTUS_N == 3
lotus_transform_password (unsigned char *i0, unsigned char *o0,
unsigned char *i1, unsigned char *o1,
unsigned char *i2, unsigned char *o2)
#else
lotus_transform_password (unsigned char *i0, unsigned char *o0,
unsigned char *i1, unsigned char *o1)
#endif
{
unsigned char t0, t1;
#if LOTUS_N == 3
unsigned char t2;
#endif
int i;
#if LOTUS_N == 3
t0 = t1 = t2 = 0;
#else
t0 = t1 = 0;
#endif
/* 8 iterations x 2 bytes each = 16 bytes per lane */
for (i = 0; i < 8; i++)
{
t0 = *o0++ = lotus_magic_table[ARCH_INDEX(*i0++ ^ t0)];
t1 = *o1++ = lotus_magic_table[ARCH_INDEX(*i1++ ^ t1)];
#if LOTUS_N == 3
t2 = *o2++ = lotus_magic_table[ARCH_INDEX(*i2++ ^ t2)];
#endif
t0 = *o0++ = lotus_magic_table[ARCH_INDEX(*i0++ ^ t0)];
t1 = *o1++ = lotus_magic_table[ARCH_INDEX(*i1++ ^ t1)];
#if LOTUS_N == 3
t2 = *o2++ = lotus_magic_table[ARCH_INDEX(*i2++ ^ t2)];
#endif
}
}
/* The mixing function: perturbs the first three rows of the matrix.
 * Each m* argument is one 48-byte working matrix (rows 0..2 of a lane);
 * as in lotus_transform_password, LOTUS_N lanes are processed in lockstep.
 * 18 outer rounds; the inner loop walks all 48 bytes while j counts down,
 * feeding (j + previous byte) into the magic-table lookup.
 * lotus_magic_table is 304 bytes long precisely so the j + t index (up to
 * 48 + 255) stays in bounds without masking. */
#if LOTUS_N == 3
static void lotus_mix (unsigned char *m0, unsigned char *m1,
unsigned char *m2)
#else
static void lotus_mix (unsigned char *m0, unsigned char *m1)
#endif
{
unsigned char t0, t1;
unsigned char *p0, *p1;
#if LOTUS_N == 3
unsigned char t2;
unsigned char *p2;
#endif
int i, j;
#if LOTUS_N == 3
t0 = t1 = t2 = 0;
#else
t0 = t1 = 0;
#endif
for (i = 18; i > 0; i--)
{
p0 = m0;
p1 = m1;
#if LOTUS_N == 3
p2 = m2;
#endif
/* two bytes per iteration, j decremented between them */
for (j = 48; j > 0; j--)
{
t0 = p0[0] ^= lotus_magic_table[ARCH_INDEX(j + t0)];
t1 = p1[0] ^= lotus_magic_table[ARCH_INDEX(j + t1)];
#if LOTUS_N == 3
t2 = p2[0] ^= lotus_magic_table[ARCH_INDEX(j + t2)];
#endif
j--;
t0 = p0[1] ^= lotus_magic_table[ARCH_INDEX(j + t0)];
p0 += 2;
t1 = p1[1] ^= lotus_magic_table[ARCH_INDEX(j + t1)];
p1 += 2;
#if LOTUS_N == 3
t2 = p2[1] ^= lotus_magic_table[ARCH_INDEX(j + t2)];
p2 += 2;
#endif
}
}
}
/*the last public function; generates ciphertext*/
/* Computes the Lotus 5 digest for `count` stored keys, LOTUS_N lanes per
 * loop iteration (each iteration is independent, hence the bare omp
 * parallel for). Per lane the 64-byte working matrix u.m is viewed as four
 * 16-byte rows m4[0..3]: row0 = state, row1 = padded password, row2 starts
 * as a copy of row1 and later becomes row0^row1, row3 = transformed
 * password. NOTE(review): indices index+1 / index+2 assume count is a
 * multiple of LOTUS_N — guaranteed by MAX_KEYS_PER_CRYPT's divisibility,
 * presumably also for partial batches; confirm against JtR core. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
int count = *pcount;
int index;
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (index = 0; index < count; index += LOTUS_N) {
struct {
union {
unsigned char m[64];
unsigned char m4[4][16];
ARCH_WORD m4w[4][16 / ARCH_SIZE];
} u;
} ctx[LOTUS_N];
int password_length;
/* lane 0: zero row0; pad row1 with (16 - len) per the Lotus scheme */
memset(ctx[0].u.m4[0], 0, 16);
password_length = strlen(saved_key[index]);
memset(ctx[0].u.m4[1], (PLAINTEXT_LENGTH - password_length), PLAINTEXT_LENGTH);
memcpy(ctx[0].u.m4[1], saved_key[index], password_length);
memcpy(ctx[0].u.m4[2], ctx[0].u.m4[1], 16);
/* lane 1 */
memset(ctx[1].u.m4[0], 0, 16);
password_length = strlen(saved_key[index + 1]);
memset(ctx[1].u.m4[1], (PLAINTEXT_LENGTH - password_length), PLAINTEXT_LENGTH);
memcpy(ctx[1].u.m4[1], saved_key[index + 1], password_length);
memcpy(ctx[1].u.m4[2], ctx[1].u.m4[1], 16);
#if LOTUS_N == 3
/* lane 2 (x86_64 builds only) */
memset(ctx[2].u.m4[0], 0, 16);
password_length = strlen(saved_key[index + 2]);
memset(ctx[2].u.m4[1], (PLAINTEXT_LENGTH - password_length), PLAINTEXT_LENGTH);
memcpy(ctx[2].u.m4[1], saved_key[index + 2], password_length);
memcpy(ctx[2].u.m4[2], ctx[2].u.m4[1], 16);
lotus_transform_password(ctx[0].u.m4[1], ctx[0].u.m4[3],
ctx[1].u.m4[1], ctx[1].u.m4[3],
ctx[2].u.m4[1], ctx[2].u.m4[3]);
lotus_mix(ctx[0].u.m, ctx[1].u.m, ctx[2].u.m);
#else
lotus_transform_password(ctx[0].u.m4[1], ctx[0].u.m4[3],
ctx[1].u.m4[1], ctx[1].u.m4[3]);
lotus_mix(ctx[0].u.m, ctx[1].u.m);
#endif
/* second pass: row1 <- transformed password, row2 <- row0 ^ row1 */
memcpy(ctx[0].u.m4[1], ctx[0].u.m4[3], 16);
memcpy(ctx[1].u.m4[1], ctx[1].u.m4[3], 16);
#if LOTUS_N == 3
memcpy(ctx[2].u.m4[1], ctx[2].u.m4[3], 16);
#endif
{
int i;
/* word-wide XOR via the m4w view of the same union */
for (i = 0; i < 16 / ARCH_SIZE; i++) {
ctx[0].u.m4w[2][i] = ctx[0].u.m4w[0][i] ^ ctx[0].u.m4w[1][i];
ctx[1].u.m4w[2][i] = ctx[1].u.m4w[0][i] ^ ctx[1].u.m4w[1][i];
#if LOTUS_N == 3
ctx[2].u.m4w[2][i] = ctx[2].u.m4w[0][i] ^ ctx[2].u.m4w[1][i];
#endif
}
}
#if LOTUS_N == 3
lotus_mix(ctx[0].u.m, ctx[1].u.m, ctx[2].u.m);
#else
lotus_mix(ctx[0].u.m, ctx[1].u.m);
#endif
/* row0 now holds the 16-byte digest */
memcpy(crypt_key[index], ctx[0].u.m4[0], BINARY_SIZE);
memcpy(crypt_key[index + 1], ctx[1].u.m4[0], BINARY_SIZE);
#if LOTUS_N == 3
memcpy(crypt_key[index + 2], ctx[2].u.m4[0], BINARY_SIZE);
#endif
}
return count;
}
/* Bucketing hashes over the first 32 bits of a computed digest, at the
 * standard JtR ladder of table sizes (4..27 bits). */
static int get_hash1(int index) { return crypt_key[index][0] & 0xf; }
static int get_hash2(int index) { return crypt_key[index][0] & 0xff; }
static int get_hash3(int index) { return crypt_key[index][0] & 0xfff; }
static int get_hash4(int index) { return crypt_key[index][0] & 0xffff; }
static int get_hash5(int index) { return crypt_key[index][0] & 0xfffff; }
static int get_hash6(int index) { return crypt_key[index][0] & 0xffffff; }
static int get_hash7(int index) { return crypt_key[index][0] & 0x7ffffff; }
/* Same ladder applied to a loaded binary hash; must mirror get_hash*. */
static int binary_hash1(void * binary) { return *(ARCH_WORD_32 *)binary & 0xf; }
static int binary_hash2(void * binary) { return *(ARCH_WORD_32 *)binary & 0xff; }
static int binary_hash3(void * binary) { return *(ARCH_WORD_32 *)binary & 0xfff; }
static int binary_hash4(void * binary) { return *(ARCH_WORD_32 *)binary & 0xffff; }
static int binary_hash5(void * binary) { return *(ARCH_WORD_32 *)binary & 0xfffff; }
static int binary_hash6(void * binary) { return *(ARCH_WORD_32 *)binary & 0xffffff; }
static int binary_hash7(void * binary) { return *(ARCH_WORD_32 *)binary & 0x7ffffff; }
/* C's version of a class specifier */
/* Format descriptor registered with the JtR core: first the static
 * parameters block, then the method (function-pointer) table. */
struct fmt_main fmt_lotus5 = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP,
#if FMT_MAIN_VERSION > 11
{ NULL },
#endif
tests
}, {
init,
fmt_default_done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
binary,
fmt_default_salt, /* saltless format: default stubs for all salt methods */
#if FMT_MAIN_VERSION > 11
{ NULL },
#endif
fmt_default_source,
{
binary_hash1,
binary_hash2,
binary_hash3,
binary_hash4,
binary_hash5,
binary_hash6,
binary_hash7
},
fmt_default_salt_hash,
fmt_default_set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash1,
get_hash2,
get_hash3,
get_hash4,
get_hash5,
get_hash6,
get_hash7
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
_unroll02.c | void axpy(int n, double *y, double a, double *x) {
register int i;
/*@ begin Loop(transform Unroll(ufactor=5, parallelize=True)
for (i=0; i<=n-1; i++)
y[i]+=a*x[i];
) @*/
/* Orio-generated: the annotated loop above was unrolled by 5 and
parallelized with OpenMP; keep code between begin/end markers in sync
with the annotation rather than hand-editing it. */
{
int i;
#pragma omp parallel for private(i)
for (i=0; i<=n-5; i=i+5) {
y[i]=y[i]+a*x[i];
y[(i+1)]=y[(i+1)]+a*x[(i+1)];
y[(i+2)]=y[(i+2)]+a*x[(i+2)];
y[(i+3)]=y[(i+3)]+a*x[(i+3)];
y[(i+4)]=y[(i+4)]+a*x[(i+4)];
}
/* cleanup loop: the trailing n % 5 elements the unrolled loop skipped */
for (i=n-((n-(0))%5); i<=n-1; i=i+1)
y[i]=y[i]+a*x[i];
}
/*@ end @*/
}
|
GB_binop__land_fp32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__land_fp32)
// A.*B function (eWiseMult): GB (_AemultB_08__land_fp32)
// A.*B function (eWiseMult): GB (_AemultB_02__land_fp32)
// A.*B function (eWiseMult): GB (_AemultB_04__land_fp32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__land_fp32)
// A*D function (colscale): GB (_AxD__land_fp32)
// D*A function (rowscale): GB (_DxB__land_fp32)
// C+=B function (dense accum): GB (_Cdense_accumB__land_fp32)
// C+=b function (dense accum): GB (_Cdense_accumb__land_fp32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__land_fp32)
// C=scalar+B GB (_bind1st__land_fp32)
// C=scalar+B' GB (_bind1st_tran__land_fp32)
// C=A+scalar GB (_bind2nd__land_fp32)
// C=A'+scalar GB (_bind2nd_tran__land_fp32)
// C type: float
// A type: float
// A pattern? 0
// B type: float
// B pattern? 0
// BinaryOp: cij = ((aij != 0) && (bij != 0))
#define GB_ATYPE \
float
#define GB_BTYPE \
float
#define GB_CTYPE \
float
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
float aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
float bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
float t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = ((x != 0) && (y != 0)) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LAND || GxB_NO_FP32 || GxB_NO_LAND_FP32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__land_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__land_fp32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__land_fp32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type float
float bwork = (*((float *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__land_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *restrict Cx = (float *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__land_fp32)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *restrict Cx = (float *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__land_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
float alpha_scalar ;
float beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((float *) alpha_scalar_in)) ;
beta_scalar = (*((float *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__land_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__land_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__land_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__land_fp32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = LAND (x, Bx [p]) for every entry of B, with the scalar x bound
// as the first operand.  Parallel loop, static schedule.
GrB_Info GB (_bind1st__land_fp32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb, // NOTE(review): presumably B's bitmap; GBB skips absent entries
int64_t bnz, // number of entries of B to scan
int nthreads // OpenMP thread count for the loop below
)
{
#if GB_DISABLE
// operator disabled at compile time via GxB_NO_* (see GB_DISABLE)
return (GrB_NO_VALUE) ;
#else
float *Cx = (float *) Cx_output ;
float x = (*((float *) x_input)) ;
float *Bx = (float *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
float bij = GBX (Bx, p, false) ;
// cij = (x != 0) && (bij != 0) : logical AND on float operands
Cx [p] = ((x != 0) && (bij != 0)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = LAND (Ax [p], y) for every entry of A, with the scalar y bound
// as the second operand.  Parallel loop, static schedule.
GrB_Info GB (_bind2nd__land_fp32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab, // NOTE(review): presumably A's bitmap; GBB skips absent entries
int64_t anz, // number of entries of A to scan
int nthreads // OpenMP thread count for the loop below
)
{
#if GB_DISABLE
// operator disabled at compile time via GxB_NO_* (see GB_DISABLE)
return (GrB_NO_VALUE) ;
#else
int64_t p ;
float *Cx = (float *) Cx_output ;
float *Ax = (float *) Ax_input ;
float y = (*((float *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
float aij = GBX (Ax, p, false) ;
// cij = (aij != 0) && (y != 0) : logical AND on float operands
Cx [p] = ((aij != 0) && (y != 0)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = GBX (Ax, pA, false) ; \
Cx [pC] = ((x != 0) && (aij != 0)) ; \
}
GrB_Info GB (_bind1st_tran__land_fp32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
float
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float x = (*((const float *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
float
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = GBX (Ax, pA, false) ; \
Cx [pC] = ((aij != 0) && (y != 0)) ; \
}
GrB_Info GB (_bind2nd_tran__land_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float y = (*((const float *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__rdiv_int16.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__rdiv_int16)
// A.*B function (eWiseMult): GB (_AemultB_08__rdiv_int16)
// A.*B function (eWiseMult): GB (_AemultB_02__rdiv_int16)
// A.*B function (eWiseMult): GB (_AemultB_04__rdiv_int16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__rdiv_int16)
// A*D function (colscale): GB (_AxD__rdiv_int16)
// D*A function (rowscale): GB (_DxB__rdiv_int16)
// C+=B function (dense accum): GB (_Cdense_accumB__rdiv_int16)
// C+=b function (dense accum): GB (_Cdense_accumb__rdiv_int16)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rdiv_int16)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rdiv_int16)
// C=scalar+B GB (_bind1st__rdiv_int16)
// C=scalar+B' GB (_bind1st_tran__rdiv_int16)
// C=A+scalar GB (_bind2nd__rdiv_int16)
// C=A'+scalar GB (_bind2nd_tran__rdiv_int16)
// C type: int16_t
// A type: int16_t
// A pattern? 0
// B type: int16_t
// B pattern? 0
// BinaryOp: cij = GB_IDIV_SIGNED (bij, aij, 16)
#define GB_ATYPE \
int16_t
#define GB_BTYPE \
int16_t
#define GB_CTYPE \
int16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int16_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int16_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_IDIV_SIGNED (y, x, 16) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_RDIV || GxB_NO_INT16 || GxB_NO_RDIV_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__rdiv_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__rdiv_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__rdiv_int16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__rdiv_int16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int16_t
int16_t bwork = (*((int16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__rdiv_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *restrict Cx = (int16_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__rdiv_int16)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *restrict Cx = (int16_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__rdiv_int16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
int16_t alpha_scalar ;
int16_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((int16_t *) alpha_scalar_in)) ;
beta_scalar = (*((int16_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__rdiv_int16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__rdiv_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__rdiv_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__rdiv_int16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = RDIV (x, Bx [p]) with the scalar x bound as the first operand;
// rdiv(x,y) = y/x, so each cij = bij / x (division guarded by
// GB_IDIV_SIGNED -- TODO confirm its zero/overflow semantics in GB.h).
GrB_Info GB (_bind1st__rdiv_int16)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb, // NOTE(review): presumably B's bitmap; GBB skips absent entries
int64_t bnz, // number of entries of B to scan
int nthreads // OpenMP thread count for the loop below
)
{
#if GB_DISABLE
// operator disabled at compile time via GxB_NO_* (see GB_DISABLE)
return (GrB_NO_VALUE) ;
#else
int16_t *Cx = (int16_t *) Cx_output ;
int16_t x = (*((int16_t *) x_input)) ;
int16_t *Bx = (int16_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
int16_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_IDIV_SIGNED (bij, x, 16) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = RDIV (Ax [p], y) with the scalar y bound as the second operand;
// rdiv(x,y) = y/x, so each cij = y / aij (division guarded by
// GB_IDIV_SIGNED -- TODO confirm its zero/overflow semantics in GB.h).
GrB_Info GB (_bind2nd__rdiv_int16)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab, // NOTE(review): presumably A's bitmap; GBB skips absent entries
int64_t anz, // number of entries of A to scan
int nthreads // OpenMP thread count for the loop below
)
{
#if GB_DISABLE
// operator disabled at compile time via GxB_NO_* (see GB_DISABLE)
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int16_t *Cx = (int16_t *) Cx_output ;
int16_t *Ax = (int16_t *) Ax_input ;
int16_t y = (*((int16_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int16_t aij = GBX (Ax, p, false) ;
Cx [p] = GB_IDIV_SIGNED (y, aij, 16) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IDIV_SIGNED (aij, x, 16) ; \
}
GrB_Info GB (_bind1st_tran__rdiv_int16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t x = (*((const int16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IDIV_SIGNED (y, aij, 16) ; \
}
GrB_Info GB (_bind2nd_tran__rdiv_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t y = (*((const int16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
lpa_on_signal.c | /* lpa_on_signal.c -- ECOZ System
*/
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
//#undef PAR
#define PAR 1
#ifdef PAR
#include <omp.h>
#endif
#include "lpc.h"
#include "utl.h"
// M_PI dropped in C99 (?)
#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif
/* Fill hamming[0..winSize-1] with Hamming window coefficients:
 *   w[n] = 0.54 - 0.46 * cos(2*pi*n / (winSize - 1)).
 * Guards the degenerate winSize <= 1 case: the original formula divides by
 * (winSize - 1) == 0 and yields NaN; the conventional single-point window
 * is 1.0.  winSize <= 0 writes nothing, as before. */
static inline void create_hamming(sample_t *hamming, int winSize) {
    if (winSize <= 1) {
        if (winSize == 1) {
            hamming[0] = 1.0;
        }
        return;
    }
    for (int n = 0; n < winSize; ++n) {
        hamming[n] = .54 - .46 * cos((n * 2 * M_PI) / (winSize - 1));
    }
}
/* Copy numSamples samples from the signal into the working frame buffer. */
static inline void fill_frame(sample_t *samples, int numSamples, sample_t *frame) {
    for (int k = 0; k < numSamples; ++k) {
        frame[k] = samples[k];
    }
}
/* Subtract the frame's arithmetic mean from every sample (DC removal). */
static inline void remove_mean(sample_t *frame, int numSamples) {
    sample_t total = 0;
    for (int k = 0; k < numSamples; ++k) {
        total += frame[k];
    }
    const sample_t mean = total / numSamples;
    for (int k = 0; k < numSamples; ++k) {
        frame[k] -= mean;
    }
}
/* In-place pre-emphasis filter: frame[n] -= 0.95 * frame[n-1].
 * Processed right-to-left so each update reads the original x[n-1];
 * frame[0] is left unchanged. */
static inline void preemphasis(sample_t *frame, int numSamples) {
    for (int n = numSamples - 1; n > 0; --n) {
        frame[n] -= .95 * frame[n - 1];
    }
}
/* Multiply the frame element-wise by the precomputed Hamming window. */
static inline void apply_hamming(sample_t *hamming, sample_t *frame, int numSamples) {
    for (int k = 0; k < numSamples; ++k) {
        frame[k] *= hamming[k];
    }
}
/*
 * Order-P linear-prediction analysis over a sliding window of sgn:
 * window length windowLengthMs, hop offsetLengthMs (milliseconds,
 * converted to sample counts via the signal's sample rate).  Returns a
 * newly created Predictor holding one gain-normalized autocorrelation
 * vector per frame, or 0 on error (signal too short / allocation failure).
 * Caller owns the returned Predictor.
 */
Predictor *lpa_on_signal(int P, int windowLengthMs, int offsetLengthMs, Sgn *sgn, int verbose) {
sample_t *signal = sgn->samples;
const long numSamples = sgn->numSamples;
const long sampleRate = sgn->sampleRate;
// number of samples corresponding to windowLengthMs:
const int winSize = (int) ((windowLengthMs * sampleRate) / 1000);
// number of samples corresponding to offsetLengthMs:
const int offset = (int) ((offsetLengthMs * sampleRate) / 1000);
if (winSize > numSamples) {
fprintf(stderr, "ERROR: lpa_on_signal: signal too short\n");
return 0;
}
// total number of frames:
int T = (int) (numSamples - (winSize - offset)) / offset;
// discard last section if incomplete:
if ((T - 1) * offset + winSize > numSamples) {
T--;
}
Predictor *predictor = prd_create(T, P, ""); // "" = unknown className
if (!predictor) {
fprintf(stderr, "lpa_on_signal: cannot get predictor object\n");
return 0;
}
if (verbose) {
printf("lpa_on_signal: P=%d numSamples=%ld sampleRate=%ld winSize=%d offset=%d T=%d\n",
P, numSamples, sampleRate, winSize, offset, T);
}
// Hamming window coefficients, shared read-only across all frames:
sample_t hamming[winSize];
create_hamming(hamming, winSize);
// Frames are independent: iteration t writes only predictor->vectors[t]
// and its own stack VLAs, so the loop parallelizes without locking.
#ifdef PAR
#pragma omp parallel for
#endif
for (int t = 0; t < T; ++t) {
sample_t *samples = signal + t * offset;
// perform linear prediction to each frame:
sample_t frame[winSize]; // per-iteration scratch copy of the frame
fill_frame(samples, winSize, frame);
remove_mean(frame, winSize);
preemphasis(frame, winSize);
apply_hamming(hamming, frame, winSize);
// do LPA:
sample_t reflex[P + 1]; // reflection coefficients
sample_t pred[P + 1]; // prediction coefficients
sample_t errPred; // prediction error
sample_t *vector = predictor->vectors[t];
int res_lpca = lpca(frame, winSize, P, vector, reflex, pred, &errPred);
if (0 != res_lpca) {
fprintf(stderr, "ERROR: lpa_on_signal: lpca error = %d\n", res_lpca);
//break; error: break statement used with OpenMP for loop
}
// normalize autocorrelation sequence by gain:
if (errPred != 0.) {
sample_t *v = vector;
sample_t *const limit = v + P;
while (v <= limit) {
*v++ /= errPred;
}
}
}
if (verbose) {
printf("lpa_on_signal: %d total frames processed\n", T);
}
return predictor;
}
|
nusPerfect.cpp.pluto.c | #include <omp.h>
#pragma warning(disable : 4996)
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#define _CRT_SECURE_NO_WARNINGS
#define min(x,y) ((x) < (y) ? (x) : (y))
#define max(x,y) ((x) > (y) ? (x) : (y))
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) (((n)<0) ? -((-(n)+(d)-1)/(d)) : (n)/(d))
#define S0(a, i, j, k) c[i][j] = c[i][k] + c[k][j]
//#define match(b1, b2) (((b1)+(b2)) == 3 ? 1 : 0)
#define sigma(i, j) (match(seq[i], seq[j]))
/* Return the larger of two scores (ties favor s1, which is the same value). */
int max_score(int s1, int s2)
{
return (s1 >= s2) ? s1 : s2;
}
/* Return the maximum of three scores. */
int max_sc(int s1, int s2, int s3) {
const int m12 = (s1 >= s2) ? s1 : s2;
return (m12 >= s3) ? m12 : s3;
}
/*
 * Test whether two one-hot encoded RNA bases can pair.
 * Codes: 'A' -> 1, 'G' -> 2, 'C' -> 4, 'U' -> 8.
 * Pairing pairs and their code sums:
 *   G-C -> 2+4 = 6,  A-U -> 1+8 = 9,  G-U (wobble) -> 2+8 = 10.
 * No other pair of valid codes produces these sums, so testing the sum
 * is equivalent to enumerating the pairs explicitly.
 * Returns 1 if the bases pair, 0 otherwise.
 */
int match(const int e1, const int e2)
{
const int s = e1 + e2;
return (s == 6) || (s == 9) || (s == 10);
}
void printMatrix(int**, int, int);
int ** getFullCopy(int ** table, int N);
int** allocateMatrix(int);
void deallocateMatrix(int**, int);
void write_results_full(int , double , char );
void write_results(int , double );
/*
 * Nussinov RNA-folding dynamic program, tiled variant generated by the
 * PLUTO polyhedral compiler from the "perfectly nested, no-if" loop form.
 * table : n x n score matrix used as the starting state (deep-copied)
 * n     : sequence length
 * seq   : one-hot encoded sequence, read through sigma()/match()
 * Side effects: prints "PERNIF: <time>", appends to results.txt, and
 * writes the final matrix to file "nontiled1".
 * NOTE(review): the t* loop bounds are machine-generated; do not hand-edit.
 */
void computeDYN1PerfectNoIf(int** table, int n, int *seq) {
int** S = getFullCopy(table, n);
double start = omp_get_wtime();
//Listing 1.2: Perfectly nested Nussinov loops
/* t1..t10 / lb* / ub* are PLUTO-generated tile and point iterators */
int t1, t2, t3, t4, t5, t6, t7, t8, t9, t10;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
if (n >= 2) {
/* outer wavefront over tiles (serial: carries the DP dependence) */
for (t2=ceild(-n-23,26);t2<=0;t2++) {
lbp=max(0,ceild(-13*t2-26,15));
ubp=floord(n-1,30);
/* tiles on the same wavefront are independent -> parallel */
#pragma omp parallel for private(lbv,ubv,t5,t6,t7,t8,t9,t10)
for (t4=lbp;t4<=ubp;t4++) {
for (t5=max(max(26*t2,-n+2),-30*t4-28);t5<=min(0,26*t2+25);t5++) {
for (t7=max(30*t4,-t5+1);t7<=min(n-1,30*t4+29);t7++) {
for (t9=-t5;t9<=t7-1;t9++) {
/* Nussinov recurrence: best of split at t9, current cell, and pairing */
S[-t5][t7] = max_sc(S[-t5][t9] + S[t9+1][t7], S[-t5][t7], max_score(S[-t5][t7], S[-t5+1][t7-1] + sigma(-t5, t7)));;
}
}
}
}
}
}
double execution_time = omp_get_wtime() - start;
printf("PERNIF: %lf\n", execution_time);
write_results_full(n, execution_time, '\n');
printMatrix(S, n, 1);
deallocateMatrix(S, n);
}
/*
 * Nussinov RNA-folding dynamic program, tiled variant generated by the
 * PLUTO polyhedral compiler from the "perfectly nested with ifs" form.
 * Same contract as computeDYN1PerfectNoIf: deep-copies table, runs the DP,
 * prints "PERWIF: <time>", appends to results.txt, writes file "nontiled2".
 * NOTE(review): all loop bounds below are machine-generated; do not hand-edit.
 */
void computeDYN2PerfectIf(int** table, int n, int *seq) {
int** S = getFullCopy(table, n);
double start = omp_get_wtime();
//Listing 1.2: Perfectly nested Nussinov loops
int t1, t2, t3, t4, t5, t6;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
if (n >= 3) {
/* t1 = diagonal (subsequence length) index; carries the DP dependence */
for (t1=1;t1<=n-2;t1++) {
lbp=ceild(t1-25,26);
ubp=floord(-t1+2*n-2,26);
/* tiles within one diagonal are independent -> parallel */
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(0,ceild(-t1+13*t2-28,30)),ceild(26*t2-n-28,30));t3<=min(floord(-t1+n-1,30),floord(-t1+26*t2+25,60));t3++) {
if (t1 >= 2) {
for (t4=max(30*t3,-t1+13*t2+1);t4<=min(min(-2*t1+n,30*t3+29),-t1+13*t2+13);t4++) {
lbv=max(26*t2,t1+2*t4);
ubv=2*t1+2*t4-2;
/* vendor hints: inner loop has no loop-carried dependence */
#pragma ivdep
#pragma vector always
for (t5=lbv;t5<=ubv;t5++) {
S[t4][(-t4+t5)] = max_sc(S[t4][-t1 + (-t4+t5)] + S[-t1 + (-t4+t5) + 1][(-t4+t5)], S[t4][(-t4+t5)], S[t4 + 1][(-t4+t5) - 1] + sigma(t4, (-t4+t5)));;
S[t4][(-t4+t5)] = max_sc(S[t4][t1 + t4 - 1] + S[t1 + t4 - 1 + 1][(-t4+t5)], S[t4][(-t4+t5)], S[t4 + 1][(-t4+t5) - 1] + sigma(t4, (-t4+t5)));;
}
S[t4][(2*t1+t4-1)] = max_sc(S[t4][t1 + t4 - 1] + S[t1 + t4 - 1 + 1][(2*t1+t4-1)], S[t4][(2*t1+t4-1)], S[t4 + 1][(2*t1+t4-1) - 1] + sigma(t4, (2*t1+t4-1)));;
}
}
for (t4=max(max(30*t3,-2*t1+n+1),26*t2-n+1);t4<=min(min(30*t3+29,-t1+n-1),-t1+13*t2+13);t4++) {
lbv=max(26*t2,t1+2*t4);
ubv=t4+n-1;
#pragma ivdep
#pragma vector always
for (t5=lbv;t5<=ubv;t5++) {
S[t4][(-t4+t5)] = max_sc(S[t4][-t1 + (-t4+t5)] + S[-t1 + (-t4+t5) + 1][(-t4+t5)], S[t4][(-t4+t5)], S[t4 + 1][(-t4+t5) - 1] + sigma(t4, (-t4+t5)));;
S[t4][(-t4+t5)] = max_sc(S[t4][t1 + t4 - 1] + S[t1 + t4 - 1 + 1][(-t4+t5)], S[t4][(-t4+t5)], S[t4 + 1][(-t4+t5) - 1] + sigma(t4, (-t4+t5)));;
}
}
for (t4=max(max(30*t3,-t1+13*t2+14),26*t2-n+1);t4<=min(min(floord(-t1+26*t2+25,2),30*t3+29),-t1+n-1);t4++) {
lbv=max(26*t2,t1+2*t4);
ubv=min(26*t2+25,t4+n-1);
#pragma ivdep
#pragma vector always
for (t5=lbv;t5<=ubv;t5++) {
S[t4][(-t4+t5)] = max_sc(S[t4][-t1 + (-t4+t5)] + S[-t1 + (-t4+t5) + 1][(-t4+t5)], S[t4][(-t4+t5)], S[t4 + 1][(-t4+t5) - 1] + sigma(t4, (-t4+t5)));;
S[t4][(-t4+t5)] = max_sc(S[t4][t1 + t4 - 1] + S[t1 + t4 - 1 + 1][(-t4+t5)], S[t4][(-t4+t5)], S[t4 + 1][(-t4+t5) - 1] + sigma(t4, (-t4+t5)));;
}
}
/* first diagonal (adjacent bases) handled separately */
if (t1 == 1) {
for (t4=max(13*t2,30*t3);t4<=min(min(n-2,13*t2+12),30*t3+29);t4++) {
S[t4][(t4+1)] = max_sc(S[t4][1 + t4 - 1] + S[1 + t4 - 1 + 1][(t4+1)], S[t4][(t4+1)], S[t4 + 1][(t4+1) - 1] + sigma(t4, (t4+1)));;
}
}
}
}
}
}
double execution_time = omp_get_wtime() - start;
printf("PERWIF: %lf\n", execution_time);
write_results(n, execution_time);
printMatrix(S, n, 2);
deallocateMatrix(S, n);
}
/*
 * Write an N x N matrix to the text file "nontiled<fileno>", one row per
 * line, values space-separated.
 * Fixes: the old char[10] buffer overflowed via sprintf for fileno >= 10
 * ("nontiled" is already 8 chars); fopen result was never checked.
 */
void printMatrix(int** matrix, int N, int fileno) {
    char filename[32];
    snprintf(filename, sizeof filename, "nontiled%d", fileno);
    FILE* f = fopen(filename, "wt");
    if (!f) {
        fprintf(stderr, "printMatrix: cannot open %s for writing\n", filename);
        return;
    }
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++)
            fprintf(f, "%d ", matrix[i][j]);
        fprintf(f, "\n");
    }
    fclose(f);
}
/* Deep-copy an N x N matrix into freshly allocated storage
 * (caller releases it with deallocateMatrix). */
int **getFullCopy(int ** table, int N)
{
    int **copy = allocateMatrix(N);
    for (int row = 0; row < N; row++) {
        const int *src = table[row];
        int *dst = copy[row];
        for (int col = 0; col < N; col++)
            dst[col] = src[col];
    }
    return copy;
}
/*
 * Allocate an uninitialized N x N int matrix as an array of row pointers.
 * Fixes: malloc results were never checked (CERT MEM32-C); since no caller
 * checks for NULL, an allocation failure now aborts with a diagnostic
 * instead of crashing later on a null dereference.
 * Free with deallocateMatrix().
 */
int** allocateMatrix(int N) {
    int** t = (int**)malloc(sizeof(int*) * N);
    if (!t) {
        fprintf(stderr, "allocateMatrix: out of memory (N=%d)\n", N);
        exit(EXIT_FAILURE);
    }
    for (int i = 0; i < N; i++) {
        t[i] = (int*)malloc(sizeof(int) * N);
        if (!t[i]) {
            fprintf(stderr, "allocateMatrix: out of memory (row %d of %d)\n", i, N);
            exit(EXIT_FAILURE);
        }
    }
    return t;
}
/*
 * Allocate an uninitialized int vector of length N; caller frees it.
 * Fixes: malloc result was never checked; aborts with a diagnostic on
 * out-of-memory since callers never test for NULL.
 */
int* allocateVector(int N) {
    int* t = (int*)malloc(sizeof(int) * N);
    if (!t) {
        fprintf(stderr, "allocateVector: out of memory (N=%d)\n", N);
        exit(EXIT_FAILURE);
    }
    return t;
}
/* Release an N-row matrix previously created by allocateMatrix:
 * frees every row, then the row-pointer array itself. */
void deallocateMatrix(int **t, int N) {
    int row = N;
    while (row-- > 0)
        free(t[row]);
    free(t);
}
/*
 * Append one "<n>:<time><end_char>" record to results.txt.
 * Fixes: the fopen result was used unchecked, so a failure to open the
 * file (permissions, read-only filesystem) dereferenced NULL.
 */
void write_results_full(int n, double execution_time, char end_char)
{
    FILE* f = fopen("results.txt", "at");
    if (!f) {
        fprintf(stderr, "write_results_full: cannot open results.txt for append\n");
        return;
    }
    fprintf(f, "%d:%lf%c", n, execution_time, end_char);
    fclose(f);
}
/* Convenience wrapper: append a record terminated by ';' (the mid-line
 * separator) instead of a newline. */
void write_results(int n, double execution_time)
{
    const char separator = ';';
    write_results_full(n, execution_time, separator);
}
/*
 * Map a nucleotide character to its one-hot code:
 *   'A' -> 1, 'G' -> 2, 'C' -> 4, 'U' -> 8.
 * Any other character maps to 16, a sentinel that match() never pairs.
 */
int getValue(const char c)
{
    switch (c) {
    case 'A': return 1;
    case 'G': return 2;
    case 'C': return 4;
    case 'U': return 8;
    default:  return 16;
    }
}
#define PERFORMANCE_TEST 1
/*
 * Driver: build a zeroed score matrix and an encoded RNA sequence, then
 * run the Nussinov variants for one problem size.
 * Fixes:
 *  - Random sequence generation used `1 << (rand()%4+1)`, producing codes
 *    {2,4,8,16}: base 'A' (code 1) could never occur and 16 is the invalid
 *    "unknown" sentinel. Valid one-hot codes are 1<<(0..3) = {1,2,4,8}.
 *  - `seqTest` was declared unconditionally but only used in the
 *    functional-test build; it is now scoped to that branch.
 */
int main(void) {
#if PERFORMANCE_TEST==1
    const int ZMAX = 1600;  /* benchmark size */
#else
    const int ZMAX = 16;    /* small functional-test size */
#endif
    /* allocateMatrix does not clear memory, so zero the score matrix
       (diagonal included) explicitly */
    int** graph = allocateMatrix(ZMAX);
    int* seq = allocateVector(ZMAX);
    for (int i = 0; i < ZMAX; i++)
        for (int j = 0; j < ZMAX; j++)
            graph[i][j] = 0;
#if PERFORMANCE_TEST==1
    /* random sequence of valid one-hot base codes {1,2,4,8} */
    for (int i = 0; i < ZMAX; i++)
    {
        seq[i] = 1 << (rand() % 4);
    }
#else
    const char* seqTest = "GCGUCCACGGCUAGCU"; /* 16 bases == ZMAX */
    for (int i = 0; i < ZMAX; i++)
        seq[i] = getValue(seqTest[i]);
#endif
    int N = ZMAX - 10;
    //while (N < ZMAX)
    //{
    N += 10;
    /* NOTE(review): these four functions are not defined in this file --
       presumably provided by a sibling translation unit; verify at link time. */
    computeDYN1Imperfect(graph, N, seq);
    computeDYN2Perfect(graph, N, seq);
    computeDYN3ImperfA(graph, N, seq);
    computeDYN4ImperfB(graph, N, seq);
    //N += 10;
    //}
    deallocateMatrix(graph, ZMAX);
    free(seq);
    return 0;
}
|
omp.cats | /* ****** ****** */
/*
(*
** Permission to use, copy, modify, and distribute this software for any
** purpose with or without fee is hereby granted, provided that the above
** copyright notice and this permission notice appear in all copies.
**
** THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
** WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
** MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
** ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
** WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
** ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
** OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*)
*/
/* ****** ****** */
/*
** Author: Brandon Barker
** Authoremail: bbarkerATgmailDOTcom
** Start time: May, 2014
*/
/* ****** ****** */
/*
** Author: Hongwei Xi
** Authoremail: gmhwxiATgmailDOTcom
** Start time: June, 2014
*/
/* ****** ****** */
#ifndef OPENMP_OMP_CATS
#define OPENMP_OMP_CATS
/* ****** ****** */
#include <omp.h>
/* ****** ****** */
//
// BB: Utilities
//
/* helper macros: stringize and token-level "application", used to build
 * the _Pragma() operand strings below */
#define atscntrb_openmp_STR(x) #x
#define atscntrb_openmp_STRINGIFY(x) atscntrb_openmp_STR(x)
#define atscntrb_openmp_CONCATFUN(X, Y) X ( Y )
//
/* ****** ****** */
/* thin ATS-facing wrappers over the corresponding omp.h runtime calls */
#define \
atscntrb_openmp_omp_get_num_procs() omp_get_num_procs()
/* ****** ****** */
#define \
atscntrb_openmp_omp_get_num_threads() omp_get_num_threads()
#define \
atscntrb_openmp_omp_set_num_threads(n) omp_set_num_threads(n)
/* ****** ****** */
#define \
atscntrb_openmp_omp_get_thread_num() omp_get_thread_num()
/* ****** ****** */
//
/* expands to: #pragma omp barrier */
#define \
atscntrb_openmp_pragma_omp_barrier() \
_Pragma(atscntrb_openmp_STRINGIFY(omp barrier))
//
/* ****** ****** */
//
// #pragma omp parallel private(tid)
//
/* opens "#pragma omp parallel private(tid)" followed by an opening brace;
 * must be paired with the _end macro below, which supplies the close brace */
#define \
atscntrb_openmp_pragma_omp_parallel_private_beg(tid) \
_Pragma(atscntrb_openmp_STRINGIFY(atscntrb_openmp_CONCATFUN(omp parallel private, tid))) {
//
#define \
atscntrb_openmp_pragma_omp_parallel_private_end(tid) }
//
/* ****** ****** */
#endif // ifndef OPENMP_OMP_CATS
/* ****** ****** */
/* end of [omp.cats] */
|
AssemblerParallel.h | //
// AssemblerParrallel.h
// Gauss
//
// Created by David Levin on 6/6/17.
//
//
#ifndef AssemblerParallel_h
#define AssemblerParallel_h
#ifdef GAUSS_OPENMP
#include <omp.h>
#include <Assembler.h>
#include <CoreDefines.h>
#include <Utilities.h>
#include <UtilitiesOMP.h>
#include <World.h>
namespace Gauss {
//A parallel assembler just uses one serial assembler per available thread
// A parallel assembler: holds one SerialAssembler per OpenMP thread so that
// threads can assemble into private storage without locking; finalize()
// then sums the per-thread results into m_assembled serially.
template<typename SerialAssembler>
class AssemblerParallelImpl : public AssemblerBase {
public:
using MatrixType = typename SerialAssembler::MatrixType;
// Sizes the per-thread assembler pool to the number of available threads
// (omp_thread_count() is a project helper from UtilitiesOMP -- presumably
// the max OpenMP thread count; TODO confirm).
AssemblerParallelImpl() {
//Number of available threads
std::cout<<"Number of Available Threads: "<<omp_thread_count()<<"\n";
m_serialAssemblers.resize(omp_thread_count());
}
// Resize + zero the assembled matrix and initialize every per-thread
// assembler with the same dimensions and offsets.
inline void init(unsigned int m, unsigned int n=1, unsigned int rowOffset = 0, unsigned int colOffset = 0) {
//do everything in parallel
m_assembled.resize(m,n); // TODO need conditional to deal with case where MatrixType is a vector
m_assembled.setZero();
for(unsigned int ii=0; ii < m_serialAssemblers.size(); ++ii) {
m_serialAssemblers[ii].init(m,n, rowOffset, colOffset);
}
}
// Finalize each per-thread assembler in parallel, then accumulate their
// matrices into m_assembled serially (the += loop is intentionally
// outside the parallel region to avoid a data race on m_assembled).
inline void finalize() {
//build giant triplets list and set it up
//I think I want to assemble separately then add (split up my setTriplets time)
// NOTE(review): unsigned loop index with "omp for" requires OpenMP >= 3.0
#pragma omp parallel
{
#pragma omp for
for(unsigned int ii=0; ii < m_serialAssemblers.size(); ++ii) {
m_serialAssemblers[ii].finalize();
}
}
for(unsigned int ii=0; ii<m_serialAssemblers.size(); ++ii) {
m_assembled += (*m_serialAssemblers[ii]);
}
}
//convenient overloads
// Access the accumulated result (valid after finalize()).
inline auto & getMatrix() {
return m_assembled;
}
// Fallback (i,j) assembly: routes everything through assembler 0, i.e.
// effectively single-threaded.
template<typename I, typename J, typename Input>
inline void assemble(I &i, J &j, Input &toAssembler) {
m_serialAssemblers[0].getImpl().assemble(i,j, toAssembler); //default single threaded behavior
//exit(1);
}
// Fallback (i) assembly: same single-threaded routing as above.
template<typename I, typename Input>
inline void assemble(I &i, Input &toAssembler) {
m_serialAssemblers[0].getImpl().assemble(i, toAssembler); //default single threaded behavior
//exit(1);
}
//next step, this needs to change to take in a list of i's, j's and sizes
//take in std::vectors for indices and size
// Propagate a new offset to the base class and to every per-thread assembler.
inline void setOffset(unsigned int rowOffset, unsigned int colOffset = 0) {
AssemblerBase::setOffset(rowOffset, colOffset);
#pragma omp parallel
{
#pragma omp for
for(unsigned int ii=0; ii < m_serialAssemblers.size(); ++ii) {
m_serialAssemblers[ii].setOffset(rowOffset, colOffset);
}
}
}
// Direct access to a thread's private assembler (no bounds check).
inline SerialAssembler & operator[](unsigned int threadId) {
return m_serialAssemblers[threadId];
}
SerialAssembler & getAssembler(unsigned int threadId) {
return m_serialAssemblers[threadId];
}
//For MVP assemblers
// Broadcast the input vector x to every per-thread assembler.
inline void setX(MatrixType &x) {
#pragma omp parallel
{
#pragma omp for
for(unsigned int ii=0; ii < m_serialAssemblers.size(); ++ii) {
(m_serialAssemblers[ii].getImpl().setX(x));
}
}
}
//handle operators
// Scale every per-thread assembler in place; returns *this for chaining.
template<typename Params>
inline AssemblerParallelImpl & operator*=(Params &x) {
#pragma omp parallel
{
#pragma omp for
for(unsigned int ii=0; ii < m_serialAssemblers.size(); ++ii) {
(m_serialAssemblers[ii].getImpl())*=x;
}
}
return *this;
}
protected:
// One private assembler per thread; indexed by OpenMP thread id.
std::vector<SerialAssembler> m_serialAssemblers;
//At some point I should figure out this type based on the constituent assemblers but for now just assume Eigen
typename SerialAssembler::ImplType::MatrixType m_assembled;
private:
};
template<typename DataType, typename SerialAssembler>
using AssemblerParallel = Assembler<DataType, AssemblerParallelImpl<SerialAssembler> >;
template<typename DataType, typename SerialAssembler>
struct IsParallel<Assembler<DataType, AssemblerParallelImpl<SerialAssembler> > > {
public:
constexpr static bool value = true;
};
}
#else
template<typename DataType, typename SerialAssembler>
using AssemblerParallel = SerialAssembler;
#endif //OPENMP is Available
#endif /* AssemblerParrallel_h */
|
blas2_sparseblockmat.h | #ifndef _DG_BLAS_SPARSEBLOCKMAT_
#define _DG_BLAS_SPARSEBLOCKMAT_
#include "tensor_traits.h"
#include "tensor_traits.h"
#include "sparseblockmat.h"
#include "sparseblockmat.cuh"
//
///@cond
namespace dg{
namespace blas2{
namespace detail{
// Fallback transfer into a sparse block matrix: rely on Matrix2's
// explicit conversion constructor from Matrix1.
template<class Matrix1, class Matrix2>
inline void doTransfer( const Matrix1& x, Matrix2& y, AnyMatrixTag, SparseBlockMatrixTag)
{
    y = static_cast<Matrix2>(x);
}
// Component-level symv: y = alpha*M*x + beta*y on a single shared vector.
// Validates vector sizes against the matrix dimensions, then hands the raw
// pointers to the matrix's own symv kernel.
template< class Matrix, class Vector1, class Vector2>
inline void doSymv_dispatch(
get_value_type<Vector1> alpha,
Matrix&& m,
const Vector1& x,
get_value_type<Vector1> beta,
Vector2& y,
SparseBlockMatrixTag,
SharedVectorTag,
AnyPolicyTag)
{
    using value_type = get_value_type<Vector1>;
    const int nx = x.size(), ny = y.size();
    if( nx != m.total_num_cols())
        throw Error( Message(_ping_)<<"x has the wrong size "<<x.size()<<" and not "<<m.total_num_cols());
    if( ny != m.total_num_rows())
        throw Error( Message(_ping_)<<"y has the wrong size "<<y.size()<<" and not "<<m.total_num_rows());
    const value_type * in  = thrust::raw_pointer_cast(x.data());
    value_type       * out = thrust::raw_pointer_cast(y.data());
    m.symv( SharedVectorTag(), get_execution_policy<Vector1>(), alpha, in, beta, out);
}
// Recursive vectors: apply the same matrix to every component, dispatching
// each on its own tensor category and execution policy.
template< class Matrix, class Vector1, class Vector2>
inline void doSymv_dispatch(
get_value_type<Vector1> alpha,
Matrix&& m,
const Vector1& x,
get_value_type<Vector1> beta,
Vector2& y,
SparseBlockMatrixTag,
RecursiveVectorTag,
AnyPolicyTag)
{
    const unsigned components = x.size();
    for( unsigned k=0; k<components; k++)
        doSymv_dispatch( alpha, std::forward<Matrix>(m), x[k], beta, y[k],
            SparseBlockMatrixTag(),
            get_tensor_category<typename Vector1::value_type>(),
            get_execution_policy<Vector1>());
}
#ifdef _OPENMP
// OpenMP specialization for recursive vectors: if not already inside a
// parallel region, open one here so the per-component kernels can use
// orphaned worksharing; otherwise recurse serially within the existing
// region. NOTE(review): the loop inside the parallel region is executed by
// every thread -- presumably the inner component kernel contains the actual
// "omp for" worksharing; confirm against the SharedVectorTag symv path.
template< class Matrix, class Vector1, class Vector2>
inline void doSymv_dispatch(
get_value_type<Vector1> alpha,
Matrix&& m,
const Vector1& x,
get_value_type<Vector1> beta,
Vector2& y,
SparseBlockMatrixTag,
RecursiveVectorTag,
OmpTag)
{
if( !omp_in_parallel())
{
#pragma omp parallel
{
for(unsigned i=0; i<x.size(); i++)
doSymv_dispatch( alpha, std::forward<Matrix>(m), x[i], beta, y[i],
SparseBlockMatrixTag(),
get_tensor_category<typename Vector1::value_type>(),
OmpTag());
}
}
else
for(unsigned i=0; i<x.size(); i++)
doSymv_dispatch( alpha, std::forward<Matrix>(m), x[i], beta, y[i],
SparseBlockMatrixTag(),
get_tensor_category<typename Vector1::value_type>(),
OmpTag());
}
#endif//_OPENMP
// Entry point for y = alpha*M*x + beta*y on a sparse block matrix:
// re-dispatch on the top-level vector's tensor category and execution policy.
template< class Matrix, class Vector1, class Vector2>
inline void doSymv(
get_value_type<Vector1> alpha,
Matrix&& m,
const Vector1& x,
get_value_type<Vector1> beta,
Vector2& y,
SparseBlockMatrixTag)
{
    using category = get_tensor_category<Vector1>;
    using policy   = get_execution_policy<Vector1>;
    doSymv_dispatch( alpha, std::forward<Matrix>(m), x, beta, y,
        SparseBlockMatrixTag(), category(), policy());
}
// Convenience overload: y = M*x, i.e. alpha = 1 and beta = 0.
template< class Matrix, class Vector1, class Vector2>
inline void doSymv(
Matrix&& m,
const Vector1& x,
Vector2& y,
SparseBlockMatrixTag)
{
    doSymv( 1., std::forward<Matrix>(m), x, 0., y, SparseBlockMatrixTag());
}
} //namespace detail
} //namespace blas2
} //namespace dg
///@endcond
//
#endif//_DG_BLAS_SPARSEBLOCKMAT_
|
GB_binop__lt_uint64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__lt_uint64)
// A.*B function (eWiseMult): GB (_AemultB)
// A.*B function (eWiseMult): GB (_AemultB_02__lt_uint64)
// A.*B function (eWiseMult): GB (_AemultB_03__lt_uint64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__lt_uint64)
// A*D function (colscale): GB (_AxD__lt_uint64)
// D*A function (rowscale): GB (_DxB__lt_uint64)
// C+=B function (dense accum): GB (_Cdense_accumB__lt_uint64)
// C+=b function (dense accum): GB (_Cdense_accumb__lt_uint64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lt_uint64)
// C=scalar+B GB (_bind1st__lt_uint64)
// C=scalar+B' GB (_bind1st_tran__lt_uint64)
// C=A+scalar GB (_bind2nd__lt_uint64)
// C=A'+scalar GB (_bind2nd_tran__lt_uint64)
// C type: bool
// A type: uint64_t
// B,b type: uint64_t
// BinaryOp: cij = (aij < bij)
#define GB_ATYPE \
uint64_t
#define GB_BTYPE \
uint64_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint64_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x < y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LT || GxB_NO_UINT64 || GxB_NO_LT_UINT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// (disabled for LT: "less than" is not an accumulable op, so this
// dense-ewise3-accum kernel is compiled out for this operator)
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Generated kernel: C = A<B (elementwise LT), all three matrices dense.
// Returns GrB_NO_VALUE when the operator is disabled via GxB_NO_* flags.
GrB_Info GB (_Cdense_ewise3_noaccum__lt_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// body comes from the shared dense-ewise3 template, specialized by the
// GB_* macros defined at the top of this file
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// Generated kernel: C += B (sparse B accumulated into dense C).
// The template body is compiled out (#if 0) because LT cannot serve as an
// accumulator for this type combination; the stub still reports success.
GrB_Info GB (_Cdense_accumB__lt_uint64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// Generated kernel: C += b (scalar accumulated into dense C).
// As with accumB above, the body is compiled out (#if 0) for the LT op;
// the stub reports success without modifying C.
GrB_Info GB (_Cdense_accumb__lt_uint64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type uint64_t
uint64_t bwork = (*((uint64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// Generated kernel: C = A*D, column scaling by a diagonal matrix D,
// with cij = (aij < djj); C is boolean.
GrB_Info GB (_AxD__lt_uint64)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// Generated kernel: C = D*B, row scaling by a diagonal matrix D,
// with cij = (dii < bij); C is boolean.
GrB_Info GB (_DxB__lt_uint64)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// Generated kernel: eWiseAdd C = A+B or C<M> = A+B with the LT operator
// applied where both entries are present (union pattern).
GrB_Info GB (_AaddB__lt_uint64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// per-matrix slicing workspaces, released by GB_FREE_WORK below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// Generated kernel: eWiseMult C = A.*B or C<M> = A.*B (intersection
// pattern), method 01.
GrB_Info GB (_AemultB_01__lt_uint64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// Generated kernel: eWiseMult C<#> = A.*B where A is sparse/hypersparse
// and B is bitmap/full, method 02. The GB_BINOP_FLIP branch handles
// non-commutative ops with no flipped variant; for LT it is 0, so the
// unflipped template is always used.
GrB_Info GB (_AemultB_02__lt_uint64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// Generated kernel: eWiseMult C<M> = A.*B where M is sparse/hypersparse
// and both A and B are bitmap/full, method 03.
GrB_Info GB (_AemultB_03__lt_uint64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// Generated kernel: eWiseMult with a bitmap-format result:
// C=A.*B, C<M>=A.*B, or C<!M>=A.*B.
GrB_Info GB (_AemultB_bitmap__lt_uint64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Generated kernel: bind1st, Cx[p] = (x < Bx[p]) for every entry present
// in B, parallelized statically over the anz entries.
GrB_Info GB (_bind1st__lt_uint64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb, // bitmap of B; GBB treats NULL as all-present
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
uint64_t x = (*((uint64_t *) x_input)) ;
uint64_t *Bx = (uint64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries absent from the bitmap
if (!GBB (Bb, p)) continue ;
uint64_t bij = Bx [p] ;
Cx [p] = (x < bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Generated kernel: bind2nd, Cx[p] = (Ax[p] < y) for every entry present
// in A, parallelized statically over the anz entries.
GrB_Info GB (_bind2nd__lt_uint64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab, // bitmap of A; GBB treats NULL as all-present
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
uint64_t *Ax = (uint64_t *) Ax_input ;
uint64_t y = (*((uint64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries absent from the bitmap
if (!GBB (Ab, p)) continue ;
uint64_t aij = Ax [p] ;
Cx [p] = (aij < y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = Ax [pA] ; \
Cx [pC] = (x < aij) ; \
}
// Generated kernel: C = op(x, A'), transpose A while applying
// cij = (x < aij) via the GB_CAST_OP macro defined just above.
GrB_Info GB (_bind1st_tran__lt_uint64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t x = (*((const uint64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = Ax [pA] ; \
Cx [pC] = (aij < y) ; \
}
// Generated kernel: C = op(A', y), transpose A while applying
// cij = (aij < y) via the GB_CAST_OP macro defined just above.
GrB_Info GB (_bind2nd_tran__lt_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t y = (*((const uint64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
bt.c | /*--------------------------------------------------------------------
NAS Parallel Benchmarks 2.3 OpenMP C versions - BT
This benchmark is an OpenMP C version of the NPB BT code.
The OpenMP C versions are developed by RWCP and derived from the serial
Fortran versions in "NPB 2.3-serial" developed by NAS.
Permission to use, copy, distribute and modify this software for any
purpose with or without fee is hereby granted.
This software is provided "as is" without express or implied warranty.
Send comments on the OpenMP C versions to pdp-openmp@rwcp.or.jp
Information on OpenMP activities at RWCP is available at:
http://pdplab.trc.rwcp.or.jp/pdperf/Omni/
Information on NAS Parallel Benchmarks 2.3 is available at:
http://www.nas.nasa.gov/NAS/NPB/
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
Authors: R. Van der Wijngaart
T. Harris
M. Yarrow
OpenMP C version: S. Satoh
--------------------------------------------------------------------*/
#include "npb-C.h"
/* global variables */
#include "header.h"
/* function declarations */
static void add(void);
static void adi(void);
static void error_norm(double rms[5]);
static void rhs_norm(double rms[5]);
static void exact_rhs(void);
static void exact_solution(double xi, double eta, double zeta,
double dtemp[5]);
static void initialize(void);
static void lhsinit(void);
static void lhsx(void);
static void lhsy(void);
static void lhsz(void);
static void compute_rhs(void);
static void set_constants(void);
static void verify(int no_time_steps, char *cclass, boolean *verified);
static void x_solve(void);
static void x_backsubstitute(void);
static void x_solve_cell(void);
static void matvec_sub(double ablock[5][5], double avec[5], double bvec[5]);
static void matmul_sub(double ablock[5][5], double bblock[5][5],
double cblock[5][5]);
static void binvcrhs(double lhs[5][5], double c[5][5], double r[5]);
static void binvrhs(double lhs[5][5], double r[5]);
static void y_solve(void);
static void y_backsubstitute(void);
static void y_solve_cell(void);
static void z_solve(void);
static void z_backsubstitute(void);
static void z_solve_cell(void);
/*--------------------------------------------------------------------
program BT
c-------------------------------------------------------------------*/
int main(int argc, char **argv) {

  /*--------------------------------------------------------------------
  c  Driver for the BT benchmark: read the run parameters, do one
  c  untimed warm-up step, run niter timed ADI steps, then verify and
  c  report MFLOPS.
  c-------------------------------------------------------------------*/

  int niter, step, n3;
  int nthreads = 1;
  double navg, mflops;
  double tmax;
  boolean verified;
  char cclass;
  FILE *fp;

  /*--------------------------------------------------------------------
  c      Root node reads input file (if it exists) else takes
  c      defaults from parameters
  c-------------------------------------------------------------------*/
  printf("\n\n NAS Parallel Benchmarks 2.3 OpenMP C version"
	 " - BT Benchmark\n\n");

  /* Start from the compiled-in defaults; a successfully parsed input
     file overrides them below. */
  niter = NITER_DEFAULT;
  dt = DT_DEFAULT;
  grid_points[0] = PROBLEM_SIZE;
  grid_points[1] = PROBLEM_SIZE;
  grid_points[2] = PROBLEM_SIZE;

  fp = fopen("inputbt.data", "r");
  if (fp != NULL) {
    int ok, c;
    printf(" Reading from input file inputbt.data");
    /* Check every fscanf result: the original ignored them, so a short
       or garbled file left niter/dt/grid_points uninitialized (UB).
       The newline-skipping loops also stop at EOF so a truncated file
       cannot spin forever (fgetc returns EOF repeatedly). */
    ok = (fscanf(fp, "%d", &niter) == 1);
    while ((c = fgetc(fp)) != '\n' && c != EOF);
    ok = ok && (fscanf(fp, "%lg", &dt) == 1);
    while ((c = fgetc(fp)) != '\n' && c != EOF);
    ok = ok && (fscanf(fp, "%d%d%d",
		       &grid_points[0], &grid_points[1],
		       &grid_points[2]) == 3);
    fclose(fp);
    if (!ok) {
      /* Partial reads may have overwritten some values: restore all. */
      printf("\n Malformed inputbt.data. Using compiled defaults\n");
      niter = NITER_DEFAULT;
      dt = DT_DEFAULT;
      grid_points[0] = PROBLEM_SIZE;
      grid_points[1] = PROBLEM_SIZE;
      grid_points[2] = PROBLEM_SIZE;
    }
  } else {
    printf(" No input file inputbt.data. Using compiled defaults\n");
  }

  printf(" Size: %3dx%3dx%3d\n",
	 grid_points[0], grid_points[1], grid_points[2]);
  printf(" Iterations: %3d dt: %10.6f\n", niter, dt);

  /* Reject problem sizes larger than the statically compiled arrays. */
  if (grid_points[0] > IMAX ||
      grid_points[1] > JMAX ||
      grid_points[2] > KMAX) {
    printf(" %dx%dx%d\n", grid_points[0], grid_points[1], grid_points[2]);
    printf(" Problem size too big for compiled array sizes\n");
    exit(1);
  }

  set_constants();

#pragma omp parallel
  {
    initialize();
    lhsinit();
    exact_rhs();
    /*--------------------------------------------------------------------
    c      do one time step to touch all code, and reinitialize
    c-------------------------------------------------------------------*/
    adi();
    initialize();
  } /* end parallel */

  timer_clear(1);
  timer_start(1);

#pragma omp parallel firstprivate(niter) private(step)
  {
    for (step = 1; step <= niter; step++) {
      if (step%20 == 0 || step == 1) {
#pragma omp master
	printf(" Time step %4d\n", step);
      }
      adi();
    }
#if defined(_OPENMP)
    /* Only the master thread records the team size; read after the
       region ends, so no race on nthreads. */
#pragma omp master
    nthreads = omp_get_num_threads();
#endif /* _OPENMP */
  } /* end parallel */

  timer_stop(1);
  tmax = timer_read(1);

  verify(niter, &cclass, &verified);

  n3 = grid_points[0]*grid_points[1]*grid_points[2];
  navg = (grid_points[0]+grid_points[1]+grid_points[2])/3.0;
  /* Guard against division by a zero elapsed time. */
  if ( tmax != 0.0 ) {
    mflops = 1.0e-6*(double)niter*
      (3478.8*(double)n3-17655.7*pow2(navg)+28023.7*navg) / tmax;
  } else {
    mflops = 0.0;
  }
  c_print_results("BT", cclass, grid_points[0],
		  grid_points[1], grid_points[2], niter, nthreads,
		  tmax, mflops, " floating point",
		  verified, NPBVERSION,COMPILETIME, CS1, CS2, CS3, CS4, CS5,
		  CS6, "(none)");

  return 0;
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void add(void) {

  /*--------------------------------------------------------------------
  c  accumulate the computed update rhs into the solution vector u at
  c  every interior grid point (work shared across the outer loop)
  c-------------------------------------------------------------------*/

  int ii, jj, kk, mm;

#pragma omp for
  for (ii = 1; ii < grid_points[0]-1; ii++) {
    for (jj = 1; jj < grid_points[1]-1; jj++) {
      for (kk = 1; kk < grid_points[2]-1; kk++) {
	for (mm = 0; mm < 5; mm++) {
	  u[ii][jj][kk][mm] += rhs[ii][jj][kk][mm];
	}
      }
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void adi(void) {
  /* One complete ADI (Alternating Direction Implicit) time step:
     build the right hand side, then perform the three factored
     block-tridiagonal sweeps in x, y and z, and finally add the
     resulting update into u.  The call order is semantic: each solve
     consumes the rhs produced/updated by the previous phase. */
  compute_rhs();
  x_solve();
  y_solve();
  z_solve();
  add();
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void error_norm(double rms[5]) {

  /*--------------------------------------------------------------------
  c  this function computes the norm of the difference between the
  c  computed solution and the exact solution, per component, over the
  c  whole grid (boundaries included)
  c-------------------------------------------------------------------*/

  int i, j, k, m, d;
  double xi, eta, zeta, u_exact[5], diff;

  for (m = 0; m < 5; m++) {
    rms[m] = 0.0;
  }

  for (i = 0; i < grid_points[0]; i++) {
    xi = (double)i * dnxm1;
    for (j = 0; j < grid_points[1]; j++) {
      eta = (double)j * dnym1;
      for (k = 0; k < grid_points[2]; k++) {
	zeta = (double)k * dnzm1;
	exact_solution(xi, eta, zeta, u_exact);
	for (m = 0; m < 5; m++) {
	  /* "diff" (was "add") no longer shadows the function add() */
	  diff = u[i][j][k][m] - u_exact[m];
	  rms[m] += diff * diff;
	}
      }
    }
  }

  /* normalize by the interior extent of each direction, then sqrt */
  for (m = 0; m < 5; m++) {
    for (d = 0; d <= 2; d++) {
      rms[m] /= (double)(grid_points[d]-2);
    }
    rms[m] = sqrt(rms[m]);
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void rhs_norm(double rms[5]) {

  /*--------------------------------------------------------------------
  c  compute the RMS norm of the right hand side vector, per component,
  c  over the interior grid points
  c-------------------------------------------------------------------*/

  int i, j, k, d, m;
  double term;

  for (m = 0; m < 5; m++) {
    rms[m] = 0.0;
  }

  for (i = 1; i < grid_points[0]-1; i++) {
    for (j = 1; j < grid_points[1]-1; j++) {
      for (k = 1; k < grid_points[2]-1; k++) {
	for (m = 0; m < 5; m++) {
	  term = rhs[i][j][k][m];
	  rms[m] += term * term;
	}
      }
    }
  }

  /* normalize by the interior extent of each direction, then sqrt */
  for (m = 0; m < 5; m++) {
    for (d = 0; d <= 2; d++) {
      rms[m] /= (double)(grid_points[d]-2);
    }
    rms[m] = sqrt(rms[m]);
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void exact_rhs(void) {

  /*--------------------------------------------------------------------
  c  compute the right hand side based on exact solution:
  c  for each coordinate direction, evaluate the exact solution along a
  c  pencil of points, form flux differences plus a fourth-order
  c  artificial-dissipation term, and accumulate into "forcing".
  c  NOTE(review): ue, buf, cuf, q, and dtpp look like per-thread
  c  scratch pencils (this runs inside an omp parallel region with
  c  worksharing loops) -- confirm they are threadprivate in header.h.
  c-------------------------------------------------------------------*/

  double dtemp[5], xi, eta, zeta, dtpp;
  int m, i, j, k, ip1, im1, jp1, jm1, km1, kp1;

  /*--------------------------------------------------------------------
  c  initialize: clear the forcing array
  c-------------------------------------------------------------------*/
#pragma omp for
  for (i = 0; i < grid_points[0]; i++) {
    for (j = 0; j < grid_points[1]; j++) {
      for (k = 0; k < grid_points[2]; k++) {
	for (m = 0; m < 5; m++) {
	  forcing[i][j][k][m] = 0.0;
	}
      }
    }
  }

  /*--------------------------------------------------------------------
  c  xi-direction flux differences
  c-------------------------------------------------------------------*/
#pragma omp for
  for (j = 1; j < grid_points[1]-1; j++) {
    eta = (double)j * dnym1;
    for (k = 1; k < grid_points[2]-1; k++) {
      zeta = (double)k * dnzm1;

      /* fill the pencil along i: ue = exact state, buf = velocities
	 (buf[][0] = speed squared), cuf = u-velocity squared,
	 q = 0.5 * (momentum . velocity) */
      for (i = 0; i < grid_points[0]; i++) {
	xi = (double)i * dnxm1;
	exact_solution(xi, eta, zeta, dtemp);
	for (m = 0; m < 5; m++) {
	  ue[i][m] = dtemp[m];
	}
	dtpp = 1.0 / dtemp[0];
	for (m = 1; m <= 4; m++) {
	  buf[i][m] = dtpp * dtemp[m];
	}
	cuf[i] = buf[i][1] * buf[i][1];
	buf[i][0] = cuf[i] + buf[i][2] * buf[i][2] +
	  buf[i][3] * buf[i][3];
	q[i] = 0.5*(buf[i][1]*ue[i][1] + buf[i][2]*ue[i][2] +
		    buf[i][3]*ue[i][3]);
      }

      /* central flux differences + second-difference viscous terms */
      for (i = 1; i < grid_points[0]-1; i++) {
	im1 = i-1;
	ip1 = i+1;

	forcing[i][j][k][0] = forcing[i][j][k][0] -
	  tx2*(ue[ip1][1]-ue[im1][1])+
	  dx1tx1*(ue[ip1][0]-2.0*ue[i][0]+ue[im1][0]);

	forcing[i][j][k][1] = forcing[i][j][k][1] -
	  tx2 * ((ue[ip1][1]*buf[ip1][1]+c2*(ue[ip1][4]-q[ip1]))-
		 (ue[im1][1]*buf[im1][1]+c2*(ue[im1][4]-q[im1])))+
	  xxcon1*(buf[ip1][1]-2.0*buf[i][1]+buf[im1][1])+
	  dx2tx1*( ue[ip1][1]-2.0* ue[i][1]+ ue[im1][1]);

	forcing[i][j][k][2] = forcing[i][j][k][2] -
	  tx2 * (ue[ip1][2]*buf[ip1][1]-ue[im1][2]*buf[im1][1])+
	  xxcon2*(buf[ip1][2]-2.0*buf[i][2]+buf[im1][2])+
	  dx3tx1*( ue[ip1][2]-2.0* ue[i][2]+ ue[im1][2]);

	forcing[i][j][k][3] = forcing[i][j][k][3] -
	  tx2*(ue[ip1][3]*buf[ip1][1]-ue[im1][3]*buf[im1][1])+
	  xxcon2*(buf[ip1][3]-2.0*buf[i][3]+buf[im1][3])+
	  dx4tx1*( ue[ip1][3]-2.0* ue[i][3]+ ue[im1][3]);

	forcing[i][j][k][4] = forcing[i][j][k][4] -
	  tx2*(buf[ip1][1]*(c1*ue[ip1][4]-c2*q[ip1])-
	       buf[im1][1]*(c1*ue[im1][4]-c2*q[im1]))+
	  0.5*xxcon3*(buf[ip1][0]-2.0*buf[i][0]+buf[im1][0])+
	  xxcon4*(cuf[ip1]-2.0*cuf[i]+cuf[im1])+
	  xxcon5*(buf[ip1][4]-2.0*buf[i][4]+buf[im1][4])+
	  dx5tx1*( ue[ip1][4]-2.0* ue[i][4]+ ue[im1][4]);
      }

      /*--------------------------------------------------------------------
      c  Fourth-order dissipation: one-sided stencils at the two points
      c  next to each boundary, full 5-point stencil in the interior
      c-------------------------------------------------------------------*/
      for (m = 0; m < 5; m++) {
	i = 1;
	forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
	  (5.0*ue[i][m] - 4.0*ue[i+1][m] +ue[i+2][m]);
	i = 2;
	forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
	  (-4.0*ue[i-1][m] + 6.0*ue[i][m] -
	   4.0*ue[i+1][m] + ue[i+2][m]);
      }

      for (m = 0; m < 5; m++) {
	for (i = 1*3; i <= grid_points[0]-3*1-1; i++) {
	  forcing[i][j][k][m] = forcing[i][j][k][m] - dssp*
	    (ue[i-2][m] - 4.0*ue[i-1][m] +
	     6.0*ue[i][m] - 4.0*ue[i+1][m] + ue[i+2][m]);
	}
      }

      for (m = 0; m < 5; m++) {
	i = grid_points[0]-3;
	forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
	  (ue[i-2][m] - 4.0*ue[i-1][m] +
	   6.0*ue[i][m] - 4.0*ue[i+1][m]);
	i = grid_points[0]-2;
	forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
	  (ue[i-2][m] - 4.0*ue[i-1][m] + 5.0*ue[i][m]);
      }
    }
  }

  /*--------------------------------------------------------------------
  c  eta-direction flux differences (same structure, pencil along j,
  c  convective velocity is buf[][2])
  c-------------------------------------------------------------------*/
#pragma omp for
  for (i = 1; i < grid_points[0]-1; i++) {
    xi = (double)i * dnxm1;
    for (k = 1; k < grid_points[2]-1; k++) {
      zeta = (double)k * dnzm1;

      for (j = 0; j < grid_points[1]; j++) {
	eta = (double)j * dnym1;
	exact_solution(xi, eta, zeta, dtemp);
	for (m = 0; m < 5; m++) {
	  ue[j][m] = dtemp[m];
	}
	dtpp = 1.0/dtemp[0];
	for (m = 1; m <= 4; m++) {
	  buf[j][m] = dtpp * dtemp[m];
	}
	cuf[j] = buf[j][2] * buf[j][2];
	buf[j][0] = cuf[j] + buf[j][1] * buf[j][1] +
	  buf[j][3] * buf[j][3];
	q[j] = 0.5*(buf[j][1]*ue[j][1] + buf[j][2]*ue[j][2] +
		    buf[j][3]*ue[j][3]);
      }

      for (j = 1; j < grid_points[1]-1; j++) {
	jm1 = j-1;
	jp1 = j+1;

	forcing[i][j][k][0] = forcing[i][j][k][0] -
	  ty2*( ue[jp1][2]-ue[jm1][2] )+
	  dy1ty1*(ue[jp1][0]-2.0*ue[j][0]+ue[jm1][0]);

	forcing[i][j][k][1] = forcing[i][j][k][1] -
	  ty2*(ue[jp1][1]*buf[jp1][2]-ue[jm1][1]*buf[jm1][2])+
	  yycon2*(buf[jp1][1]-2.0*buf[j][1]+buf[jm1][1])+
	  dy2ty1*( ue[jp1][1]-2.0* ue[j][1]+ ue[jm1][1]);

	forcing[i][j][k][2] = forcing[i][j][k][2] -
	  ty2*((ue[jp1][2]*buf[jp1][2]+c2*(ue[jp1][4]-q[jp1]))-
	       (ue[jm1][2]*buf[jm1][2]+c2*(ue[jm1][4]-q[jm1])))+
	  yycon1*(buf[jp1][2]-2.0*buf[j][2]+buf[jm1][2])+
	  dy3ty1*( ue[jp1][2]-2.0*ue[j][2] +ue[jm1][2]);

	forcing[i][j][k][3] = forcing[i][j][k][3] -
	  ty2*(ue[jp1][3]*buf[jp1][2]-ue[jm1][3]*buf[jm1][2])+
	  yycon2*(buf[jp1][3]-2.0*buf[j][3]+buf[jm1][3])+
	  dy4ty1*( ue[jp1][3]-2.0*ue[j][3]+ ue[jm1][3]);

	forcing[i][j][k][4] = forcing[i][j][k][4] -
	  ty2*(buf[jp1][2]*(c1*ue[jp1][4]-c2*q[jp1])-
	       buf[jm1][2]*(c1*ue[jm1][4]-c2*q[jm1]))+
	  0.5*yycon3*(buf[jp1][0]-2.0*buf[j][0]+
		      buf[jm1][0])+
	  yycon4*(cuf[jp1]-2.0*cuf[j]+cuf[jm1])+
	  yycon5*(buf[jp1][4]-2.0*buf[j][4]+buf[jm1][4])+
	  dy5ty1*(ue[jp1][4]-2.0*ue[j][4]+ue[jm1][4]);
      }

      /*--------------------------------------------------------------------
      c  Fourth-order dissipation
      c-------------------------------------------------------------------*/
      for (m = 0; m < 5; m++) {
	j = 1;
	forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
	  (5.0*ue[j][m] - 4.0*ue[j+1][m] +ue[j+2][m]);
	j = 2;
	forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
	  (-4.0*ue[j-1][m] + 6.0*ue[j][m] -
	   4.0*ue[j+1][m] + ue[j+2][m]);
      }

      for (m = 0; m < 5; m++) {
	for (j = 1*3; j <= grid_points[1]-3*1-1; j++) {
	  forcing[i][j][k][m] = forcing[i][j][k][m] - dssp*
	    (ue[j-2][m] - 4.0*ue[j-1][m] +
	     6.0*ue[j][m] - 4.0*ue[j+1][m] + ue[j+2][m]);
	}
      }

      for (m = 0; m < 5; m++) {
	j = grid_points[1]-3;
	forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
	  (ue[j-2][m] - 4.0*ue[j-1][m] +
	   6.0*ue[j][m] - 4.0*ue[j+1][m]);
	j = grid_points[1]-2;
	forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
	  (ue[j-2][m] - 4.0*ue[j-1][m] + 5.0*ue[j][m]);
      }
    }
  }

  /*--------------------------------------------------------------------
  c  zeta-direction flux differences (pencil along k, convective
  c  velocity is buf[][3])
  c-------------------------------------------------------------------*/
#pragma omp for
  for (i = 1; i < grid_points[0]-1; i++) {
    xi = (double)i * dnxm1;
    for (j = 1; j < grid_points[1]-1; j++) {
      eta = (double)j * dnym1;

      for (k = 0; k < grid_points[2]; k++) {
	zeta = (double)k * dnzm1;
	exact_solution(xi, eta, zeta, dtemp);
	for (m = 0; m < 5; m++) {
	  ue[k][m] = dtemp[m];
	}
	dtpp = 1.0/dtemp[0];
	for (m = 1; m <= 4; m++) {
	  buf[k][m] = dtpp * dtemp[m];
	}
	cuf[k] = buf[k][3] * buf[k][3];
	buf[k][0] = cuf[k] + buf[k][1] * buf[k][1] +
	  buf[k][2] * buf[k][2];
	q[k] = 0.5*(buf[k][1]*ue[k][1] + buf[k][2]*ue[k][2] +
		    buf[k][3]*ue[k][3]);
      }

      for (k = 1; k < grid_points[2]-1; k++) {
	km1 = k-1;
	kp1 = k+1;

	forcing[i][j][k][0] = forcing[i][j][k][0] -
	  tz2*( ue[kp1][3]-ue[km1][3] )+
	  dz1tz1*(ue[kp1][0]-2.0*ue[k][0]+ue[km1][0]);

	forcing[i][j][k][1] = forcing[i][j][k][1] -
	  tz2 * (ue[kp1][1]*buf[kp1][3]-ue[km1][1]*buf[km1][3])+
	  zzcon2*(buf[kp1][1]-2.0*buf[k][1]+buf[km1][1])+
	  dz2tz1*( ue[kp1][1]-2.0* ue[k][1]+ ue[km1][1]);

	forcing[i][j][k][2] = forcing[i][j][k][2] -
	  tz2 * (ue[kp1][2]*buf[kp1][3]-ue[km1][2]*buf[km1][3])+
	  zzcon2*(buf[kp1][2]-2.0*buf[k][2]+buf[km1][2])+
	  dz3tz1*(ue[kp1][2]-2.0*ue[k][2]+ue[km1][2]);

	forcing[i][j][k][3] = forcing[i][j][k][3] -
	  tz2 * ((ue[kp1][3]*buf[kp1][3]+c2*(ue[kp1][4]-q[kp1]))-
		 (ue[km1][3]*buf[km1][3]+c2*(ue[km1][4]-q[km1])))+
	  zzcon1*(buf[kp1][3]-2.0*buf[k][3]+buf[km1][3])+
	  dz4tz1*( ue[kp1][3]-2.0*ue[k][3] +ue[km1][3]);

	forcing[i][j][k][4] = forcing[i][j][k][4] -
	  tz2 * (buf[kp1][3]*(c1*ue[kp1][4]-c2*q[kp1])-
		 buf[km1][3]*(c1*ue[km1][4]-c2*q[km1]))+
	  0.5*zzcon3*(buf[kp1][0]-2.0*buf[k][0]
		      +buf[km1][0])+
	  zzcon4*(cuf[kp1]-2.0*cuf[k]+cuf[km1])+
	  zzcon5*(buf[kp1][4]-2.0*buf[k][4]+buf[km1][4])+
	  dz5tz1*( ue[kp1][4]-2.0*ue[k][4]+ ue[km1][4]);
      }

      /*--------------------------------------------------------------------
      c  Fourth-order dissipation
      c-------------------------------------------------------------------*/
      for (m = 0; m < 5; m++) {
	k = 1;
	forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
	  (5.0*ue[k][m] - 4.0*ue[k+1][m] +ue[k+2][m]);
	k = 2;
	forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
	  (-4.0*ue[k-1][m] + 6.0*ue[k][m] -
	   4.0*ue[k+1][m] + ue[k+2][m]);
      }

      for (m = 0; m < 5; m++) {
	for (k = 1*3; k <= grid_points[2]-3*1-1; k++) {
	  forcing[i][j][k][m] = forcing[i][j][k][m] - dssp*
	    (ue[k-2][m] - 4.0*ue[k-1][m] +
	     6.0*ue[k][m] - 4.0*ue[k+1][m] + ue[k+2][m]);
	}
      }

      for (m = 0; m < 5; m++) {
	k = grid_points[2]-3;
	forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
	  (ue[k-2][m] - 4.0*ue[k-1][m] +
	   6.0*ue[k][m] - 4.0*ue[k+1][m]);
	k = grid_points[2]-2;
	forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
	  (ue[k-2][m] - 4.0*ue[k-1][m] + 5.0*ue[k][m]);
      }
    }
  }

  /*--------------------------------------------------------------------
  c  now change the sign of the forcing function so it can be added
  c  directly to the right hand side
  c-------------------------------------------------------------------*/
#pragma omp for
  for (i = 1; i < grid_points[0]-1; i++) {
    for (j = 1; j < grid_points[1]-1; j++) {
      for (k = 1; k < grid_points[2]-1; k++) {
	for (m = 0; m < 5; m++) {
	  forcing[i][j][k][m] = -1.0 * forcing[i][j][k][m];
	}
      }
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void exact_solution(double xi, double eta, double zeta,
			   double dtemp[5]) {

  /*--------------------------------------------------------------------
  c  this function returns the exact solution at point xi, eta, zeta:
  c  for each of the 5 components, a constant term plus one quartic
  c  polynomial (Horner form) in each coordinate, with coefficients
  c  taken from the global table ce
  c-------------------------------------------------------------------*/

  int m;
  double xpoly, epoly, zpoly;

  for (m = 0; m < 5; m++) {
    xpoly = ce[m][1] + xi*(ce[m][4] + xi*(ce[m][7] + xi*ce[m][10]));
    epoly = ce[m][2] + eta*(ce[m][5] + eta*(ce[m][8] + eta*ce[m][11]));
    zpoly = ce[m][3] + zeta*(ce[m][6] + zeta*(ce[m][9] + zeta*ce[m][12]));
    /* same left-to-right association as the original expression */
    dtemp[m] = ce[m][0] + xi*xpoly + eta*epoly + zeta*zpoly;
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void initialize(void) {

  /*--------------------------------------------------------------------
  c  This subroutine initializes the field variable u using
  c  tri-linear transfinite interpolation of the boundary values,
  c  then overwrites all six faces with the exact solution.
  c-------------------------------------------------------------------*/

  int i, j, k, m, ix, iy, iz;
  double xi, eta, zeta, Pface[2][3][5], Pxi, Peta, Pzeta, temp[5];

  /*--------------------------------------------------------------------
  c  Later (in compute_rhs) we compute 1/u for every element. A few of
  c  the corner elements are not used, but it convenient (and faster)
  c  to compute the whole thing with a simple loop. Make sure those
  c  values are nonzero by initializing the whole thing here.
  c  NOTE(review): all three bounds use IMAX; correct only if
  c  IMAX == JMAX == KMAX (true for the standard classes) -- confirm
  c  the declared dimensions of u in header.h.
  c-------------------------------------------------------------------*/
#pragma omp for
  for (i = 0; i < IMAX; i++) {
    for (j = 0; j < IMAX; j++) {
      for (k = 0; k < IMAX; k++) {
	for (m = 0; m < 5; m++) {
	  u[i][j][k][m] = 1.0;
	}
      }
    }
  }

  /*--------------------------------------------------------------------
  c  first store the "interpolated" values everywhere on the grid:
  c  evaluate the exact solution on the two opposing faces in each
  c  direction (Pface), then blend with tri-linear transfinite
  c  interpolation
  c-------------------------------------------------------------------*/
#pragma omp for
  for (i = 0; i < grid_points[0]; i++) {
    xi = (double)i * dnxm1;
    for (j = 0; j < grid_points[1]; j++) {
      eta = (double)j * dnym1;
      for (k = 0; k < grid_points[2]; k++) {
	zeta = (double)k * dnzm1;

	for (ix = 0; ix < 2; ix++) {
	  exact_solution((double)ix, eta, zeta,
			 &(Pface[ix][0][0]));
	}
	for (iy = 0; iy < 2; iy++) {
	  exact_solution(xi, (double)iy , zeta,
			 &Pface[iy][1][0]);
	}
	for (iz = 0; iz < 2; iz++) {
	  exact_solution(xi, eta, (double)iz,
			 &Pface[iz][2][0]);
	}

	for (m = 0; m < 5; m++) {
	  /* linear blend along each axis ... */
	  Pxi = xi * Pface[1][0][m] +
	    (1.0-xi) * Pface[0][0][m];
	  Peta = eta * Pface[1][1][m] +
	    (1.0-eta) * Pface[0][1][m];
	  Pzeta = zeta * Pface[1][2][m] +
	    (1.0-zeta) * Pface[0][2][m];
	  /* ... combined by inclusion-exclusion */
	  u[i][j][k][m] = Pxi + Peta + Pzeta -
	    Pxi*Peta - Pxi*Pzeta - Peta*Pzeta +
	    Pxi*Peta*Pzeta;
	}
      }
    }
  }

  /*--------------------------------------------------------------------
  c  now store the exact values on the boundaries.  The "nowait" on the
  c  first face of each opposing pair lets both faces be filled without
  c  an intermediate barrier (they touch disjoint slabs of u).
  c-------------------------------------------------------------------*/

  /*--------------------------------------------------------------------
  c  west face
  c-------------------------------------------------------------------*/
  i = 0;
  xi = 0.0;
#pragma omp for nowait
  for (j = 0; j < grid_points[1]; j++) {
    eta = (double)j * dnym1;
    for (k = 0; k < grid_points[2]; k++) {
      zeta = (double)k * dnzm1;
      exact_solution(xi, eta, zeta, temp);
      for (m = 0; m < 5; m++) {
	u[i][j][k][m] = temp[m];
      }
    }
  }

  /*--------------------------------------------------------------------
  c  east face
  c-------------------------------------------------------------------*/
  i = grid_points[0]-1;
  xi = 1.0;
#pragma omp for
  for (j = 0; j < grid_points[1]; j++) {
    eta = (double)j * dnym1;
    for (k = 0; k < grid_points[2]; k++) {
      zeta = (double)k * dnzm1;
      exact_solution(xi, eta, zeta, temp);
      for (m = 0; m < 5; m++) {
	u[i][j][k][m] = temp[m];
      }
    }
  }

  /*--------------------------------------------------------------------
  c  south face
  c-------------------------------------------------------------------*/
  j = 0;
  eta = 0.0;
#pragma omp for nowait
  for (i = 0; i < grid_points[0]; i++) {
    xi = (double)i * dnxm1;
    for (k = 0; k < grid_points[2]; k++) {
      zeta = (double)k * dnzm1;
      exact_solution(xi, eta, zeta, temp);
      for (m = 0; m < 5; m++) {
	u[i][j][k][m] = temp[m];
      }
    }
  }

  /*--------------------------------------------------------------------
  c  north face
  c-------------------------------------------------------------------*/
  j = grid_points[1]-1;
  eta = 1.0;
#pragma omp for
  for (i = 0; i < grid_points[0]; i++) {
    xi = (double)i * dnxm1;
    for (k = 0; k < grid_points[2]; k++) {
      zeta = (double)k * dnzm1;
      exact_solution(xi, eta, zeta, temp);
      for (m = 0; m < 5; m++) {
	u[i][j][k][m] = temp[m];
      }
    }
  }

  /*--------------------------------------------------------------------
  c  bottom face
  c-------------------------------------------------------------------*/
  k = 0;
  zeta = 0.0;
#pragma omp for nowait
  for (i = 0; i < grid_points[0]; i++) {
    xi = (double)i *dnxm1;
    for (j = 0; j < grid_points[1]; j++) {
      eta = (double)j * dnym1;
      exact_solution(xi, eta, zeta, temp);
      for (m = 0; m < 5; m++) {
	u[i][j][k][m] = temp[m];
      }
    }
  }

  /*--------------------------------------------------------------------
  c  top face
  c-------------------------------------------------------------------*/
  k = grid_points[2]-1;
  zeta = 1.0;
#pragma omp for
  for (i = 0; i < grid_points[0]; i++) {
    xi = (double)i * dnxm1;
    for (j = 0; j < grid_points[1]; j++) {
      eta = (double)j * dnym1;
      exact_solution(xi, eta, zeta, temp);
      for (m = 0; m < 5; m++) {
	u[i][j][k][m] = temp[m];
      }
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void lhsinit(void) {

  /*--------------------------------------------------------------------
  c  reset the block-tridiagonal system: zero every 5x5 block of the
  c  three diagonals (sub, main, super), then put 1.0 on the diagonal
  c  of each main-diagonal block so untouched boundary rows reduce to
  c  the identity
  c-------------------------------------------------------------------*/

  int i, j, k, m, n, b;

#pragma omp for
  for (i = 0; i < grid_points[0]; i++) {
    for (j = 0; j < grid_points[1]; j++) {
      for (k = 0; k < grid_points[2]; k++) {
	for (b = 0; b < 3; b++) {
	  for (m = 0; m < 5; m++) {
	    for (n = 0; n < 5; n++) {
	      lhs[i][j][k][b][m][n] = 0.0;
	    }
	  }
	}
      }
    }
  }

#pragma omp for
  for (i = 0; i < grid_points[0]; i++) {
    for (j = 0; j < grid_points[1]; j++) {
      for (k = 0; k < grid_points[2]; k++) {
	for (m = 0; m < 5; m++) {
	  lhs[i][j][k][1][m][m] = 1.0;
	}
      }
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void lhsx(void) {

  /*--------------------------------------------------------------------
  c  This function computes the left hand side in the xi-direction:
  c  first the flux (fjac) and viscous (njac) 5x5 Jacobians at every
  c  point of each i-pencil, then the three block-tridiagonal factors
  c  lhs[..][AA/BB/CC] for the interior points.
  c  NOTE(review): tmp1/tmp2/tmp3 and fjac/njac are file-scope scratch
  c  written inside worksharing loops -- presumably threadprivate in
  c  header.h; confirm.
  c-------------------------------------------------------------------*/

  int i, j, k;

  /*--------------------------------------------------------------------
  c  determine a (labeled f) and n jacobians
  c-------------------------------------------------------------------*/
#pragma omp for
  for (j = 1; j < grid_points[1]-1; j++) {
    for (k = 1; k < grid_points[2]-1; k++) {
      for (i = 0; i < grid_points[0]; i++) {
	/* tmp1 = 1/rho, tmp2 = 1/rho^2, tmp3 = 1/rho^3 */
	tmp1 = 1.0 / u[i][j][k][0];
	tmp2 = tmp1 * tmp1;
	tmp3 = tmp1 * tmp2;

	/* flux Jacobian, row by row */
	fjac[i][j][k][0][0] = 0.0;
	fjac[i][j][k][0][1] = 1.0;
	fjac[i][j][k][0][2] = 0.0;
	fjac[i][j][k][0][3] = 0.0;
	fjac[i][j][k][0][4] = 0.0;

	fjac[i][j][k][1][0] = -(u[i][j][k][1] * tmp2 *
				u[i][j][k][1])
	  + c2 * 0.50 * (u[i][j][k][1] * u[i][j][k][1]
			 + u[i][j][k][2] * u[i][j][k][2]
			 + u[i][j][k][3] * u[i][j][k][3] ) * tmp2;
	fjac[i][j][k][1][1] = ( 2.0 - c2 )
	  * ( u[i][j][k][1] / u[i][j][k][0] );
	fjac[i][j][k][1][2] = - c2 * ( u[i][j][k][2] * tmp1 );
	fjac[i][j][k][1][3] = - c2 * ( u[i][j][k][3] * tmp1 );
	fjac[i][j][k][1][4] = c2;

	fjac[i][j][k][2][0] = - ( u[i][j][k][1]*u[i][j][k][2] ) * tmp2;
	fjac[i][j][k][2][1] = u[i][j][k][2] * tmp1;
	fjac[i][j][k][2][2] = u[i][j][k][1] * tmp1;
	fjac[i][j][k][2][3] = 0.0;
	fjac[i][j][k][2][4] = 0.0;

	fjac[i][j][k][3][0] = - ( u[i][j][k][1]*u[i][j][k][3] ) * tmp2;
	fjac[i][j][k][3][1] = u[i][j][k][3] * tmp1;
	fjac[i][j][k][3][2] = 0.0;
	fjac[i][j][k][3][3] = u[i][j][k][1] * tmp1;
	fjac[i][j][k][3][4] = 0.0;

	fjac[i][j][k][4][0] = ( c2 * ( u[i][j][k][1] * u[i][j][k][1]
				       + u[i][j][k][2] * u[i][j][k][2]
				       + u[i][j][k][3] * u[i][j][k][3] ) * tmp2
				- c1 * ( u[i][j][k][4] * tmp1 ) )
	  * ( u[i][j][k][1] * tmp1 );
	fjac[i][j][k][4][1] = c1 * u[i][j][k][4] * tmp1
	  - 0.50 * c2
	  * ( 3.0*u[i][j][k][1]*u[i][j][k][1]
	      + u[i][j][k][2]*u[i][j][k][2]
	      + u[i][j][k][3]*u[i][j][k][3] ) * tmp2;
	fjac[i][j][k][4][2] = - c2 * ( u[i][j][k][2]*u[i][j][k][1] )
	  * tmp2;
	fjac[i][j][k][4][3] = - c2 * ( u[i][j][k][3]*u[i][j][k][1] )
	  * tmp2;
	fjac[i][j][k][4][4] = c1 * ( u[i][j][k][1] * tmp1 );

	/* viscous Jacobian, row by row */
	njac[i][j][k][0][0] = 0.0;
	njac[i][j][k][0][1] = 0.0;
	njac[i][j][k][0][2] = 0.0;
	njac[i][j][k][0][3] = 0.0;
	njac[i][j][k][0][4] = 0.0;

	njac[i][j][k][1][0] = - con43 * c3c4 * tmp2 * u[i][j][k][1];
	njac[i][j][k][1][1] = con43 * c3c4 * tmp1;
	njac[i][j][k][1][2] = 0.0;
	njac[i][j][k][1][3] = 0.0;
	njac[i][j][k][1][4] = 0.0;

	njac[i][j][k][2][0] = - c3c4 * tmp2 * u[i][j][k][2];
	njac[i][j][k][2][1] = 0.0;
	njac[i][j][k][2][2] = c3c4 * tmp1;
	njac[i][j][k][2][3] = 0.0;
	njac[i][j][k][2][4] = 0.0;

	njac[i][j][k][3][0] = - c3c4 * tmp2 * u[i][j][k][3];
	njac[i][j][k][3][1] = 0.0;
	njac[i][j][k][3][2] = 0.0;
	njac[i][j][k][3][3] = c3c4 * tmp1;
	njac[i][j][k][3][4] = 0.0;

	njac[i][j][k][4][0] = - ( con43 * c3c4
				  - c1345 ) * tmp3 * (pow2(u[i][j][k][1]))
	  - ( c3c4 - c1345 ) * tmp3 * (pow2(u[i][j][k][2]))
	  - ( c3c4 - c1345 ) * tmp3 * (pow2(u[i][j][k][3]))
	  - c1345 * tmp2 * u[i][j][k][4];
	njac[i][j][k][4][1] = ( con43 * c3c4
				- c1345 ) * tmp2 * u[i][j][k][1];
	njac[i][j][k][4][2] = ( c3c4 - c1345 ) * tmp2 * u[i][j][k][2];
	njac[i][j][k][4][3] = ( c3c4 - c1345 ) * tmp2 * u[i][j][k][3];
	njac[i][j][k][4][4] = ( c1345 ) * tmp1;
      }

      /*--------------------------------------------------------------------
      c  now jacobians set, so form left hand side in x direction:
      c  AA = sub-diagonal block (from point i-1), BB = main-diagonal
      c  block (from point i), CC = super-diagonal block (from point
      c  i+1); dx1..dx5 add artificial diffusion on the block diagonal
      c-------------------------------------------------------------------*/
      for (i = 1; i < grid_points[0]-1; i++) {
	tmp1 = dt * tx1;
	tmp2 = dt * tx2;

	lhs[i][j][k][AA][0][0] = - tmp2 * fjac[i-1][j][k][0][0]
	  - tmp1 * njac[i-1][j][k][0][0]
	  - tmp1 * dx1;
	lhs[i][j][k][AA][0][1] = - tmp2 * fjac[i-1][j][k][0][1]
	  - tmp1 * njac[i-1][j][k][0][1];
	lhs[i][j][k][AA][0][2] = - tmp2 * fjac[i-1][j][k][0][2]
	  - tmp1 * njac[i-1][j][k][0][2];
	lhs[i][j][k][AA][0][3] = - tmp2 * fjac[i-1][j][k][0][3]
	  - tmp1 * njac[i-1][j][k][0][3];
	lhs[i][j][k][AA][0][4] = - tmp2 * fjac[i-1][j][k][0][4]
	  - tmp1 * njac[i-1][j][k][0][4];

	lhs[i][j][k][AA][1][0] = - tmp2 * fjac[i-1][j][k][1][0]
	  - tmp1 * njac[i-1][j][k][1][0];
	lhs[i][j][k][AA][1][1] = - tmp2 * fjac[i-1][j][k][1][1]
	  - tmp1 * njac[i-1][j][k][1][1]
	  - tmp1 * dx2;
	lhs[i][j][k][AA][1][2] = - tmp2 * fjac[i-1][j][k][1][2]
	  - tmp1 * njac[i-1][j][k][1][2];
	lhs[i][j][k][AA][1][3] = - tmp2 * fjac[i-1][j][k][1][3]
	  - tmp1 * njac[i-1][j][k][1][3];
	lhs[i][j][k][AA][1][4] = - tmp2 * fjac[i-1][j][k][1][4]
	  - tmp1 * njac[i-1][j][k][1][4];

	lhs[i][j][k][AA][2][0] = - tmp2 * fjac[i-1][j][k][2][0]
	  - tmp1 * njac[i-1][j][k][2][0];
	lhs[i][j][k][AA][2][1] = - tmp2 * fjac[i-1][j][k][2][1]
	  - tmp1 * njac[i-1][j][k][2][1];
	lhs[i][j][k][AA][2][2] = - tmp2 * fjac[i-1][j][k][2][2]
	  - tmp1 * njac[i-1][j][k][2][2]
	  - tmp1 * dx3;
	lhs[i][j][k][AA][2][3] = - tmp2 * fjac[i-1][j][k][2][3]
	  - tmp1 * njac[i-1][j][k][2][3];
	lhs[i][j][k][AA][2][4] = - tmp2 * fjac[i-1][j][k][2][4]
	  - tmp1 * njac[i-1][j][k][2][4];

	lhs[i][j][k][AA][3][0] = - tmp2 * fjac[i-1][j][k][3][0]
	  - tmp1 * njac[i-1][j][k][3][0];
	lhs[i][j][k][AA][3][1] = - tmp2 * fjac[i-1][j][k][3][1]
	  - tmp1 * njac[i-1][j][k][3][1];
	lhs[i][j][k][AA][3][2] = - tmp2 * fjac[i-1][j][k][3][2]
	  - tmp1 * njac[i-1][j][k][3][2];
	lhs[i][j][k][AA][3][3] = - tmp2 * fjac[i-1][j][k][3][3]
	  - tmp1 * njac[i-1][j][k][3][3]
	  - tmp1 * dx4;
	lhs[i][j][k][AA][3][4] = - tmp2 * fjac[i-1][j][k][3][4]
	  - tmp1 * njac[i-1][j][k][3][4];

	lhs[i][j][k][AA][4][0] = - tmp2 * fjac[i-1][j][k][4][0]
	  - tmp1 * njac[i-1][j][k][4][0];
	lhs[i][j][k][AA][4][1] = - tmp2 * fjac[i-1][j][k][4][1]
	  - tmp1 * njac[i-1][j][k][4][1];
	lhs[i][j][k][AA][4][2] = - tmp2 * fjac[i-1][j][k][4][2]
	  - tmp1 * njac[i-1][j][k][4][2];
	lhs[i][j][k][AA][4][3] = - tmp2 * fjac[i-1][j][k][4][3]
	  - tmp1 * njac[i-1][j][k][4][3];
	lhs[i][j][k][AA][4][4] = - tmp2 * fjac[i-1][j][k][4][4]
	  - tmp1 * njac[i-1][j][k][4][4]
	  - tmp1 * dx5;

	lhs[i][j][k][BB][0][0] = 1.0
	  + tmp1 * 2.0 * njac[i][j][k][0][0]
	  + tmp1 * 2.0 * dx1;
	lhs[i][j][k][BB][0][1] = tmp1 * 2.0 * njac[i][j][k][0][1];
	lhs[i][j][k][BB][0][2] = tmp1 * 2.0 * njac[i][j][k][0][2];
	lhs[i][j][k][BB][0][3] = tmp1 * 2.0 * njac[i][j][k][0][3];
	lhs[i][j][k][BB][0][4] = tmp1 * 2.0 * njac[i][j][k][0][4];

	lhs[i][j][k][BB][1][0] = tmp1 * 2.0 * njac[i][j][k][1][0];
	lhs[i][j][k][BB][1][1] = 1.0
	  + tmp1 * 2.0 * njac[i][j][k][1][1]
	  + tmp1 * 2.0 * dx2;
	lhs[i][j][k][BB][1][2] = tmp1 * 2.0 * njac[i][j][k][1][2];
	lhs[i][j][k][BB][1][3] = tmp1 * 2.0 * njac[i][j][k][1][3];
	lhs[i][j][k][BB][1][4] = tmp1 * 2.0 * njac[i][j][k][1][4];

	lhs[i][j][k][BB][2][0] = tmp1 * 2.0 * njac[i][j][k][2][0];
	lhs[i][j][k][BB][2][1] = tmp1 * 2.0 * njac[i][j][k][2][1];
	lhs[i][j][k][BB][2][2] = 1.0
	  + tmp1 * 2.0 * njac[i][j][k][2][2]
	  + tmp1 * 2.0 * dx3;
	lhs[i][j][k][BB][2][3] = tmp1 * 2.0 * njac[i][j][k][2][3];
	lhs[i][j][k][BB][2][4] = tmp1 * 2.0 * njac[i][j][k][2][4];

	lhs[i][j][k][BB][3][0] = tmp1 * 2.0 * njac[i][j][k][3][0];
	lhs[i][j][k][BB][3][1] = tmp1 * 2.0 * njac[i][j][k][3][1];
	lhs[i][j][k][BB][3][2] = tmp1 * 2.0 * njac[i][j][k][3][2];
	lhs[i][j][k][BB][3][3] = 1.0
	  + tmp1 * 2.0 * njac[i][j][k][3][3]
	  + tmp1 * 2.0 * dx4;
	lhs[i][j][k][BB][3][4] = tmp1 * 2.0 * njac[i][j][k][3][4];

	lhs[i][j][k][BB][4][0] = tmp1 * 2.0 * njac[i][j][k][4][0];
	lhs[i][j][k][BB][4][1] = tmp1 * 2.0 * njac[i][j][k][4][1];
	lhs[i][j][k][BB][4][2] = tmp1 * 2.0 * njac[i][j][k][4][2];
	lhs[i][j][k][BB][4][3] = tmp1 * 2.0 * njac[i][j][k][4][3];
	lhs[i][j][k][BB][4][4] = 1.0
	  + tmp1 * 2.0 * njac[i][j][k][4][4]
	  + tmp1 * 2.0 * dx5;

	lhs[i][j][k][CC][0][0] = tmp2 * fjac[i+1][j][k][0][0]
	  - tmp1 * njac[i+1][j][k][0][0]
	  - tmp1 * dx1;
	lhs[i][j][k][CC][0][1] = tmp2 * fjac[i+1][j][k][0][1]
	  - tmp1 * njac[i+1][j][k][0][1];
	lhs[i][j][k][CC][0][2] = tmp2 * fjac[i+1][j][k][0][2]
	  - tmp1 * njac[i+1][j][k][0][2];
	lhs[i][j][k][CC][0][3] = tmp2 * fjac[i+1][j][k][0][3]
	  - tmp1 * njac[i+1][j][k][0][3];
	lhs[i][j][k][CC][0][4] = tmp2 * fjac[i+1][j][k][0][4]
	  - tmp1 * njac[i+1][j][k][0][4];

	lhs[i][j][k][CC][1][0] = tmp2 * fjac[i+1][j][k][1][0]
	  - tmp1 * njac[i+1][j][k][1][0];
	lhs[i][j][k][CC][1][1] = tmp2 * fjac[i+1][j][k][1][1]
	  - tmp1 * njac[i+1][j][k][1][1]
	  - tmp1 * dx2;
	lhs[i][j][k][CC][1][2] = tmp2 * fjac[i+1][j][k][1][2]
	  - tmp1 * njac[i+1][j][k][1][2];
	lhs[i][j][k][CC][1][3] = tmp2 * fjac[i+1][j][k][1][3]
	  - tmp1 * njac[i+1][j][k][1][3];
	lhs[i][j][k][CC][1][4] = tmp2 * fjac[i+1][j][k][1][4]
	  - tmp1 * njac[i+1][j][k][1][4];

	lhs[i][j][k][CC][2][0] = tmp2 * fjac[i+1][j][k][2][0]
	  - tmp1 * njac[i+1][j][k][2][0];
	lhs[i][j][k][CC][2][1] = tmp2 * fjac[i+1][j][k][2][1]
	  - tmp1 * njac[i+1][j][k][2][1];
	lhs[i][j][k][CC][2][2] = tmp2 * fjac[i+1][j][k][2][2]
	  - tmp1 * njac[i+1][j][k][2][2]
	  - tmp1 * dx3;
	lhs[i][j][k][CC][2][3] = tmp2 * fjac[i+1][j][k][2][3]
	  - tmp1 * njac[i+1][j][k][2][3];
	lhs[i][j][k][CC][2][4] = tmp2 * fjac[i+1][j][k][2][4]
	  - tmp1 * njac[i+1][j][k][2][4];

	lhs[i][j][k][CC][3][0] = tmp2 * fjac[i+1][j][k][3][0]
	  - tmp1 * njac[i+1][j][k][3][0];
	lhs[i][j][k][CC][3][1] = tmp2 * fjac[i+1][j][k][3][1]
	  - tmp1 * njac[i+1][j][k][3][1];
	lhs[i][j][k][CC][3][2] = tmp2 * fjac[i+1][j][k][3][2]
	  - tmp1 * njac[i+1][j][k][3][2];
	lhs[i][j][k][CC][3][3] = tmp2 * fjac[i+1][j][k][3][3]
	  - tmp1 * njac[i+1][j][k][3][3]
	  - tmp1 * dx4;
	lhs[i][j][k][CC][3][4] = tmp2 * fjac[i+1][j][k][3][4]
	  - tmp1 * njac[i+1][j][k][3][4];

	lhs[i][j][k][CC][4][0] = tmp2 * fjac[i+1][j][k][4][0]
	  - tmp1 * njac[i+1][j][k][4][0];
	lhs[i][j][k][CC][4][1] = tmp2 * fjac[i+1][j][k][4][1]
	  - tmp1 * njac[i+1][j][k][4][1];
	lhs[i][j][k][CC][4][2] = tmp2 * fjac[i+1][j][k][4][2]
	  - tmp1 * njac[i+1][j][k][4][2];
	lhs[i][j][k][CC][4][3] = tmp2 * fjac[i+1][j][k][4][3]
	  - tmp1 * njac[i+1][j][k][4][3];
	lhs[i][j][k][CC][4][4] = tmp2 * fjac[i+1][j][k][4][4]
	  - tmp1 * njac[i+1][j][k][4][4]
	  - tmp1 * dx5;
      }
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void lhsy(void) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c This function computes the left hand side for the three y-factors
c-------------------------------------------------------------------*/
int i, j, k;
/*--------------------------------------------------------------------
c Compute the indices for storing the tri-diagonal matrix;
c determine a (labeled f) and n jacobians for cell c
c-------------------------------------------------------------------*/
/* Pass 1: at every point (interior i, ALL j, interior k) evaluate the
c inviscid eta-flux Jacobian fjac = dF/dU and the viscous Jacobian njac,
c each a 5x5 block. j runs over the full range because pass 2 reads the
c j-1 and j+1 neighbors when assembling the sub/super-diagonal blocks.
c tmp1 = 1/rho, tmp2 = 1/rho^2, tmp3 = 1/rho^3 at the current point.
c NOTE(review): tmp1..tmp3 are file-scope variables used inside an
c orphaned "omp for" -- presumably declared threadprivate elsewhere in
c this file; confirm, otherwise they race between threads. */
#pragma omp for
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 0; j < grid_points[1]; j++) {
for (k = 1; k < grid_points[2]-1; k++) {
tmp1 = 1.0 / u[i][j][k][0];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
fjac[ i][ j][ k][0][0] = 0.0;
fjac[ i][ j][ k][0][1] = 0.0;
fjac[ i][ j][ k][0][2] = 1.0;
fjac[ i][ j][ k][0][3] = 0.0;
fjac[ i][ j][ k][0][4] = 0.0;
fjac[i][j][k][1][0] = - ( u[i][j][k][1]*u[i][j][k][2] )
* tmp2;
fjac[i][j][k][1][1] = u[i][j][k][2] * tmp1;
fjac[i][j][k][1][2] = u[i][j][k][1] * tmp1;
fjac[i][j][k][1][3] = 0.0;
fjac[i][j][k][1][4] = 0.0;
fjac[i][j][k][2][0] = - ( u[i][j][k][2]*u[i][j][k][2]*tmp2)
+ 0.50 * c2 * ( ( u[i][j][k][1] * u[i][j][k][1]
+ u[i][j][k][2] * u[i][j][k][2]
+ u[i][j][k][3] * u[i][j][k][3] )
* tmp2 );
fjac[i][j][k][2][1] = - c2 * u[i][j][k][1] * tmp1;
fjac[i][j][k][2][2] = ( 2.0 - c2 )
* u[i][j][k][2] * tmp1;
fjac[i][j][k][2][3] = - c2 * u[i][j][k][3] * tmp1;
fjac[i][j][k][2][4] = c2;
fjac[i][j][k][3][0] = - ( u[i][j][k][2]*u[i][j][k][3] )
* tmp2;
fjac[i][j][k][3][1] = 0.0;
fjac[i][j][k][3][2] = u[i][j][k][3] * tmp1;
fjac[i][j][k][3][3] = u[i][j][k][2] * tmp1;
fjac[i][j][k][3][4] = 0.0;
fjac[i][j][k][4][0] = ( c2 * ( u[i][j][k][1] * u[i][j][k][1]
+ u[i][j][k][2] * u[i][j][k][2]
+ u[i][j][k][3] * u[i][j][k][3] )
* tmp2
- c1 * u[i][j][k][4] * tmp1 )
* u[i][j][k][2] * tmp1;
fjac[i][j][k][4][1] = - c2 * u[i][j][k][1]*u[i][j][k][2]
* tmp2;
fjac[i][j][k][4][2] = c1 * u[i][j][k][4] * tmp1
- 0.50 * c2
* ( ( u[i][j][k][1]*u[i][j][k][1]
+ 3.0 * u[i][j][k][2]*u[i][j][k][2]
+ u[i][j][k][3]*u[i][j][k][3] )
* tmp2 );
fjac[i][j][k][4][3] = - c2 * ( u[i][j][k][2]*u[i][j][k][3] )
* tmp2;
fjac[i][j][k][4][4] = c1 * u[i][j][k][2] * tmp1;
/* Viscous Jacobian: row 2 (y-momentum) carries the con43 = 4/3
c factor because this is the y-direction factor. */
njac[i][j][k][0][0] = 0.0;
njac[i][j][k][0][1] = 0.0;
njac[i][j][k][0][2] = 0.0;
njac[i][j][k][0][3] = 0.0;
njac[i][j][k][0][4] = 0.0;
njac[i][j][k][1][0] = - c3c4 * tmp2 * u[i][j][k][1];
njac[i][j][k][1][1] = c3c4 * tmp1;
njac[i][j][k][1][2] = 0.0;
njac[i][j][k][1][3] = 0.0;
njac[i][j][k][1][4] = 0.0;
njac[i][j][k][2][0] = - con43 * c3c4 * tmp2 * u[i][j][k][2];
njac[i][j][k][2][1] = 0.0;
njac[i][j][k][2][2] = con43 * c3c4 * tmp1;
njac[i][j][k][2][3] = 0.0;
njac[i][j][k][2][4] = 0.0;
njac[i][j][k][3][0] = - c3c4 * tmp2 * u[i][j][k][3];
njac[i][j][k][3][1] = 0.0;
njac[i][j][k][3][2] = 0.0;
njac[i][j][k][3][3] = c3c4 * tmp1;
njac[i][j][k][3][4] = 0.0;
njac[i][j][k][4][0] = - ( c3c4
- c1345 ) * tmp3 * (pow2(u[i][j][k][1]))
- ( con43 * c3c4
- c1345 ) * tmp3 * (pow2(u[i][j][k][2]))
- ( c3c4 - c1345 ) * tmp3 * (pow2(u[i][j][k][3]))
- c1345 * tmp2 * u[i][j][k][4];
njac[i][j][k][4][1] = ( c3c4 - c1345 ) * tmp2 * u[i][j][k][1];
njac[i][j][k][4][2] = ( con43 * c3c4
- c1345 ) * tmp2 * u[i][j][k][2];
njac[i][j][k][4][3] = ( c3c4 - c1345 ) * tmp2 * u[i][j][k][3];
njac[i][j][k][4][4] = ( c1345 ) * tmp1;
}
}
}
/*--------------------------------------------------------------------
c now joacobians set, so form left hand side in y direction
c-------------------------------------------------------------------*/
/* Pass 2: assemble the block-tridiagonal system at interior points.
c AA = sub-diagonal block (built from the j-1 neighbor), BB = diagonal
c block (from the point itself), CC = super-diagonal (from j+1).
c tmp1 = dt*ty1 scales the viscous/dissipation terms, tmp2 = dt*ty2
c scales the flux terms; dy1..dy5 add the diagonal dissipation.
c Implied barrier of pass 1's "omp for" guarantees fjac/njac are
c complete before any thread reads neighbors here. */
#pragma omp for
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
for (k = 1; k < grid_points[2]-1; k++) {
tmp1 = dt * ty1;
tmp2 = dt * ty2;
lhs[i][j][k][AA][0][0] = - tmp2 * fjac[i][j-1][k][0][0]
- tmp1 * njac[i][j-1][k][0][0]
- tmp1 * dy1;
lhs[i][j][k][AA][0][1] = - tmp2 * fjac[i][j-1][k][0][1]
- tmp1 * njac[i][j-1][k][0][1];
lhs[i][j][k][AA][0][2] = - tmp2 * fjac[i][j-1][k][0][2]
- tmp1 * njac[i][j-1][k][0][2];
lhs[i][j][k][AA][0][3] = - tmp2 * fjac[i][j-1][k][0][3]
- tmp1 * njac[i][j-1][k][0][3];
lhs[i][j][k][AA][0][4] = - tmp2 * fjac[i][j-1][k][0][4]
- tmp1 * njac[i][j-1][k][0][4];
lhs[i][j][k][AA][1][0] = - tmp2 * fjac[i][j-1][k][1][0]
- tmp1 * njac[i][j-1][k][1][0];
lhs[i][j][k][AA][1][1] = - tmp2 * fjac[i][j-1][k][1][1]
- tmp1 * njac[i][j-1][k][1][1]
- tmp1 * dy2;
lhs[i][j][k][AA][1][2] = - tmp2 * fjac[i][j-1][k][1][2]
- tmp1 * njac[i][j-1][k][1][2];
lhs[i][j][k][AA][1][3] = - tmp2 * fjac[i][j-1][k][1][3]
- tmp1 * njac[i][j-1][k][1][3];
lhs[i][j][k][AA][1][4] = - tmp2 * fjac[i][j-1][k][1][4]
- tmp1 * njac[i][j-1][k][1][4];
lhs[i][j][k][AA][2][0] = - tmp2 * fjac[i][j-1][k][2][0]
- tmp1 * njac[i][j-1][k][2][0];
lhs[i][j][k][AA][2][1] = - tmp2 * fjac[i][j-1][k][2][1]
- tmp1 * njac[i][j-1][k][2][1];
lhs[i][j][k][AA][2][2] = - tmp2 * fjac[i][j-1][k][2][2]
- tmp1 * njac[i][j-1][k][2][2]
- tmp1 * dy3;
lhs[i][j][k][AA][2][3] = - tmp2 * fjac[i][j-1][k][2][3]
- tmp1 * njac[i][j-1][k][2][3];
lhs[i][j][k][AA][2][4] = - tmp2 * fjac[i][j-1][k][2][4]
- tmp1 * njac[i][j-1][k][2][4];
lhs[i][j][k][AA][3][0] = - tmp2 * fjac[i][j-1][k][3][0]
- tmp1 * njac[i][j-1][k][3][0];
lhs[i][j][k][AA][3][1] = - tmp2 * fjac[i][j-1][k][3][1]
- tmp1 * njac[i][j-1][k][3][1];
lhs[i][j][k][AA][3][2] = - tmp2 * fjac[i][j-1][k][3][2]
- tmp1 * njac[i][j-1][k][3][2];
lhs[i][j][k][AA][3][3] = - tmp2 * fjac[i][j-1][k][3][3]
- tmp1 * njac[i][j-1][k][3][3]
- tmp1 * dy4;
lhs[i][j][k][AA][3][4] = - tmp2 * fjac[i][j-1][k][3][4]
- tmp1 * njac[i][j-1][k][3][4];
lhs[i][j][k][AA][4][0] = - tmp2 * fjac[i][j-1][k][4][0]
- tmp1 * njac[i][j-1][k][4][0];
lhs[i][j][k][AA][4][1] = - tmp2 * fjac[i][j-1][k][4][1]
- tmp1 * njac[i][j-1][k][4][1];
lhs[i][j][k][AA][4][2] = - tmp2 * fjac[i][j-1][k][4][2]
- tmp1 * njac[i][j-1][k][4][2];
lhs[i][j][k][AA][4][3] = - tmp2 * fjac[i][j-1][k][4][3]
- tmp1 * njac[i][j-1][k][4][3];
lhs[i][j][k][AA][4][4] = - tmp2 * fjac[i][j-1][k][4][4]
- tmp1 * njac[i][j-1][k][4][4]
- tmp1 * dy5;
/* Diagonal block: identity + 2*dt*ty1*(njac + dissipation);
c the inviscid fjac contribution cancels on the diagonal. */
lhs[i][j][k][BB][0][0] = 1.0
+ tmp1 * 2.0 * njac[i][j][k][0][0]
+ tmp1 * 2.0 * dy1;
lhs[i][j][k][BB][0][1] = tmp1 * 2.0 * njac[i][j][k][0][1];
lhs[i][j][k][BB][0][2] = tmp1 * 2.0 * njac[i][j][k][0][2];
lhs[i][j][k][BB][0][3] = tmp1 * 2.0 * njac[i][j][k][0][3];
lhs[i][j][k][BB][0][4] = tmp1 * 2.0 * njac[i][j][k][0][4];
lhs[i][j][k][BB][1][0] = tmp1 * 2.0 * njac[i][j][k][1][0];
lhs[i][j][k][BB][1][1] = 1.0
+ tmp1 * 2.0 * njac[i][j][k][1][1]
+ tmp1 * 2.0 * dy2;
lhs[i][j][k][BB][1][2] = tmp1 * 2.0 * njac[i][j][k][1][2];
lhs[i][j][k][BB][1][3] = tmp1 * 2.0 * njac[i][j][k][1][3];
lhs[i][j][k][BB][1][4] = tmp1 * 2.0 * njac[i][j][k][1][4];
lhs[i][j][k][BB][2][0] = tmp1 * 2.0 * njac[i][j][k][2][0];
lhs[i][j][k][BB][2][1] = tmp1 * 2.0 * njac[i][j][k][2][1];
lhs[i][j][k][BB][2][2] = 1.0
+ tmp1 * 2.0 * njac[i][j][k][2][2]
+ tmp1 * 2.0 * dy3;
lhs[i][j][k][BB][2][3] = tmp1 * 2.0 * njac[i][j][k][2][3];
lhs[i][j][k][BB][2][4] = tmp1 * 2.0 * njac[i][j][k][2][4];
lhs[i][j][k][BB][3][0] = tmp1 * 2.0 * njac[i][j][k][3][0];
lhs[i][j][k][BB][3][1] = tmp1 * 2.0 * njac[i][j][k][3][1];
lhs[i][j][k][BB][3][2] = tmp1 * 2.0 * njac[i][j][k][3][2];
lhs[i][j][k][BB][3][3] = 1.0
+ tmp1 * 2.0 * njac[i][j][k][3][3]
+ tmp1 * 2.0 * dy4;
lhs[i][j][k][BB][3][4] = tmp1 * 2.0 * njac[i][j][k][3][4];
lhs[i][j][k][BB][4][0] = tmp1 * 2.0 * njac[i][j][k][4][0];
lhs[i][j][k][BB][4][1] = tmp1 * 2.0 * njac[i][j][k][4][1];
lhs[i][j][k][BB][4][2] = tmp1 * 2.0 * njac[i][j][k][4][2];
lhs[i][j][k][BB][4][3] = tmp1 * 2.0 * njac[i][j][k][4][3];
lhs[i][j][k][BB][4][4] = 1.0
+ tmp1 * 2.0 * njac[i][j][k][4][4]
+ tmp1 * 2.0 * dy5;
lhs[i][j][k][CC][0][0] = tmp2 * fjac[i][j+1][k][0][0]
- tmp1 * njac[i][j+1][k][0][0]
- tmp1 * dy1;
lhs[i][j][k][CC][0][1] = tmp2 * fjac[i][j+1][k][0][1]
- tmp1 * njac[i][j+1][k][0][1];
lhs[i][j][k][CC][0][2] = tmp2 * fjac[i][j+1][k][0][2]
- tmp1 * njac[i][j+1][k][0][2];
lhs[i][j][k][CC][0][3] = tmp2 * fjac[i][j+1][k][0][3]
- tmp1 * njac[i][j+1][k][0][3];
lhs[i][j][k][CC][0][4] = tmp2 * fjac[i][j+1][k][0][4]
- tmp1 * njac[i][j+1][k][0][4];
lhs[i][j][k][CC][1][0] = tmp2 * fjac[i][j+1][k][1][0]
- tmp1 * njac[i][j+1][k][1][0];
lhs[i][j][k][CC][1][1] = tmp2 * fjac[i][j+1][k][1][1]
- tmp1 * njac[i][j+1][k][1][1]
- tmp1 * dy2;
lhs[i][j][k][CC][1][2] = tmp2 * fjac[i][j+1][k][1][2]
- tmp1 * njac[i][j+1][k][1][2];
lhs[i][j][k][CC][1][3] = tmp2 * fjac[i][j+1][k][1][3]
- tmp1 * njac[i][j+1][k][1][3];
lhs[i][j][k][CC][1][4] = tmp2 * fjac[i][j+1][k][1][4]
- tmp1 * njac[i][j+1][k][1][4];
lhs[i][j][k][CC][2][0] = tmp2 * fjac[i][j+1][k][2][0]
- tmp1 * njac[i][j+1][k][2][0];
lhs[i][j][k][CC][2][1] = tmp2 * fjac[i][j+1][k][2][1]
- tmp1 * njac[i][j+1][k][2][1];
lhs[i][j][k][CC][2][2] = tmp2 * fjac[i][j+1][k][2][2]
- tmp1 * njac[i][j+1][k][2][2]
- tmp1 * dy3;
lhs[i][j][k][CC][2][3] = tmp2 * fjac[i][j+1][k][2][3]
- tmp1 * njac[i][j+1][k][2][3];
lhs[i][j][k][CC][2][4] = tmp2 * fjac[i][j+1][k][2][4]
- tmp1 * njac[i][j+1][k][2][4];
lhs[i][j][k][CC][3][0] = tmp2 * fjac[i][j+1][k][3][0]
- tmp1 * njac[i][j+1][k][3][0];
lhs[i][j][k][CC][3][1] = tmp2 * fjac[i][j+1][k][3][1]
- tmp1 * njac[i][j+1][k][3][1];
lhs[i][j][k][CC][3][2] = tmp2 * fjac[i][j+1][k][3][2]
- tmp1 * njac[i][j+1][k][3][2];
lhs[i][j][k][CC][3][3] = tmp2 * fjac[i][j+1][k][3][3]
- tmp1 * njac[i][j+1][k][3][3]
- tmp1 * dy4;
lhs[i][j][k][CC][3][4] = tmp2 * fjac[i][j+1][k][3][4]
- tmp1 * njac[i][j+1][k][3][4];
lhs[i][j][k][CC][4][0] = tmp2 * fjac[i][j+1][k][4][0]
- tmp1 * njac[i][j+1][k][4][0];
lhs[i][j][k][CC][4][1] = tmp2 * fjac[i][j+1][k][4][1]
- tmp1 * njac[i][j+1][k][4][1];
lhs[i][j][k][CC][4][2] = tmp2 * fjac[i][j+1][k][4][2]
- tmp1 * njac[i][j+1][k][4][2];
lhs[i][j][k][CC][4][3] = tmp2 * fjac[i][j+1][k][4][3]
- tmp1 * njac[i][j+1][k][4][3];
lhs[i][j][k][CC][4][4] = tmp2 * fjac[i][j+1][k][4][4]
- tmp1 * njac[i][j+1][k][4][4]
- tmp1 * dy5;
}
}
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void lhsz(void) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c This function computes the left hand side for the three z-factors
c-------------------------------------------------------------------*/
int i, j, k;
/*--------------------------------------------------------------------
c Compute the indices for storing the block-diagonal matrix;
c determine c (labeled f) and s jacobians
c---------------------------------------------------------------------*/
/* Pass 1: at every point (interior i, interior j, ALL k) evaluate the
c inviscid zeta-flux Jacobian fjac = dF/dU and the viscous Jacobian
c njac, each a 5x5 block. k runs over the full range because pass 2
c reads the k-1 and k+1 neighbors. tmp1 = 1/rho, tmp2 = 1/rho^2,
c tmp3 = 1/rho^3 at the current point.
c NOTE(review): tmp1..tmp3 are file-scope variables used inside an
c orphaned "omp for" -- presumably declared threadprivate elsewhere in
c this file; confirm, otherwise they race between threads. */
#pragma omp for
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
for (k = 0; k < grid_points[2]; k++) {
tmp1 = 1.0 / u[i][j][k][0];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
fjac[i][j][k][0][0] = 0.0;
fjac[i][j][k][0][1] = 0.0;
fjac[i][j][k][0][2] = 0.0;
fjac[i][j][k][0][3] = 1.0;
fjac[i][j][k][0][4] = 0.0;
fjac[i][j][k][1][0] = - ( u[i][j][k][1]*u[i][j][k][3] )
* tmp2;
fjac[i][j][k][1][1] = u[i][j][k][3] * tmp1;
fjac[i][j][k][1][2] = 0.0;
fjac[i][j][k][1][3] = u[i][j][k][1] * tmp1;
fjac[i][j][k][1][4] = 0.0;
fjac[i][j][k][2][0] = - ( u[i][j][k][2]*u[i][j][k][3] )
* tmp2;
fjac[i][j][k][2][1] = 0.0;
fjac[i][j][k][2][2] = u[i][j][k][3] * tmp1;
fjac[i][j][k][2][3] = u[i][j][k][2] * tmp1;
fjac[i][j][k][2][4] = 0.0;
fjac[i][j][k][3][0] = - (u[i][j][k][3]*u[i][j][k][3] * tmp2 )
+ 0.50 * c2 * ( ( u[i][j][k][1] * u[i][j][k][1]
+ u[i][j][k][2] * u[i][j][k][2]
+ u[i][j][k][3] * u[i][j][k][3] ) * tmp2 );
fjac[i][j][k][3][1] = - c2 * u[i][j][k][1] * tmp1;
fjac[i][j][k][3][2] = - c2 * u[i][j][k][2] * tmp1;
fjac[i][j][k][3][3] = ( 2.0 - c2 )
* u[i][j][k][3] * tmp1;
fjac[i][j][k][3][4] = c2;
fjac[i][j][k][4][0] = ( c2 * ( u[i][j][k][1] * u[i][j][k][1]
+ u[i][j][k][2] * u[i][j][k][2]
+ u[i][j][k][3] * u[i][j][k][3] )
* tmp2
- c1 * ( u[i][j][k][4] * tmp1 ) )
* ( u[i][j][k][3] * tmp1 );
fjac[i][j][k][4][1] = - c2 * ( u[i][j][k][1]*u[i][j][k][3] )
* tmp2;
fjac[i][j][k][4][2] = - c2 * ( u[i][j][k][2]*u[i][j][k][3] )
* tmp2;
fjac[i][j][k][4][3] = c1 * ( u[i][j][k][4] * tmp1 )
- 0.50 * c2
* ( ( u[i][j][k][1]*u[i][j][k][1]
+ u[i][j][k][2]*u[i][j][k][2]
+ 3.0*u[i][j][k][3]*u[i][j][k][3] )
* tmp2 );
fjac[i][j][k][4][4] = c1 * u[i][j][k][3] * tmp1;
/* Viscous Jacobian: row 3 (z-momentum) carries the con43 = 4/3
c factor because this is the z-direction factor. */
njac[i][j][k][0][0] = 0.0;
njac[i][j][k][0][1] = 0.0;
njac[i][j][k][0][2] = 0.0;
njac[i][j][k][0][3] = 0.0;
njac[i][j][k][0][4] = 0.0;
njac[i][j][k][1][0] = - c3c4 * tmp2 * u[i][j][k][1];
njac[i][j][k][1][1] = c3c4 * tmp1;
njac[i][j][k][1][2] = 0.0;
njac[i][j][k][1][3] = 0.0;
njac[i][j][k][1][4] = 0.0;
njac[i][j][k][2][0] = - c3c4 * tmp2 * u[i][j][k][2];
njac[i][j][k][2][1] = 0.0;
njac[i][j][k][2][2] = c3c4 * tmp1;
njac[i][j][k][2][3] = 0.0;
njac[i][j][k][2][4] = 0.0;
njac[i][j][k][3][0] = - con43 * c3c4 * tmp2 * u[i][j][k][3];
njac[i][j][k][3][1] = 0.0;
njac[i][j][k][3][2] = 0.0;
/* Was "con43 * c3 * c4 * tmp1": use the precomputed c3c4 (= c3*c4,
c set_constants) for consistency with every other viscous term in
c this file (identical value; c4 = 1.0 makes both forms exact). */
njac[i][j][k][3][3] = con43 * c3c4 * tmp1;
njac[i][j][k][3][4] = 0.0;
njac[i][j][k][4][0] = - ( c3c4
- c1345 ) * tmp3 * (pow2(u[i][j][k][1]))
- ( c3c4 - c1345 ) * tmp3 * (pow2(u[i][j][k][2]))
- ( con43 * c3c4
- c1345 ) * tmp3 * (pow2(u[i][j][k][3]))
- c1345 * tmp2 * u[i][j][k][4];
njac[i][j][k][4][1] = ( c3c4 - c1345 ) * tmp2 * u[i][j][k][1];
njac[i][j][k][4][2] = ( c3c4 - c1345 ) * tmp2 * u[i][j][k][2];
njac[i][j][k][4][3] = ( con43 * c3c4
- c1345 ) * tmp2 * u[i][j][k][3];
njac[i][j][k][4][4] = ( c1345 )* tmp1;
}
}
}
/*--------------------------------------------------------------------
c now jacobians set, so form left hand side in z direction
c-------------------------------------------------------------------*/
/* Pass 2: assemble the block-tridiagonal system at interior points.
c AA = sub-diagonal block (from the k-1 neighbor), BB = diagonal block
c (from the point itself), CC = super-diagonal (from k+1).
c tmp1 = dt*tz1 scales viscous/dissipation terms, tmp2 = dt*tz2 scales
c flux terms; dz1..dz5 add the diagonal dissipation. Implied barrier
c of pass 1's "omp for" orders the neighbor reads below. */
#pragma omp for
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
for (k = 1; k < grid_points[2]-1; k++) {
tmp1 = dt * tz1;
tmp2 = dt * tz2;
lhs[i][j][k][AA][0][0] = - tmp2 * fjac[i][j][k-1][0][0]
- tmp1 * njac[i][j][k-1][0][0]
- tmp1 * dz1;
lhs[i][j][k][AA][0][1] = - tmp2 * fjac[i][j][k-1][0][1]
- tmp1 * njac[i][j][k-1][0][1];
lhs[i][j][k][AA][0][2] = - tmp2 * fjac[i][j][k-1][0][2]
- tmp1 * njac[i][j][k-1][0][2];
lhs[i][j][k][AA][0][3] = - tmp2 * fjac[i][j][k-1][0][3]
- tmp1 * njac[i][j][k-1][0][3];
lhs[i][j][k][AA][0][4] = - tmp2 * fjac[i][j][k-1][0][4]
- tmp1 * njac[i][j][k-1][0][4];
lhs[i][j][k][AA][1][0] = - tmp2 * fjac[i][j][k-1][1][0]
- tmp1 * njac[i][j][k-1][1][0];
lhs[i][j][k][AA][1][1] = - tmp2 * fjac[i][j][k-1][1][1]
- tmp1 * njac[i][j][k-1][1][1]
- tmp1 * dz2;
lhs[i][j][k][AA][1][2] = - tmp2 * fjac[i][j][k-1][1][2]
- tmp1 * njac[i][j][k-1][1][2];
lhs[i][j][k][AA][1][3] = - tmp2 * fjac[i][j][k-1][1][3]
- tmp1 * njac[i][j][k-1][1][3];
lhs[i][j][k][AA][1][4] = - tmp2 * fjac[i][j][k-1][1][4]
- tmp1 * njac[i][j][k-1][1][4];
lhs[i][j][k][AA][2][0] = - tmp2 * fjac[i][j][k-1][2][0]
- tmp1 * njac[i][j][k-1][2][0];
lhs[i][j][k][AA][2][1] = - tmp2 * fjac[i][j][k-1][2][1]
- tmp1 * njac[i][j][k-1][2][1];
lhs[i][j][k][AA][2][2] = - tmp2 * fjac[i][j][k-1][2][2]
- tmp1 * njac[i][j][k-1][2][2]
- tmp1 * dz3;
lhs[i][j][k][AA][2][3] = - tmp2 * fjac[i][j][k-1][2][3]
- tmp1 * njac[i][j][k-1][2][3];
lhs[i][j][k][AA][2][4] = - tmp2 * fjac[i][j][k-1][2][4]
- tmp1 * njac[i][j][k-1][2][4];
lhs[i][j][k][AA][3][0] = - tmp2 * fjac[i][j][k-1][3][0]
- tmp1 * njac[i][j][k-1][3][0];
lhs[i][j][k][AA][3][1] = - tmp2 * fjac[i][j][k-1][3][1]
- tmp1 * njac[i][j][k-1][3][1];
lhs[i][j][k][AA][3][2] = - tmp2 * fjac[i][j][k-1][3][2]
- tmp1 * njac[i][j][k-1][3][2];
lhs[i][j][k][AA][3][3] = - tmp2 * fjac[i][j][k-1][3][3]
- tmp1 * njac[i][j][k-1][3][3]
- tmp1 * dz4;
lhs[i][j][k][AA][3][4] = - tmp2 * fjac[i][j][k-1][3][4]
- tmp1 * njac[i][j][k-1][3][4];
lhs[i][j][k][AA][4][0] = - tmp2 * fjac[i][j][k-1][4][0]
- tmp1 * njac[i][j][k-1][4][0];
lhs[i][j][k][AA][4][1] = - tmp2 * fjac[i][j][k-1][4][1]
- tmp1 * njac[i][j][k-1][4][1];
lhs[i][j][k][AA][4][2] = - tmp2 * fjac[i][j][k-1][4][2]
- tmp1 * njac[i][j][k-1][4][2];
lhs[i][j][k][AA][4][3] = - tmp2 * fjac[i][j][k-1][4][3]
- tmp1 * njac[i][j][k-1][4][3];
lhs[i][j][k][AA][4][4] = - tmp2 * fjac[i][j][k-1][4][4]
- tmp1 * njac[i][j][k-1][4][4]
- tmp1 * dz5;
/* Diagonal block: identity + 2*dt*tz1*(njac + dissipation);
c the inviscid fjac contribution cancels on the diagonal. */
lhs[i][j][k][BB][0][0] = 1.0
+ tmp1 * 2.0 * njac[i][j][k][0][0]
+ tmp1 * 2.0 * dz1;
lhs[i][j][k][BB][0][1] = tmp1 * 2.0 * njac[i][j][k][0][1];
lhs[i][j][k][BB][0][2] = tmp1 * 2.0 * njac[i][j][k][0][2];
lhs[i][j][k][BB][0][3] = tmp1 * 2.0 * njac[i][j][k][0][3];
lhs[i][j][k][BB][0][4] = tmp1 * 2.0 * njac[i][j][k][0][4];
lhs[i][j][k][BB][1][0] = tmp1 * 2.0 * njac[i][j][k][1][0];
lhs[i][j][k][BB][1][1] = 1.0
+ tmp1 * 2.0 * njac[i][j][k][1][1]
+ tmp1 * 2.0 * dz2;
lhs[i][j][k][BB][1][2] = tmp1 * 2.0 * njac[i][j][k][1][2];
lhs[i][j][k][BB][1][3] = tmp1 * 2.0 * njac[i][j][k][1][3];
lhs[i][j][k][BB][1][4] = tmp1 * 2.0 * njac[i][j][k][1][4];
lhs[i][j][k][BB][2][0] = tmp1 * 2.0 * njac[i][j][k][2][0];
lhs[i][j][k][BB][2][1] = tmp1 * 2.0 * njac[i][j][k][2][1];
lhs[i][j][k][BB][2][2] = 1.0
+ tmp1 * 2.0 * njac[i][j][k][2][2]
+ tmp1 * 2.0 * dz3;
lhs[i][j][k][BB][2][3] = tmp1 * 2.0 * njac[i][j][k][2][3];
lhs[i][j][k][BB][2][4] = tmp1 * 2.0 * njac[i][j][k][2][4];
lhs[i][j][k][BB][3][0] = tmp1 * 2.0 * njac[i][j][k][3][0];
lhs[i][j][k][BB][3][1] = tmp1 * 2.0 * njac[i][j][k][3][1];
lhs[i][j][k][BB][3][2] = tmp1 * 2.0 * njac[i][j][k][3][2];
lhs[i][j][k][BB][3][3] = 1.0
+ tmp1 * 2.0 * njac[i][j][k][3][3]
+ tmp1 * 2.0 * dz4;
lhs[i][j][k][BB][3][4] = tmp1 * 2.0 * njac[i][j][k][3][4];
lhs[i][j][k][BB][4][0] = tmp1 * 2.0 * njac[i][j][k][4][0];
lhs[i][j][k][BB][4][1] = tmp1 * 2.0 * njac[i][j][k][4][1];
lhs[i][j][k][BB][4][2] = tmp1 * 2.0 * njac[i][j][k][4][2];
lhs[i][j][k][BB][4][3] = tmp1 * 2.0 * njac[i][j][k][4][3];
lhs[i][j][k][BB][4][4] = 1.0
+ tmp1 * 2.0 * njac[i][j][k][4][4]
+ tmp1 * 2.0 * dz5;
lhs[i][j][k][CC][0][0] = tmp2 * fjac[i][j][k+1][0][0]
- tmp1 * njac[i][j][k+1][0][0]
- tmp1 * dz1;
lhs[i][j][k][CC][0][1] = tmp2 * fjac[i][j][k+1][0][1]
- tmp1 * njac[i][j][k+1][0][1];
lhs[i][j][k][CC][0][2] = tmp2 * fjac[i][j][k+1][0][2]
- tmp1 * njac[i][j][k+1][0][2];
lhs[i][j][k][CC][0][3] = tmp2 * fjac[i][j][k+1][0][3]
- tmp1 * njac[i][j][k+1][0][3];
lhs[i][j][k][CC][0][4] = tmp2 * fjac[i][j][k+1][0][4]
- tmp1 * njac[i][j][k+1][0][4];
lhs[i][j][k][CC][1][0] = tmp2 * fjac[i][j][k+1][1][0]
- tmp1 * njac[i][j][k+1][1][0];
lhs[i][j][k][CC][1][1] = tmp2 * fjac[i][j][k+1][1][1]
- tmp1 * njac[i][j][k+1][1][1]
- tmp1 * dz2;
lhs[i][j][k][CC][1][2] = tmp2 * fjac[i][j][k+1][1][2]
- tmp1 * njac[i][j][k+1][1][2];
lhs[i][j][k][CC][1][3] = tmp2 * fjac[i][j][k+1][1][3]
- tmp1 * njac[i][j][k+1][1][3];
lhs[i][j][k][CC][1][4] = tmp2 * fjac[i][j][k+1][1][4]
- tmp1 * njac[i][j][k+1][1][4];
lhs[i][j][k][CC][2][0] = tmp2 * fjac[i][j][k+1][2][0]
- tmp1 * njac[i][j][k+1][2][0];
lhs[i][j][k][CC][2][1] = tmp2 * fjac[i][j][k+1][2][1]
- tmp1 * njac[i][j][k+1][2][1];
lhs[i][j][k][CC][2][2] = tmp2 * fjac[i][j][k+1][2][2]
- tmp1 * njac[i][j][k+1][2][2]
- tmp1 * dz3;
lhs[i][j][k][CC][2][3] = tmp2 * fjac[i][j][k+1][2][3]
- tmp1 * njac[i][j][k+1][2][3];
lhs[i][j][k][CC][2][4] = tmp2 * fjac[i][j][k+1][2][4]
- tmp1 * njac[i][j][k+1][2][4];
lhs[i][j][k][CC][3][0] = tmp2 * fjac[i][j][k+1][3][0]
- tmp1 * njac[i][j][k+1][3][0];
lhs[i][j][k][CC][3][1] = tmp2 * fjac[i][j][k+1][3][1]
- tmp1 * njac[i][j][k+1][3][1];
lhs[i][j][k][CC][3][2] = tmp2 * fjac[i][j][k+1][3][2]
- tmp1 * njac[i][j][k+1][3][2];
lhs[i][j][k][CC][3][3] = tmp2 * fjac[i][j][k+1][3][3]
- tmp1 * njac[i][j][k+1][3][3]
- tmp1 * dz4;
lhs[i][j][k][CC][3][4] = tmp2 * fjac[i][j][k+1][3][4]
- tmp1 * njac[i][j][k+1][3][4];
lhs[i][j][k][CC][4][0] = tmp2 * fjac[i][j][k+1][4][0]
- tmp1 * njac[i][j][k+1][4][0];
lhs[i][j][k][CC][4][1] = tmp2 * fjac[i][j][k+1][4][1]
- tmp1 * njac[i][j][k+1][4][1];
lhs[i][j][k][CC][4][2] = tmp2 * fjac[i][j][k+1][4][2]
- tmp1 * njac[i][j][k+1][4][2];
lhs[i][j][k][CC][4][3] = tmp2 * fjac[i][j][k+1][4][3]
- tmp1 * njac[i][j][k+1][4][3];
lhs[i][j][k][CC][4][4] = tmp2 * fjac[i][j][k+1][4][4]
- tmp1 * njac[i][j][k+1][4][4]
- tmp1 * dz5;
}
}
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/* compute_rhs: build the explicit right-hand side of the BT time step:
c rhs = forcing + second-order central flux differences in xi, eta and
c zeta + fourth-order artificial dissipation, finally scaled by dt.
c Runs inside an enclosing "omp parallel" region (all "omp for" here are
c orphaned worksharing constructs). The "nowait" clauses are safe because
c each group of nowait loops writes disjoint planes of rhs and reads only
c arrays finalized before the group; the implied barrier on the last loop
c of each group orders the next phase. */
static void compute_rhs(void) {
int i, j, k, m;
double rho_inv, uijk, up1, um1, vijk, vp1, vm1, wijk, wp1, wm1;
/*--------------------------------------------------------------------
c compute the reciprocal of density, and the kinetic energy,
c and the speed of sound.
c-------------------------------------------------------------------*/
/* Cache per-point quantities: rho_i = 1/rho, us/vs/ws = velocity
c components, square = rho * |v|^2 / 2, qs = |v|^2 / 2.
c nowait: the next loop touches only rhs/forcing, and its implied
c barrier orders these writes before the flux loops read them. */
#pragma omp for nowait
for (i = 0; i < grid_points[0]; i++) {
for (j = 0; j < grid_points[1]; j++) {
for (k = 0; k < grid_points[2]; k++) {
rho_inv = 1.0/u[i][j][k][0];
rho_i[i][j][k] = rho_inv;
us[i][j][k] = u[i][j][k][1] * rho_inv;
vs[i][j][k] = u[i][j][k][2] * rho_inv;
ws[i][j][k] = u[i][j][k][3] * rho_inv;
square[i][j][k] = 0.5 * (u[i][j][k][1]*u[i][j][k][1] +
u[i][j][k][2]*u[i][j][k][2] +
u[i][j][k][3]*u[i][j][k][3] ) * rho_inv;
qs[i][j][k] = square[i][j][k] * rho_inv;
}
}
}
/*--------------------------------------------------------------------
c copy the exact forcing term to the right hand side; because
c this forcing term is known, we can store it on the whole grid
c including the boundary
c-------------------------------------------------------------------*/
#pragma omp for
for (i = 0; i < grid_points[0]; i++) {
for (j = 0; j < grid_points[1]; j++) {
for (k = 0; k < grid_points[2]; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = forcing[i][j][k][m];
}
}
}
}
/*--------------------------------------------------------------------
c compute xi-direction fluxes
c-------------------------------------------------------------------*/
/* Second-order central differences of the xi-direction inviscid and
c viscous fluxes, accumulated into rhs at interior points. */
#pragma omp for
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
for (k = 1; k < grid_points[2]-1; k++) {
uijk = us[i][j][k];
up1 = us[i+1][j][k];
um1 = us[i-1][j][k];
rhs[i][j][k][0] = rhs[i][j][k][0] + dx1tx1 *
(u[i+1][j][k][0] - 2.0*u[i][j][k][0] +
u[i-1][j][k][0]) -
tx2 * (u[i+1][j][k][1] - u[i-1][j][k][1]);
rhs[i][j][k][1] = rhs[i][j][k][1] + dx2tx1 *
(u[i+1][j][k][1] - 2.0*u[i][j][k][1] +
u[i-1][j][k][1]) +
xxcon2*con43 * (up1 - 2.0*uijk + um1) -
tx2 * (u[i+1][j][k][1]*up1 -
u[i-1][j][k][1]*um1 +
(u[i+1][j][k][4]- square[i+1][j][k]-
u[i-1][j][k][4]+ square[i-1][j][k])*
c2);
rhs[i][j][k][2] = rhs[i][j][k][2] + dx3tx1 *
(u[i+1][j][k][2] - 2.0*u[i][j][k][2] +
u[i-1][j][k][2]) +
xxcon2 * (vs[i+1][j][k] - 2.0*vs[i][j][k] +
vs[i-1][j][k]) -
tx2 * (u[i+1][j][k][2]*up1 -
u[i-1][j][k][2]*um1);
rhs[i][j][k][3] = rhs[i][j][k][3] + dx4tx1 *
(u[i+1][j][k][3] - 2.0*u[i][j][k][3] +
u[i-1][j][k][3]) +
xxcon2 * (ws[i+1][j][k] - 2.0*ws[i][j][k] +
ws[i-1][j][k]) -
tx2 * (u[i+1][j][k][3]*up1 -
u[i-1][j][k][3]*um1);
rhs[i][j][k][4] = rhs[i][j][k][4] + dx5tx1 *
(u[i+1][j][k][4] - 2.0*u[i][j][k][4] +
u[i-1][j][k][4]) +
xxcon3 * (qs[i+1][j][k] - 2.0*qs[i][j][k] +
qs[i-1][j][k]) +
xxcon4 * (up1*up1 - 2.0*uijk*uijk +
um1*um1) +
xxcon5 * (u[i+1][j][k][4]*rho_i[i+1][j][k] -
2.0*u[i][j][k][4]*rho_i[i][j][k] +
u[i-1][j][k][4]*rho_i[i-1][j][k]) -
tx2 * ( (c1*u[i+1][j][k][4] -
c2*square[i+1][j][k])*up1 -
(c1*u[i-1][j][k][4] -
c2*square[i-1][j][k])*um1 );
}
}
}
/*--------------------------------------------------------------------
c add fourth order xi-direction dissipation
c-------------------------------------------------------------------*/
/* Fourth-order dissipation uses one-sided stencils near the i
c boundaries (i = 1, 2, n-3, n-2) and the full five-point stencil in
c the interior. The nowait loops write disjoint i-planes and read only
c u, so they are independent; the last loop's barrier ends the phase. */
i = 1;
#pragma omp for nowait
for (j = 1; j < grid_points[1]-1; j++) {
for (k = 1; k < grid_points[2]-1; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m]- dssp *
( 5.0*u[i][j][k][m] - 4.0*u[i+1][j][k][m] +
u[i+2][j][k][m]);
}
}
}
i = 2;
#pragma omp for nowait
for (j = 1; j < grid_points[1]-1; j++) {
for (k = 1; k < grid_points[2]-1; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
(-4.0*u[i-1][j][k][m] + 6.0*u[i][j][k][m] -
4.0*u[i+1][j][k][m] + u[i+2][j][k][m]);
}
}
}
#pragma omp for nowait
for (i = 3; i < grid_points[0]-3; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
for (k = 1; k < grid_points[2]-1; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
( u[i-2][j][k][m] - 4.0*u[i-1][j][k][m] +
6.0*u[i][j][k][m] - 4.0*u[i+1][j][k][m] +
u[i+2][j][k][m] );
}
}
}
}
i = grid_points[0]-3;
#pragma omp for nowait
for (j = 1; j < grid_points[1]-1; j++) {
for (k = 1; k < grid_points[2]-1; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
( u[i-2][j][k][m] - 4.0*u[i-1][j][k][m] +
6.0*u[i][j][k][m] - 4.0*u[i+1][j][k][m] );
}
}
}
i = grid_points[0]-2;
#pragma omp for
for (j = 1; j < grid_points[1]-1; j++) {
for (k = 1; k < grid_points[2]-1; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
( u[i-2][j][k][m] - 4.*u[i-1][j][k][m] +
5.0*u[i][j][k][m] );
}
}
}
/*--------------------------------------------------------------------
c compute eta-direction fluxes
c-------------------------------------------------------------------*/
/* Same structure as the xi phase, differencing in j; con43 now
c multiplies the v-velocity (normal-direction) viscous term. */
#pragma omp for
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
for (k = 1; k < grid_points[2]-1; k++) {
vijk = vs[i][j][k];
vp1 = vs[i][j+1][k];
vm1 = vs[i][j-1][k];
rhs[i][j][k][0] = rhs[i][j][k][0] + dy1ty1 *
(u[i][j+1][k][0] - 2.0*u[i][j][k][0] +
u[i][j-1][k][0]) -
ty2 * (u[i][j+1][k][2] - u[i][j-1][k][2]);
rhs[i][j][k][1] = rhs[i][j][k][1] + dy2ty1 *
(u[i][j+1][k][1] - 2.0*u[i][j][k][1] +
u[i][j-1][k][1]) +
yycon2 * (us[i][j+1][k] - 2.0*us[i][j][k] +
us[i][j-1][k]) -
ty2 * (u[i][j+1][k][1]*vp1 -
u[i][j-1][k][1]*vm1);
rhs[i][j][k][2] = rhs[i][j][k][2] + dy3ty1 *
(u[i][j+1][k][2] - 2.0*u[i][j][k][2] +
u[i][j-1][k][2]) +
yycon2*con43 * (vp1 - 2.0*vijk + vm1) -
ty2 * (u[i][j+1][k][2]*vp1 -
u[i][j-1][k][2]*vm1 +
(u[i][j+1][k][4] - square[i][j+1][k] -
u[i][j-1][k][4] + square[i][j-1][k])
*c2);
rhs[i][j][k][3] = rhs[i][j][k][3] + dy4ty1 *
(u[i][j+1][k][3] - 2.0*u[i][j][k][3] +
u[i][j-1][k][3]) +
yycon2 * (ws[i][j+1][k] - 2.0*ws[i][j][k] +
ws[i][j-1][k]) -
ty2 * (u[i][j+1][k][3]*vp1 -
u[i][j-1][k][3]*vm1);
rhs[i][j][k][4] = rhs[i][j][k][4] + dy5ty1 *
(u[i][j+1][k][4] - 2.0*u[i][j][k][4] +
u[i][j-1][k][4]) +
yycon3 * (qs[i][j+1][k] - 2.0*qs[i][j][k] +
qs[i][j-1][k]) +
yycon4 * (vp1*vp1 - 2.0*vijk*vijk +
vm1*vm1) +
yycon5 * (u[i][j+1][k][4]*rho_i[i][j+1][k] -
2.0*u[i][j][k][4]*rho_i[i][j][k] +
u[i][j-1][k][4]*rho_i[i][j-1][k]) -
ty2 * ((c1*u[i][j+1][k][4] -
c2*square[i][j+1][k]) * vp1 -
(c1*u[i][j-1][k][4] -
c2*square[i][j-1][k]) * vm1);
}
}
}
/*--------------------------------------------------------------------
c add fourth order eta-direction dissipation
c-------------------------------------------------------------------*/
/* One-sided stencils at j = 1, 2, n-3, n-2; full stencil inside.
c nowait loops write disjoint j-planes and read only u. */
j = 1;
#pragma omp for nowait
for (i = 1; i < grid_points[0]-1; i++) {
for (k = 1; k < grid_points[2]-1; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m]- dssp *
( 5.0*u[i][j][k][m] - 4.0*u[i][j+1][k][m] +
u[i][j+2][k][m]);
}
}
}
j = 2;
#pragma omp for nowait
for (i = 1; i < grid_points[0]-1; i++) {
for (k = 1; k < grid_points[2]-1; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
(-4.0*u[i][j-1][k][m] + 6.0*u[i][j][k][m] -
4.0*u[i][j+1][k][m] + u[i][j+2][k][m]);
}
}
}
#pragma omp for nowait
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 3; j < grid_points[1]-3; j++) {
for (k = 1; k < grid_points[2]-1; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
( u[i][j-2][k][m] - 4.0*u[i][j-1][k][m] +
6.0*u[i][j][k][m] - 4.0*u[i][j+1][k][m] +
u[i][j+2][k][m] );
}
}
}
}
j = grid_points[1]-3;
#pragma omp for nowait
for (i = 1; i < grid_points[0]-1; i++) {
for (k = 1; k < grid_points[2]-1; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
( u[i][j-2][k][m] - 4.0*u[i][j-1][k][m] +
6.0*u[i][j][k][m] - 4.0*u[i][j+1][k][m] );
}
}
}
j = grid_points[1]-2;
#pragma omp for
for (i = 1; i < grid_points[0]-1; i++) {
for (k = 1; k < grid_points[2]-1; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
( u[i][j-2][k][m] - 4.*u[i][j-1][k][m] +
5.*u[i][j][k][m] );
}
}
}
/*--------------------------------------------------------------------
c compute zeta-direction fluxes
c-------------------------------------------------------------------*/
/* Same structure, differencing in k; con43 multiplies the w-velocity
c (normal-direction) viscous term. */
#pragma omp for
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
for (k = 1; k < grid_points[2]-1; k++) {
wijk = ws[i][j][k];
wp1 = ws[i][j][k+1];
wm1 = ws[i][j][k-1];
rhs[i][j][k][0] = rhs[i][j][k][0] + dz1tz1 *
(u[i][j][k+1][0] - 2.0*u[i][j][k][0] +
u[i][j][k-1][0]) -
tz2 * (u[i][j][k+1][3] - u[i][j][k-1][3]);
rhs[i][j][k][1] = rhs[i][j][k][1] + dz2tz1 *
(u[i][j][k+1][1] - 2.0*u[i][j][k][1] +
u[i][j][k-1][1]) +
zzcon2 * (us[i][j][k+1] - 2.0*us[i][j][k] +
us[i][j][k-1]) -
tz2 * (u[i][j][k+1][1]*wp1 -
u[i][j][k-1][1]*wm1);
rhs[i][j][k][2] = rhs[i][j][k][2] + dz3tz1 *
(u[i][j][k+1][2] - 2.0*u[i][j][k][2] +
u[i][j][k-1][2]) +
zzcon2 * (vs[i][j][k+1] - 2.0*vs[i][j][k] +
vs[i][j][k-1]) -
tz2 * (u[i][j][k+1][2]*wp1 -
u[i][j][k-1][2]*wm1);
rhs[i][j][k][3] = rhs[i][j][k][3] + dz4tz1 *
(u[i][j][k+1][3] - 2.0*u[i][j][k][3] +
u[i][j][k-1][3]) +
zzcon2*con43 * (wp1 - 2.0*wijk + wm1) -
tz2 * (u[i][j][k+1][3]*wp1 -
u[i][j][k-1][3]*wm1 +
(u[i][j][k+1][4] - square[i][j][k+1] -
u[i][j][k-1][4] + square[i][j][k-1])
*c2);
rhs[i][j][k][4] = rhs[i][j][k][4] + dz5tz1 *
(u[i][j][k+1][4] - 2.0*u[i][j][k][4] +
u[i][j][k-1][4]) +
zzcon3 * (qs[i][j][k+1] - 2.0*qs[i][j][k] +
qs[i][j][k-1]) +
zzcon4 * (wp1*wp1 - 2.0*wijk*wijk +
wm1*wm1) +
zzcon5 * (u[i][j][k+1][4]*rho_i[i][j][k+1] -
2.0*u[i][j][k][4]*rho_i[i][j][k] +
u[i][j][k-1][4]*rho_i[i][j][k-1]) -
tz2 * ( (c1*u[i][j][k+1][4] -
c2*square[i][j][k+1])*wp1 -
(c1*u[i][j][k-1][4] -
c2*square[i][j][k-1])*wm1);
}
}
}
/*--------------------------------------------------------------------
c add fourth order zeta-direction dissipation
c-------------------------------------------------------------------*/
/* One-sided stencils at k = 1, 2, n-3, n-2; full stencil inside.
c nowait loops write disjoint k-planes and read only u. */
k = 1;
#pragma omp for nowait
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m]- dssp *
( 5.0*u[i][j][k][m] - 4.0*u[i][j][k+1][m] +
u[i][j][k+2][m]);
}
}
}
k = 2;
#pragma omp for nowait
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
(-4.0*u[i][j][k-1][m] + 6.0*u[i][j][k][m] -
4.0*u[i][j][k+1][m] + u[i][j][k+2][m]);
}
}
}
#pragma omp for nowait
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
for (k = 3; k < grid_points[2]-3; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
( u[i][j][k-2][m] - 4.0*u[i][j][k-1][m] +
6.0*u[i][j][k][m] - 4.0*u[i][j][k+1][m] +
u[i][j][k+2][m] );
}
}
}
}
k = grid_points[2]-3;
#pragma omp for nowait
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
( u[i][j][k-2][m] - 4.0*u[i][j][k-1][m] +
6.0*u[i][j][k][m] - 4.0*u[i][j][k+1][m] );
}
}
}
k = grid_points[2]-2;
#pragma omp for
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
( u[i][j][k-2][m] - 4.0*u[i][j][k-1][m] +
5.0*u[i][j][k][m] );
}
}
}
/* Final scaling of the interior rhs by the time step dt; the work is
c shared over j with i as the innermost loop. */
#pragma omp for
for (j = 1; j < grid_points[1]-1; j++) {
for (k = 1; k < grid_points[2]-1; k++) {
for (m = 0; m < 5; m++) {
for (i = 1; i < grid_points[0]-1; i++) {
rhs[i][j][k][m] = rhs[i][j][k][m] * dt;
}
}
}
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void set_constants(void) {
/*--------------------------------------------------------------------
c Initializes the global coefficient tables and derived constants used
c by the solver: the exact-solution coefficient table ce, the flow
c constants c1..c5, grid spacings, dissipation factors, and the many
c precomputed products of dt with the spacing terms.
c Writes only file-scope globals; takes no arguments.
c-------------------------------------------------------------------*/
  /* exact-solution coefficients, copied into the global table ce */
  static const double ce_init[5][13] = {
    { 2.0, 0.0, 0.0, 4.0, 5.0, 3.0, 0.5, 0.02, 0.01, 0.03, 0.5, 0.4, 0.3 },
    { 1.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 0.01, 0.03, 0.02, 0.4, 0.3, 0.5 },
    { 2.0, 2.0, 0.0, 0.0, 0.0, 2.0, 3.0, 0.04, 0.03, 0.05, 0.3, 0.5, 0.4 },
    { 2.0, 2.0, 0.0, 0.0, 0.0, 2.0, 3.0, 0.03, 0.05, 0.04, 0.2, 0.1, 0.3 },
    { 5.0, 4.0, 3.0, 2.0, 0.1, 0.4, 0.3, 0.05, 0.04, 0.03, 0.1, 0.3, 0.2 }
  };
  int ii, jj;
  for (ii = 0; ii < 5; ii++) {
    for (jj = 0; jj < 13; jj++) {
      ce[ii][jj] = ce_init[ii][jj];
    }
  }
  /* basic flow constants */
  c1 = 1.4;
  c2 = 0.4;
  c3 = 0.1;
  c4 = 1.0;
  c5 = 1.4;
  /* reciprocal grid spacings in each direction */
  dnxm1 = 1.0 / (double)(grid_points[0]-1);
  dnym1 = 1.0 / (double)(grid_points[1]-1);
  dnzm1 = 1.0 / (double)(grid_points[2]-1);
  /* frequently used products of the flow constants */
  c1c2 = c1 * c2;
  c1c5 = c1 * c5;
  c3c4 = c3 * c4;
  c1345 = c1c5 * c3c4;
  conz1 = (1.0-c1c5);
  /* second-difference, central-difference and first-order factors */
  tx1 = 1.0 / (dnxm1 * dnxm1);
  tx2 = 1.0 / (2.0 * dnxm1);
  tx3 = 1.0 / dnxm1;
  ty1 = 1.0 / (dnym1 * dnym1);
  ty2 = 1.0 / (2.0 * dnym1);
  ty3 = 1.0 / dnym1;
  tz1 = 1.0 / (dnzm1 * dnzm1);
  tz2 = 1.0 / (2.0 * dnzm1);
  tz3 = 1.0 / dnzm1;
  /* per-equation diffusion coefficients */
  dx1 = 0.75;
  dx2 = 0.75;
  dx3 = 0.75;
  dx4 = 0.75;
  dx5 = 0.75;
  dy1 = 0.75;
  dy2 = 0.75;
  dy3 = 0.75;
  dy4 = 0.75;
  dy5 = 0.75;
  dz1 = 1.0;
  dz2 = 1.0;
  dz3 = 1.0;
  dz4 = 1.0;
  dz5 = 1.0;
  dxmax = max(dx3, dx4);
  dymax = max(dy2, dy4);
  dzmax = max(dz2, dz3);
  /* fourth-order dissipation coefficient and its multiples */
  dssp = 0.25 * max(dx1, max(dy1, dz1) );
  c4dssp = 4.0 * dssp;
  c5dssp = 5.0 * dssp;
  /* products with the time step dt */
  dttx1 = dt*tx1;
  dttx2 = dt*tx2;
  dtty1 = dt*ty1;
  dtty2 = dt*ty2;
  dttz1 = dt*tz1;
  dttz2 = dt*tz2;
  c2dttx1 = 2.0*dttx1;
  c2dtty1 = 2.0*dtty1;
  c2dttz1 = 2.0*dttz1;
  dtdssp = dt*dssp;
  comz1 = dtdssp;
  comz4 = 4.0*dtdssp;
  comz5 = 5.0*dtdssp;
  comz6 = 6.0*dtdssp;
  c3c4tx3 = c3c4*tx3;
  c3c4ty3 = c3c4*ty3;
  c3c4tz3 = c3c4*tz3;
  dx1tx1 = dx1*tx1;
  dx2tx1 = dx2*tx1;
  dx3tx1 = dx3*tx1;
  dx4tx1 = dx4*tx1;
  dx5tx1 = dx5*tx1;
  dy1ty1 = dy1*ty1;
  dy2ty1 = dy2*ty1;
  dy3ty1 = dy3*ty1;
  dy4ty1 = dy4*ty1;
  dy5ty1 = dy5*ty1;
  dz1tz1 = dz1*tz1;
  dz2tz1 = dz2*tz1;
  dz3tz1 = dz3*tz1;
  dz4tz1 = dz4*tz1;
  dz5tz1 = dz5*tz1;
  c2iv = 2.5;
  con43 = 4.0/3.0;
  con16 = 1.0/6.0;
  /* viscous-term coefficients in each direction */
  xxcon1 = c3c4tx3*con43*tx3;
  xxcon2 = c3c4tx3*tx3;
  xxcon3 = c3c4tx3*conz1*tx3;
  xxcon4 = c3c4tx3*con16*tx3;
  xxcon5 = c3c4tx3*c1c5*tx3;
  yycon1 = c3c4ty3*con43*ty3;
  yycon2 = c3c4ty3*ty3;
  yycon3 = c3c4ty3*conz1*ty3;
  yycon4 = c3c4ty3*con16*ty3;
  yycon5 = c3c4ty3*c1c5*ty3;
  zzcon1 = c3c4tz3*con43*tz3;
  zzcon2 = c3c4tz3*tz3;
  zzcon3 = c3c4tz3*conz1*tz3;
  zzcon4 = c3c4tz3*con16*tz3;
  zzcon5 = c3c4tz3*c1c5*tz3;
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void verify(int no_time_steps, char *cclass, boolean *verified) {
/*--------------------------------------------------------------------
c verification routine: compares the computed residual norms (xcr)
c and solution-error norms (xce) against stored reference values for
c the recognized problem classes.
c
c no_time_steps : number of time steps that were actually run
c cclass        : out - problem class letter ('S','W','A','B','C'),
c                 or 'U' when grid size / step count match no class
c verified      : out - TRUE iff dt and every norm are within
c                 epsilon (relative) of the reference values
c-------------------------------------------------------------------*/
double xcrref[5],xceref[5],xcrdif[5],xcedif[5],
epsilon, xce[5], xcr[5], dtref;
int m;
/*--------------------------------------------------------------------
c tolerance level
c-------------------------------------------------------------------*/
epsilon = 1.0e-08;
/*--------------------------------------------------------------------
c compute the error norm and the residual norm, and exit if not printing
c-------------------------------------------------------------------*/
error_norm(xce);
compute_rhs();
rhs_norm(xcr);
/* rhs was scaled by dt in compute_rhs' caller path; undo for comparison */
for (m = 0; m < 5; m++) {
xcr[m] = xcr[m] / dt;
}
/* default to "unknown class"; overwritten below when a class matches */
*cclass = 'U';
*verified = TRUE;
for (m = 0; m < 5; m++) {
xcrref[m] = 1.0;
xceref[m] = 1.0;
}
/*--------------------------------------------------------------------
c reference data for 12X12X12 grids after 60 time steps, with DT = 1.0d-02
c (comment fixed: the check below requires no_time_steps == 60, not 100)
c-------------------------------------------------------------------*/
if (grid_points[0] == 12 &&
grid_points[1] == 12 &&
grid_points[2] == 12 &&
no_time_steps == 60) {
*cclass = 'S';
dtref = 1.0e-2;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of residual.
c-------------------------------------------------------------------*/
xcrref[0] = 1.7034283709541311e-01;
xcrref[1] = 1.2975252070034097e-02;
xcrref[2] = 3.2527926989486055e-02;
xcrref[3] = 2.6436421275166801e-02;
xcrref[4] = 1.9211784131744430e-01;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of solution error.
c-------------------------------------------------------------------*/
xceref[0] = 4.9976913345811579e-04;
xceref[1] = 4.5195666782961927e-05;
xceref[2] = 7.3973765172921357e-05;
xceref[3] = 7.3821238632439731e-05;
xceref[4] = 8.9269630987491446e-04;
/*--------------------------------------------------------------------
c reference data for 24X24X24 grids after 200 time steps, with DT = 0.8d-3
c-------------------------------------------------------------------*/
} else if (grid_points[0] == 24 &&
grid_points[1] == 24 &&
grid_points[2] == 24 &&
no_time_steps == 200) {
*cclass = 'W';
dtref = 0.8e-3;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of residual.
c-------------------------------------------------------------------*/
xcrref[0] = 0.1125590409344e+03;
xcrref[1] = 0.1180007595731e+02;
xcrref[2] = 0.2710329767846e+02;
xcrref[3] = 0.2469174937669e+02;
xcrref[4] = 0.2638427874317e+03;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of solution error.
c-------------------------------------------------------------------*/
xceref[0] = 0.4419655736008e+01;
xceref[1] = 0.4638531260002e+00;
xceref[2] = 0.1011551749967e+01;
xceref[3] = 0.9235878729944e+00;
xceref[4] = 0.1018045837718e+02;
/*--------------------------------------------------------------------
c reference data for 64X64X64 grids after 200 time steps, with DT = 0.8d-3
c-------------------------------------------------------------------*/
} else if (grid_points[0] == 64 &&
grid_points[1] == 64 &&
grid_points[2] == 64 &&
no_time_steps == 200) {
*cclass = 'A';
dtref = 0.8e-3;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of residual.
c-------------------------------------------------------------------*/
xcrref[0] = 1.0806346714637264e+02;
xcrref[1] = 1.1319730901220813e+01;
xcrref[2] = 2.5974354511582465e+01;
xcrref[3] = 2.3665622544678910e+01;
xcrref[4] = 2.5278963211748344e+02;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of solution error.
c-------------------------------------------------------------------*/
xceref[0] = 4.2348416040525025e+00;
xceref[1] = 4.4390282496995698e-01;
xceref[2] = 9.6692480136345650e-01;
xceref[3] = 8.8302063039765474e-01;
xceref[4] = 9.7379901770829278e+00;
/*--------------------------------------------------------------------
c reference data for 102X102X102 grids after 200 time steps,
c with DT = 3.0d-04
c-------------------------------------------------------------------*/
} else if (grid_points[0] == 102 &&
grid_points[1] == 102 &&
grid_points[2] == 102 &&
no_time_steps == 200) {
*cclass = 'B';
dtref = 3.0e-4;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of residual.
c-------------------------------------------------------------------*/
xcrref[0] = 1.4233597229287254e+03;
xcrref[1] = 9.9330522590150238e+01;
xcrref[2] = 3.5646025644535285e+02;
xcrref[3] = 3.2485447959084092e+02;
xcrref[4] = 3.2707541254659363e+03;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of solution error.
c-------------------------------------------------------------------*/
xceref[0] = 5.2969847140936856e+01;
xceref[1] = 4.4632896115670668e+00;
xceref[2] = 1.3122573342210174e+01;
xceref[3] = 1.2006925323559144e+01;
xceref[4] = 1.2459576151035986e+02;
/*--------------------------------------------------------------------
c reference data for 162X162X162 grids after 200 time steps,
c with DT = 1.0d-04
c-------------------------------------------------------------------*/
} else if (grid_points[0] == 162 &&
grid_points[1] == 162 &&
grid_points[2] == 162 &&
no_time_steps == 200) {
*cclass = 'C';
dtref = 1.0e-4;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of residual.
c-------------------------------------------------------------------*/
xcrref[0] = 0.62398116551764615e+04;
xcrref[1] = 0.50793239190423964e+03;
xcrref[2] = 0.15423530093013596e+04;
xcrref[3] = 0.13302387929291190e+04;
xcrref[4] = 0.11604087428436455e+05;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of solution error.
c-------------------------------------------------------------------*/
xceref[0] = 0.16462008369091265e+03;
xceref[1] = 0.11497107903824313e+02;
xceref[2] = 0.41207446207461508e+02;
xceref[3] = 0.37087651059694167e+02;
xceref[4] = 0.36211053051841265e+03;
} else {
*verified = FALSE;
}
/*--------------------------------------------------------------------
c verification test for residuals if gridsize is either 12X12X12 or
c 24X24X24 or 64X64X64 or 102X102X102 or 162X162X162
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c Compute the relative difference of solution values and the known
c reference values (xcrref/xceref are 1.0 for class 'U', so the
c divisions below are always safe).
c-------------------------------------------------------------------*/
for (m = 0; m < 5; m++) {
xcrdif[m] = fabs((xcr[m]-xcrref[m])/xcrref[m]);
xcedif[m] = fabs((xce[m]-xceref[m])/xceref[m]);
}
/*--------------------------------------------------------------------
c Output the comparison of computed results to known cases.
c-------------------------------------------------------------------*/
if (*cclass != 'U') {
printf(" Verification being performed for class %1c\n", *cclass);
printf(" accuracy setting for epsilon = %20.13e\n", epsilon);
/* a dt mismatch invalidates the whole comparison */
if (fabs(dt-dtref) > epsilon) {
*verified = FALSE;
*cclass = 'U';
printf(" DT does not match the reference value of %15.8e\n", dtref);
}
} else {
printf(" Unknown class\n");
}
if (*cclass != 'U') {
printf(" Comparison of RMS-norms of residual\n");
} else {
printf(" RMS-norms of residual\n");
}
for (m = 0; m < 5; m++) {
if (*cclass == 'U') {
printf(" %2d%20.13e\n", m, xcr[m]);
} else if (xcrdif[m] > epsilon) {
*verified = FALSE;
printf(" FAILURE: %2d%20.13e%20.13e%20.13e\n",
m, xcr[m], xcrref[m], xcrdif[m]);
} else {
printf(" %2d%20.13e%20.13e%20.13e\n",
m, xcr[m], xcrref[m], xcrdif[m]);
}
}
if (*cclass != 'U') {
printf(" Comparison of RMS-norms of solution error\n");
} else {
printf(" RMS-norms of solution error\n");
}
for (m = 0; m < 5; m++) {
if (*cclass == 'U') {
printf(" %2d%20.13e\n", m, xce[m]);
} else if (xcedif[m] > epsilon) {
*verified = FALSE;
printf(" FAILURE: %2d%20.13e%20.13e%20.13e\n",
m, xce[m], xceref[m], xcedif[m]);
} else {
printf(" %2d%20.13e%20.13e%20.13e\n",
m, xce[m], xceref[m], xcedif[m]);
}
}
if (*cclass == 'U') {
printf(" No reference values provided\n");
printf(" No verification performed\n");
} else if (*verified == TRUE) {
printf(" Verification Successful\n");
} else {
printf(" Verification failed\n");
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void x_solve(void) {
/*--------------------------------------------------------------------
c
c Performs line solves in X direction by first factoring
c the block-tridiagonal matrix into an upper triangular matrix,
c and then performing back substitution to solve for the unknown
c vectors of each line.
c
c Make sure we treat elements zero to cell_size in the direction
c of the sweep.
c
c Three phases: build the block-tridiagonal coefficients (lhsx),
c forward-eliminate (x_solve_cell), then back-substitute
c (x_backsubstitute). No arguments; operates on file-scope arrays.
c-------------------------------------------------------------------*/
lhsx();
x_solve_cell();
x_backsubstitute();
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void x_backsubstitute(void) {
/*--------------------------------------------------------------------
c Back-substitution sweep in the x direction. x_solve_cell leaves the
c last i-plane solved; walking i downward, subtract the known
c contribution of plane i+1 (through the C coupling block) from each
c rhs block so that plane i becomes the solution in turn.
c-------------------------------------------------------------------*/
  int i, j, k, row, col;
  for (i = grid_points[0]-2; i >= 0; i--) {
#pragma omp for
    for (j = 1; j < grid_points[1]-1; j++) {
      for (k = 1; k < grid_points[2]-1; k++) {
        for (row = 0; row < BLOCK_SIZE; row++) {
          for (col = 0; col < BLOCK_SIZE; col++) {
            rhs[i][j][k][row] -= lhs[i][j][k][CC][row][col]*rhs[i+1][j][k][col];
          }
        }
      }
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void x_solve_cell(void) {
/*--------------------------------------------------------------------
c Performs Gaussian elimination (forward sweep of the block
c tridiagonal system) on this cell along the i direction.
c
c assumes that unpacking routines for non-first cells
c preload C' and rhs' from previous cell.
c
c assumed send happens outside this routine, but that
c c'(IMAX) and rhs'(IMAX) will be sent to next cell
c-------------------------------------------------------------------*/
  int i, j, k, isize;
  isize = grid_points[0]-1;
/*--------------------------------------------------------------------
c outer most do loops - sweeping in i direction
c-------------------------------------------------------------------*/
#pragma omp for
  for (j = 1; j < grid_points[1]-1; j++) {
    for (k = 1; k < grid_points[2]-1; k++) {
/*--------------------------------------------------------------------
c multiply c(0,j,k) by b_inverse and copy back to c
c multiply rhs(0) by b_inverse(0) and copy to rhs
c-------------------------------------------------------------------*/
      binvcrhs( lhs[0][j][k][BB],
                lhs[0][j][k][CC],
                rhs[0][j][k] );
    }
  }
/*--------------------------------------------------------------------
c begin inner most do loop
c do all the elements of the cell unless last
c-------------------------------------------------------------------*/
  for (i = 1; i < isize; i++) {
#pragma omp for
    for (j = 1; j < grid_points[1]-1; j++) {
      for (k = 1; k < grid_points[2]-1; k++) {
/*--------------------------------------------------------------------
c rhs(i) = rhs(i) - A*rhs(i-1)
c-------------------------------------------------------------------*/
        matvec_sub(lhs[i][j][k][AA],
                   rhs[i-1][j][k], rhs[i][j][k]);
/*--------------------------------------------------------------------
c B(i) = B(i) - C(i-1)*A(i)
c-------------------------------------------------------------------*/
        matmul_sub(lhs[i][j][k][AA],
                   lhs[i-1][j][k][CC],
                   lhs[i][j][k][BB]);
/*--------------------------------------------------------------------
c multiply c(i,j,k) by b_inverse and copy back to c
c multiply rhs(1,j,k) by b_inverse(1,j,k) and copy to rhs
c-------------------------------------------------------------------*/
        binvcrhs( lhs[i][j][k][BB],
                  lhs[i][j][k][CC],
                  rhs[i][j][k] );
      }
    }
  }
/* last plane: eliminate and solve the final block row */
#pragma omp for
  for (j = 1; j < grid_points[1]-1; j++) {
    for (k = 1; k < grid_points[2]-1; k++) {
/*--------------------------------------------------------------------
c rhs(isize) = rhs(isize) - A*rhs(isize-1)
c-------------------------------------------------------------------*/
      matvec_sub(lhs[isize][j][k][AA],
                 rhs[isize-1][j][k], rhs[isize][j][k]);
/*--------------------------------------------------------------------
c B(isize) = B(isize) - C(isize-1)*A(isize)
c-------------------------------------------------------------------*/
      matmul_sub(lhs[isize][j][k][AA],
                 lhs[isize-1][j][k][CC],
                 lhs[isize][j][k][BB]);
/*--------------------------------------------------------------------
c multiply rhs() by b_inverse() and copy to rhs
c FIX: the original indexed with the loop variable i here, which only
c worked because i happens to equal isize after the preceding loop;
c index with isize explicitly like the two calls above.
c-------------------------------------------------------------------*/
      binvrhs( lhs[isize][j][k][BB],
               rhs[isize][j][k] );
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void matvec_sub(double ablock[5][5], double avec[5], double bvec[5]) {
/*--------------------------------------------------------------------
c Subtracts a 5x5 matrix-vector product in place:
c   bvec = bvec - ablock*avec
c The column terms are subtracted left-to-right, matching the
c original unrolled expression exactly.
c-------------------------------------------------------------------*/
  int row, col;
  for (row = 0; row < 5; row++) {
    for (col = 0; col < 5; col++) {
      bvec[row] = bvec[row] - ablock[row][col]*avec[col];
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void matmul_sub(double ablock[5][5], double bblock[5][5],
                       double cblock[5][5]) {
/*--------------------------------------------------------------------
c Subtracts a 5x5 matrix-matrix product in place:
c   cblock = cblock - ablock*bblock
c Loop order (column outer, then row, then inner product index)
c reproduces the original unrolled subtraction order term-for-term.
c-------------------------------------------------------------------*/
  int row, col, t;
  for (col = 0; col < 5; col++) {
    for (row = 0; row < 5; row++) {
      for (t = 0; t < 5; t++) {
        cblock[row][col] = cblock[row][col] - ablock[row][t]*bblock[t][col];
      }
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void binvcrhs(double lhs[5][5], double c[5][5], double r[5]) {
/*--------------------------------------------------------------------
c Gauss-Jordan elimination on the 5x5 block lhs, applying the same
c row operations to the coupling block c and the right-hand side r.
c On return c holds b_inverse*c and r holds b_inverse*r (lhs is
c consumed as scratch). No partial pivoting is performed - the
c diagonal entries are assumed nonzero, as in the original unrolled
c code. For each pivot p: scale row p, then eliminate column p from
c every other row in ascending row order; this reproduces the
c original operation order exactly, so floating-point results are
c bit-identical.
c-------------------------------------------------------------------*/
  int p, q, n;
  double pivot, coeff;
  for (p = 0; p < 5; p++) {
    /* scale the pivot row (columns left of p are never read again) */
    pivot = 1.00/lhs[p][p];
    for (n = p+1; n < 5; n++) {
      lhs[p][n] = lhs[p][n]*pivot;
    }
    for (n = 0; n < 5; n++) {
      c[p][n] = c[p][n]*pivot;
    }
    r[p] = r[p]*pivot;
    /* eliminate column p from every other row */
    for (q = 0; q < 5; q++) {
      if (q == p) continue;
      coeff = lhs[q][p];
      for (n = p+1; n < 5; n++) {
        lhs[q][n] = lhs[q][n] - coeff*lhs[p][n];
      }
      for (n = 0; n < 5; n++) {
        c[q][n] = c[q][n] - coeff*c[p][n];
      }
      r[q] = r[q] - coeff*r[p];
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void binvrhs( double lhs[5][5], double r[5] ) {
/*--------------------------------------------------------------------
c Gauss-Jordan elimination on the 5x5 block lhs applied to the
c right-hand side r: on return r holds b_inverse*r (lhs is consumed
c as scratch). Same elimination schedule as binvcrhs but without a
c coupling block; no partial pivoting (diagonal assumed nonzero).
c The loop order matches the original unrolled code operation-for-
c operation, so floating-point results are bit-identical.
c-------------------------------------------------------------------*/
  int p, q, n;
  double pivot, coeff;
  for (p = 0; p < 5; p++) {
    /* scale the pivot row */
    pivot = 1.00/lhs[p][p];
    for (n = p+1; n < 5; n++) {
      lhs[p][n] = lhs[p][n]*pivot;
    }
    r[p] = r[p]*pivot;
    /* eliminate column p from every other row */
    for (q = 0; q < 5; q++) {
      if (q == p) continue;
      coeff = lhs[q][p];
      for (n = p+1; n < 5; n++) {
        lhs[q][n] = lhs[q][n] - coeff*lhs[p][n];
      }
      r[q] = r[q] - coeff*r[p];
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void y_solve(void) {
/*--------------------------------------------------------------------
c Performs line solves in Y direction by first factoring
c the block-tridiagonal matrix into an upper triangular matrix,
c and then performing back substitution to solve for the unknown
c vectors of each line.
c
c Make sure we treat elements zero to cell_size in the direction
c of the sweep.
c
c Three phases: build the block-tridiagonal coefficients (lhsy),
c forward-eliminate (y_solve_cell), then back-substitute
c (y_backsubstitute). No arguments; operates on file-scope arrays.
c-------------------------------------------------------------------*/
lhsy();
y_solve_cell();
y_backsubstitute();
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void y_backsubstitute(void) {
/*--------------------------------------------------------------------
c Back-substitution sweep in the y direction. y_solve_cell leaves the
c last j-plane solved; walking j downward, subtract the known
c contribution of plane j+1 (through the C coupling block) from each
c rhs block so that plane j becomes the solution in turn.
c-------------------------------------------------------------------*/
  int i, j, k, row, col;
  for (j = grid_points[1]-2; j >= 0; j--) {
#pragma omp for
    for (i = 1; i < grid_points[0]-1; i++) {
      for (k = 1; k < grid_points[2]-1; k++) {
        for (row = 0; row < BLOCK_SIZE; row++) {
          for (col = 0; col < BLOCK_SIZE; col++) {
            rhs[i][j][k][row] -= lhs[i][j][k][CC][row][col]*rhs[i][j+1][k][col];
          }
        }
      }
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void y_solve_cell(void) {
/*--------------------------------------------------------------------
c Performs Gaussian elimination on this cell along the Y lines.
c
c Assumes that unpacking routines for non-first cells preload
c C' and rhs' from the previous cell.  The send of c'(JMAX) and
c rhs'(JMAX) to the next cell happens outside this routine.
c-------------------------------------------------------------------*/
  int i, j, k, jsize;
  const int ilim = grid_points[0]-1;
  const int klim = grid_points[2]-1;
  jsize = grid_points[1]-1;

  /* j == 0: multiply c(i,0,k) by b_inverse and copy back to c;
     multiply rhs(0) by b_inverse(0) and copy to rhs. */
#pragma omp for
  for (i = 1; i < ilim; i++) {
    for (k = 1; k < klim; k++) {
      binvcrhs( lhs[i][0][k][BB],
                lhs[i][0][k][CC],
                rhs[i][0][k] );
    }
  }

  /* Interior planes: eliminate the sub-diagonal block of every
     element of the cell except the last. */
  for (j = 1; j < jsize; j++) {
#pragma omp for
    for (i = 1; i < ilim; i++) {
      for (k = 1; k < klim; k++) {
        /* rhs(j) = rhs(j) - A*rhs(j-1) */
        matvec_sub(lhs[i][j][k][AA],
                   rhs[i][j-1][k], rhs[i][j][k]);
        /* B(j) = B(j) - C(j-1)*A(j) */
        matmul_sub(lhs[i][j][k][AA],
                   lhs[i][j-1][k][CC],
                   lhs[i][j][k][BB]);
        /* scale row j: c and rhs multiplied by b_inverse */
        binvcrhs( lhs[i][j][k][BB],
                  lhs[i][j][k][CC],
                  rhs[i][j][k] );
      }
    }
  }

  /* Last plane (j == jsize): same elimination, but only rhs is
     scaled by b_inverse (no C block beyond the last plane). */
#pragma omp for
  for (i = 1; i < ilim; i++) {
    for (k = 1; k < klim; k++) {
      /* rhs(jsize) = rhs(jsize) - A*rhs(jsize-1) */
      matvec_sub(lhs[i][jsize][k][AA],
                 rhs[i][jsize-1][k], rhs[i][jsize][k]);
      /* B(jsize) = B(jsize) - C(jsize-1)*A(jsize) */
      matmul_sub(lhs[i][jsize][k][AA],
                 lhs[i][jsize-1][k][CC],
                 lhs[i][jsize][k][BB]);
      /* rhs(jsize) = b_inverse(jsize) * rhs(jsize) */
      binvrhs( lhs[i][jsize][k][BB],
               rhs[i][jsize][k] );
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c Performs line solves in Z direction by first factoring the
c block-tridiagonal matrix into an upper triangular matrix, and
c then performing back substitution to solve for the unknown
c vectors of each line.
c
c Make sure we treat elements zero to cell_size in the direction
c of the sweep.
c-------------------------------------------------------------------*/
static void z_solve(void) {
  lhsz();              /* build the block-tridiagonal LHS for Z */
  z_solve_cell();      /* forward elimination                    */
  z_backsubstitute();  /* back substitution                      */
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void z_backsubstitute(void) {
/*--------------------------------------------------------------------
c Back substitution sweep in Z: if last cell, U(ksize)=rhs(ksize)
c was already generated; otherwise U(ksize) comes preloaded from
c backsub_info.  Walking k downward removes the CC coupling to
c plane k+1, leaving the solution in rhs.
c NOTE(review): the orphaned "omp for" suggests this is called from
c inside an enclosing parallel region - confirm at the call site.
c-------------------------------------------------------------------*/
  int i, j, k, m, n;
  const int jlim = grid_points[1]-1;
#pragma omp for
  for (i = 1; i < grid_points[0]-1; i++) {
    for (j = 1; j < jlim; j++) {
      for (k = grid_points[2]-2; k >= 0; k--) {
        for (m = 0; m < BLOCK_SIZE; m++) {
          for (n = 0; n < BLOCK_SIZE; n++) {
            rhs[i][j][k][m] -= lhs[i][j][k][CC][m][n]*rhs[i][j][k+1][n];
          }
        }
      }
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void z_solve_cell(void) {
/*--------------------------------------------------------------------
c Performs Gaussian elimination on this cell along the Z lines.
c
c Assumes that unpacking routines for non-first cells preload
c C' and rhs' from the previous cell.  The send of c'(KMAX) and
c rhs'(KMAX) to the next cell happens outside this routine.
c-------------------------------------------------------------------*/
  int i, j, k, ksize;
  const int ilim = grid_points[0]-1;
  const int jlim = grid_points[1]-1;
  ksize = grid_points[2]-1;

  /* Outermost sweep is in the i direction.
     k == 0: multiply c(i,j,0) by b_inverse and copy back to c;
     multiply rhs(0) by b_inverse(0) and copy to rhs. */
#pragma omp for
  for (i = 1; i < ilim; i++) {
    for (j = 1; j < jlim; j++) {
      binvcrhs( lhs[i][j][0][BB],
                lhs[i][j][0][CC],
                rhs[i][j][0] );
    }
  }

  /* Interior planes: eliminate the sub-diagonal block of every
     element of the cell except the last. */
  for (k = 1; k < ksize; k++) {
#pragma omp for
    for (i = 1; i < ilim; i++) {
      for (j = 1; j < jlim; j++) {
        /* rhs(k) = rhs(k) - A*rhs(k-1) */
        matvec_sub(lhs[i][j][k][AA],
                   rhs[i][j][k-1], rhs[i][j][k]);
        /* B(k) = B(k) - C(k-1)*A(k) */
        matmul_sub(lhs[i][j][k][AA],
                   lhs[i][j][k-1][CC],
                   lhs[i][j][k][BB]);
        /* scale row k: c and rhs multiplied by b_inverse */
        binvcrhs( lhs[i][j][k][BB],
                  lhs[i][j][k][CC],
                  rhs[i][j][k] );
      }
    }
  }

  /* Finish up the special cases for the last plane (k == ksize):
     same elimination, but only rhs is scaled by b_inverse. */
#pragma omp for
  for (i = 1; i < ilim; i++) {
    for (j = 1; j < jlim; j++) {
      /* rhs(ksize) = rhs(ksize) - A*rhs(ksize-1) */
      matvec_sub(lhs[i][j][ksize][AA],
                 rhs[i][j][ksize-1], rhs[i][j][ksize]);
      /* B(ksize) = B(ksize) - C(ksize-1)*A(ksize) */
      matmul_sub(lhs[i][j][ksize][AA],
                 lhs[i][j][ksize-1][CC],
                 lhs[i][j][ksize][BB]);
      /* rhs(ksize) = b_inverse(ksize) * rhs(ksize) */
      binvrhs( lhs[i][j][ksize][BB],
               rhs[i][j][ksize] );
    }
  }
}
|
hmap_mk_loc.c | /*
* Copyright (c) 2019 Ramesh Subramonian <subramonian@gmail.com>
* All rights reserved.
*
* Use is subject to license terms, as specified in the LICENSE file.
*/
//START_INCLUDES
#include "hmap_common.h"
//STOPINCLUDES
#include "_hmap_mk_loc.h"
//START_FOR_CDEF
int
hmap_mk_loc(
    uint32_t *hashes, // input [nkeys]
    uint32_t nkeys, // input
    uint32_t hmap_size, // input
    uint32_t *locs // output [nkeys]
    )
//STOP_FOR_CDEF
{
  // Map each hash to a slot index in [0, hmap_size) with the
  // precomputed fast-division constants instead of a hardware
  // modulo per key.  Embarrassingly parallel over the keys.
  int status = 0;
  uint64_t divinfo = fast_div32_init(hmap_size);
#pragma omp parallel for schedule(static, RH_CHUNK_SIZE)
  for ( uint32_t kidx = 0; kidx < nkeys; kidx++ ) {
    locs[kidx] = fast_rem32(hashes[kidx], hmap_size, divinfo);
  }
  return status;
}
|
residual_based_bdf_scheme.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Vicente Mataix Ferrandiz
//
#if !defined(KRATOS_RESIDUAL_BASED_BDF_SCHEME )
#define KRATOS_RESIDUAL_BASED_BDF_SCHEME
/* System includes */
/* External includes */
/* Project includes */
#include "includes/checks.h"
#include "utilities/time_discretization.h"
#include "solving_strategies/schemes/residual_based_implicit_time_scheme.h"
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
* @class ResidualBasedBDFScheme
* @ingroup KratosCore
* @brief BDF integration scheme (for dynamic problems)
* @details The \f$ n \f$ order Backward Differentiation Formula (BDF) method is a two step \f$ n \f$ order accurate method.
* This scheme is designed to solve a system of the type:
*\f[
* \mathbf{M} \frac{d^2(u_{n0})}{dt^2} + \mathbf{D} \frac{d(un0)}{dt} + \mathbf{K} u_{n0} = \mathbf{f}_{ext}
* \f]
*
* If we call:
*
* - Second derivative:
* -# \f$ \ddot{u}_{ni} \f$ the second derivative at the step i
* - First derivative:
* -# \f$ \dot{u}_{ni} \f$ the first derivative at the step i
* - Third derivative:
* -# \f$ u_{ni} \f$ the variable at the step i
*
* Then we assume:
* \f[ \frac{d^2(u_{n0})}{dt^2} \|t_{n0} = \sum_i c_i \dot{u}_{ni} \f]
* \f[ \frac{d(u_{n0})}{dt} \|t_{n0} = \sum_i c_i u_{n0} \f]
* with for order 2 (BDF2):
* -# \f$ c_0 = \frac{1.5}{dt} \f$
* -# \f$ c_1 = \frac{-2.0}{dt} \f$
* -# \f$ c_2 = \frac{0.5}{dt} \f$
*
* The LHS and RHS can be defined as:
* \f[ RHS = \mathbf{f}_{ext} - \mathbf{M} \frac{d(\dot{u}_{n0})}{dt} - \mathbf{D} \frac{d(u_{n0})}{dt} - \mathbf{K} u_{n0} \f]
* and
* \f[ LHS = \frac{d(-RHS)}{d(u_{n0})} = c_0^2 \mathbf{M} + c_0 \mathbf{D} + K \f]
* @note This implies that elements are expected to be written in terms
* of a variable with two time derivatives
* <a href="https://mediatum.ub.tum.de/doc/1223319/80942.pdf">Main reference</a>
* @todo Create a BibTeX file https://www.stack.nl/~dimitri/doxygen/manual/commands.html#cmdcite
* @author Vicente Mataix Ferrandiz
*/
template<class TSparseSpace, class TDenseSpace>
class ResidualBasedBDFScheme
: public ResidualBasedImplicitTimeScheme<TSparseSpace, TDenseSpace>
{
public:
///@name Type Definitions
///@{
KRATOS_CLASS_POINTER_DEFINITION( ResidualBasedBDFScheme );
typedef Scheme<TSparseSpace,TDenseSpace> BaseType;
typedef typename BaseType::Pointer BaseTypePointer;
typedef ResidualBasedImplicitTimeScheme<TSparseSpace,TDenseSpace> ImplicitBaseType;
typedef typename ImplicitBaseType::TDataType TDataType;
typedef typename ImplicitBaseType::DofsArrayType DofsArrayType;
typedef typename Element::DofsVectorType DofsVectorType;
typedef typename ImplicitBaseType::TSystemMatrixType TSystemMatrixType;
typedef typename ImplicitBaseType::TSystemVectorType TSystemVectorType;
typedef typename ImplicitBaseType::LocalSystemVectorType LocalSystemVectorType;
typedef typename ImplicitBaseType::LocalSystemMatrixType LocalSystemMatrixType;
typedef ModelPart::NodesContainerType NodesArrayType;
/// Definition of epsilon
static constexpr double ZeroTolerance = std::numeric_limits<double>::epsilon();
///@}
///@name Life Cycle
///@{
/**
* @brief Constructor. The BDF method
* @param Order The integration order
* @todo The ideal would be to use directly the dof or the variable itself to identify the type of variable and is derivatives
*/
explicit ResidualBasedBDFScheme(const std::size_t Order = 2)
:ImplicitBaseType(),
mOrder(Order),
mpBDFUtility(Kratos::make_unique<TimeDiscretization::BDF>(Order))
{
// Allocate auxiliary memory: one scratch derivative vector per OpenMP thread
const std::size_t num_threads = OpenMPUtils::GetNumThreads();
mVector.dotun0.resize(num_threads);
mVector.dot2un0.resize(num_threads);
// Doing a minimal check (BDF requires at least order 1)
KRATOS_ERROR_IF(mOrder < 1) << "ERROR:: Not possible to compute a BDF of order less than 1" << std::endl;
// We resize the BDF coefficients (order n needs n+1 coefficients)
if (mBDF.size() != (mOrder + 1))
mBDF.resize(mOrder + 1);
}
/** Copy Constructor.
*/
explicit ResidualBasedBDFScheme(ResidualBasedBDFScheme& rOther)
:ImplicitBaseType(rOther)
,mOrder(rOther.mOrder)
,mBDF(rOther.mBDF)
,mVector(rOther.mVector)
,mpBDFUtility(nullptr)
{
// Deep-copy the BDF utility so the copy owns its own instance
Kratos::unique_ptr<TimeDiscretization::BDF> auxiliar_pointer = Kratos::make_unique<TimeDiscretization::BDF>(mOrder);
mpBDFUtility.swap(auxiliar_pointer);
}
/**
* Clone
*/
BaseTypePointer Clone() override
{
return BaseTypePointer( new ResidualBasedBDFScheme(*this) );
}
/** Destructor.
*/
~ResidualBasedBDFScheme
() override {}
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
/**
* @brief Performing the update of the solution
* @details Incremental update within newton iteration. It updates the state variables at the end of the time step
* \f[ u_{n+1}^{k+1}= u_{n+1}^{k}+ \Delta u\f]
* @param rModelPart The model of the problem to solve
* @param rDofSet Set of all primary variables
* @param rA LHS matrix
* @param rDx incremental update of primary variables
* @param rb RHS Vector
*/
void Update(
ModelPart& rModelPart,
DofsArrayType& rDofSet,
TSystemMatrixType& rA,
TSystemVectorType& rDx,
TSystemVectorType& rb
) override
{
KRATOS_TRY;
// Update of displacement (by DOF)
mpDofUpdater->UpdateDofs(rDofSet, rDx);
// Then recompute the time derivatives consistently with the new DOFs
UpdateDerivatives(rModelPart, rDofSet, rA, rDx, rb);
KRATOS_CATCH( "" );
}
/**
* @brief Performing the prediction of the solution
* @details It predicts the solution for the current step x = xold + vold * Dt
* @param rModelPart The model of the problem to solve
* @param rDofSet set of all primary variables
* @param rA LHS matrix
* @param rDx Incremental update of primary variables
* @param rb RHS Vector
* @note Pure-virtual-like: derived schemes must override; the base errors out.
*/
void Predict(
ModelPart& rModelPart,
DofsArrayType& rDofSet,
TSystemMatrixType& rA,
TSystemVectorType& rDx,
TSystemVectorType& rb
) override
{
KRATOS_TRY;
KRATOS_ERROR << "Calling base BDF class" << std::endl;
KRATOS_CATCH( "" );
}
/**
* @brief It initializes time step solution. Only for reasons if the time step solution is restarted
* @param rModelPart The model of the problem to solve
* @param rA LHS matrix
* @param rDx Incremental update of primary variables
* @param rb RHS Vector
* @todo I cannot find the formula for the higher orders with variable time step. I tried to deduce by myself but the result was very unstable
*/
void InitializeSolutionStep(
ModelPart& rModelPart,
TSystemMatrixType& rA,
TSystemVectorType& rDx,
TSystemVectorType& rb
) override
{
KRATOS_TRY;
ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();
ImplicitBaseType::InitializeSolutionStep(rModelPart, rA, rDx, rb);
// Recompute and cache the BDF coefficients for the current time step
mpBDFUtility->ComputeAndSaveBDFCoefficients(r_current_process_info);
mBDF = r_current_process_info[BDF_COEFFICIENTS];
KRATOS_WARNING_IF("ResidualBasedBDFScheme", mOrder > 2)
<< "For higher orders than 2 the time step is assumed to be constant.\n";
KRATOS_CATCH( "" );
}
/**
* @brief This function is designed to be called once to perform all the checks needed on the input provided.
* @details Checks can be "expensive" as the function is designed to catch user's errors.
* @param rModelPart The model of the problem to solve
* @return Zero means all ok
*/
int Check(ModelPart& rModelPart) override
{
KRATOS_TRY;
const int err = ImplicitBaseType::Check(rModelPart);
if(err!=0) return err;
// Check for minimum value of the buffer index
// Verify buffer size (order n BDF reads n+1 history steps)
KRATOS_ERROR_IF(rModelPart.GetBufferSize() < mOrder + 1) << "Insufficient buffer size. Buffer size should be greater than " << mOrder + 1 << ". Current size is " << rModelPart.GetBufferSize() << std::endl;
KRATOS_CATCH( "" );
return 0;
}
/// Free memory allocated by this class.
void Clear() override
{
this->mpDofUpdater->Clear();
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
std::string Info() const override
{
return "ResidualBasedBDFScheme";
}
/// Print information about this object.
void PrintInfo(std::ostream& rOStream) const override
{
rOStream << Info();
}
/// Print object's data.
void PrintData(std::ostream& rOStream) const override
{
rOStream << Info();
}
///@}
///@name Friends
///@{
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
struct GeneralVectors
{
std::vector< Vector > dotun0; ///< First derivative (one scratch vector per thread)
std::vector< Vector > dot2un0; ///< Second derivative (one scratch vector per thread)
};
const std::size_t mOrder; /// The integration order
Vector mBDF; /// The BDF coefficients
GeneralVectors mVector; /// The structure containing the derivatives
Kratos::unique_ptr<TimeDiscretization::BDF> mpBDFUtility; /// Utility to compute BDF coefficients
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
/**
* @brief Performing the update of the derivatives
* @param rModelPart The model of the problem to solve
* @param rDofSet Set of all primary variables
* @param rA LHS matrix
* @param rDx incremental update of primary variables
* @param rb RHS Vector
*/
inline void UpdateDerivatives(
ModelPart& rModelPart,
DofsArrayType& rDofSet,
TSystemMatrixType& rA,
TSystemVectorType& rDx,
TSystemVectorType& rb
)
{
// Updating time derivatives (nodally for efficiency)
const int num_nodes = static_cast<int>( rModelPart.Nodes().size() );
// Getting first node iterator
const auto it_node_begin = rModelPart.Nodes().begin();
#pragma omp parallel for
for(int i = 0; i< num_nodes; ++i) {
auto it_node = it_node_begin + i;
UpdateFirstDerivative(it_node);
UpdateSecondDerivative(it_node);
}
}
/**
* @brief Updating first time derivative (velocity)
* @param itNode the node interator
* @note Must be overridden by derived schemes; the base errors out.
*/
virtual inline void UpdateFirstDerivative(NodesArrayType::iterator itNode)
{
KRATOS_ERROR << "Calling base BDF class" << std::endl;
}
/**
* @brief Updating second time derivative (acceleration)
* @param itNode the node interator
* @note Must be overridden by derived schemes; the base errors out.
*/
virtual inline void UpdateSecondDerivative(NodesArrayType::iterator itNode)
{
KRATOS_ERROR << "Calling base BDF class" << std::endl;
}
/**
* @brief It adds the dynamic LHS contribution of the elements
* \f[ LHS = \frac{d(-RHS)}{d(u_{n0})} = c_0^2\mathbf{M} + c_0 \mathbf{D} + \mathbf{K} \f]
* @param rLHS_Contribution The dynamic contribution for the LHS
* @param rD The damping matrix
* @param rM The mass matrix
* @param rCurrentProcessInfo The current process info instance
*/
void AddDynamicsToLHS(
LocalSystemMatrixType& rLHS_Contribution,
LocalSystemMatrixType& rD,
LocalSystemMatrixType& rM,
ProcessInfo& rCurrentProcessInfo
) override
{
// Adding mass contribution to the dynamic stiffness
if (rM.size1() != 0) { // if M matrix declared
noalias(rLHS_Contribution) += rM * std::pow(mBDF[0], 2);
}
// Adding damping contribution
if (rD.size1() != 0) { // if D matrix declared
noalias(rLHS_Contribution) += rD * mBDF[0];
}
}
/**
* @brief It adds the dynamic RHS contribution of the objects
* \f[ \mathbf{b} - \mathbf{M} a - \mathbf{D} v \f]
* @param rObject The object to compute
* @param rRHS_Contribution The dynamic contribution for the RHS
* @param rD The damping matrix
* @param rM The mass matrix
* @param rCurrentProcessInfo The current process info instance
*/
template <typename TObjectType>
void TemplateAddDynamicsToRHS(
TObjectType rObject,
LocalSystemVectorType& rRHS_Contribution,
LocalSystemMatrixType& rD,
LocalSystemMatrixType& rM,
ProcessInfo& rCurrentProcessInfo
)
{
// Per-thread scratch vectors avoid races between simultaneous assemblies
const std::size_t this_thread = OpenMPUtils::ThisThread();
// Adding inertia contribution
if (rM.size1() != 0) {
rObject->GetSecondDerivativesVector(mVector.dot2un0[this_thread], 0);
noalias(rRHS_Contribution) -= prod(rM, mVector.dot2un0[this_thread]);
}
// Adding damping contribution
if (rD.size1() != 0) {
rObject->GetFirstDerivativesVector(mVector.dotun0[this_thread], 0);
noalias(rRHS_Contribution) -= prod(rD, mVector.dotun0[this_thread]);
}
}
/**
* @brief It adds the dynamic RHS contribution of the elements
* \f[ \mathbf{b} - \mathbf{M} a - \mathbf{D} v \f]
* @param pElement The element to compute
* @param RHS_Contribution The dynamic contribution for the RHS
* @param D The damping matrix
* @param M The mass matrix
* @param rCurrentProcessInfo The current process info instance
*/
void AddDynamicsToRHS(
Element::Pointer pElement,
LocalSystemVectorType& rRHS_Contribution,
LocalSystemMatrixType& rD,
LocalSystemMatrixType& rM,
ProcessInfo& rCurrentProcessInfo
) override
{
TemplateAddDynamicsToRHS<Element::Pointer>(pElement, rRHS_Contribution, rD, rM, rCurrentProcessInfo);
}
/**
* @brief It adds the dynamic RHS contribution of the condition
* \f[ RHS = f_{ext} - \ddot{u}_{n0} \mathbf{M} + \dot{u}_{n0} \mathbf{D} + u_{n0} \mathbf{K} \f]
* @param pCondition The condition to compute
* @param RHS_Contribution The dynamic contribution for the RHS
* @param D The damping matrix
* @param M The mass matrix
* @param rCurrentProcessInfo The current process info instance
*/
void AddDynamicsToRHS(
Condition::Pointer pCondition,
LocalSystemVectorType& rRHS_Contribution,
LocalSystemMatrixType& rD,
LocalSystemMatrixType& rM,
ProcessInfo& rCurrentProcessInfo
) override
{
TemplateAddDynamicsToRHS<Condition::Pointer>(pCondition, rRHS_Contribution, rD, rM, rCurrentProcessInfo);
}
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
/// Utility class to perform the update after solving the system, will be different in MPI runs.
typename TSparseSpace::DofUpdaterPointerType mpDofUpdater = TSparseSpace::CreateDofUpdater();
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@name Serialization
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
///@}
}; /* Class ResidualBasedBDFScheme */
///@}
///@name Type Definitions
///@{
///@}
///@name Input and output
///@{
///@}
} /* namespace Kratos.*/
#endif /* KRATOS_RESIDUAL_BASED_BDF_SCHEME defined */
|
levmarq.h | #ifndef ARUCO_MM__LevMarq_H
#define ARUCO_MM__LevMarq_H
#include <Eigen/Core>
#include <Eigen/Cholesky>
#include <functional>
#include <iostream>
#include <cmath>
#include "ar_omp.h"
#include <ctime>
#include <cstring>
#include <vector>
#include <chrono>
#include <iomanip>
namespace aruco{
// Levenberg-Marquardt method for general problems Inspired in
//@MISC\{IMM2004-03215,
// author = "K. Madsen and H. B. Nielsen and O. Tingleff",
// title = "Methods for Non-Linear Least Squares Problems (2nd ed.)",
// year = "2004",
// pages = "60",
// publisher = "Informatics and Mathematical Modelling, Technical University of Denmark, {DTU}",
// address = "Richard Petersens Plads, Building 321, {DK-}2800 Kgs. Lyngby",
// url = "http://www.ltu.se/cms_fs/1.51590!/nonlinear_least_squares.pdf"
//}
// NOTE(review): the dynamic exception specifications (throw(std::exception))
// used below are deprecated in C++11 and removed in C++17; consider dropping
// them together with the out-of-class definitions.
template<typename T>
class LevMarq{
public:
typedef Eigen::Matrix<T,Eigen::Dynamic,1> eVector;
typedef std::function<void(const eVector &, eVector &)> F_z_x;
typedef std::function<void(const eVector &, Eigen::Matrix<T,Eigen::Dynamic,Eigen::Dynamic> &)> F_z_J;
LevMarq();
/**
* @brief Constructor with parms
* @param maxIters maximum number of iterations of the algoritm
* @param minError to stop the algorithm before reaching the max iterations
* @param min_step_error_diff minimum error difference between two iterations. If below this level, then stop.
* @param tau parameter indicating how near the initial solution is estimated to be to the real one. If 1, it means that it is very far and the first
* @param der_epsilon increment to calculate the derivate of the evaluation function
* step will be very short. If near 0, means the opposite. This value is auto calculated in the subsequent iterations.
*/
LevMarq(int maxIters,double minError,double min_step_error_diff=0,double tau=1 ,double der_epsilon=1e-3);
/**
* @brief setParams
* @param maxIters maximum number of iterations of the algoritm
* @param minError to stop the algorithm before reaching the max iterations
* @param min_step_error_diff minimum error difference between two iterations. If below this level, then stop.
* @param tau parameter indicating how near the initial solution is estimated to be to the real one. If 1, it means that it is very far and the first
* @param der_epsilon increment to calculate the derivate of the evaluation function
* step will be very short. If near 0, means the opposite. This value is auto calculated in the subsequent iterations.
*/
void setParams(int maxIters,double minError,double min_step_error_diff=0,double tau=1 ,double der_epsilon=1e-3);
/**
* @brief solve non linear minimization problem ||F(z)||, where F(z)=f(z) f(z)^t
* @param z function params 1xP to be estimated. input-output. Contains the result of the optimization
* @param f_z_x evaluation function f(z)=x
* first parameter : z : input. Data is in double precision as a row vector (1xp)
* second parameter : x : output. Data must be returned in double
* @param f_J computes the jacobian of f(z)
* first parameter : z : input. Data is in double precision as a row vector (1xp)
* second parameter : J : output. Data must be returned in double
* @return final error
*/
double solve( eVector &z, F_z_x , F_z_J)throw (std::exception);
/// Step by step solve mode
/**
* @brief init initializes the search engine
* @param z
*/
void init(eVector &z, F_z_x )throw (std::exception);
/**
* @brief step gives a step of the search
* @param f_z_x error evaluation function
* @param f_z_J Jacobian function
* @return error of current solution
*/
bool step( F_z_x f_z_x , F_z_J f_z_J)throw (std::exception);
bool step( F_z_x f_z_x)throw (std::exception);
/**
* @brief getCurrentSolution returns the current solution
* @param z output
* @return error of the solution
*/
double getCurrentSolution(eVector &z)throw (std::exception);
/**
* @brief getBestSolution sets in z the best solution up to this moment
* @param z output
* @return error of the solution
*/
double getBestSolution(eVector &z)throw (std::exception);
/** Automatic jacobian estimation
* @brief solve non linear minimization problem ||F(z)||, where F(z)=f(z) f(z)^t
* @param z function params 1xP to be estimated. input-output. Contains the result of the optimization
* @param f_z_x evaluation function f(z)=x
* first parameter : z : input. Data is in double precision as a row vector (1xp)
* second parameter : x : output. Data must be returned in double
* @return final error
*/
double solve( eVector &z, F_z_x )throw (std::exception);
//to enable verbose mode
bool & verbose(){return _verbose;}
//sets a callback func call at each step
void setStepCallBackFunc(std::function<void(const eVector &)> callback){_step_callback=callback;}
//sets a function that indicates when the algorithm must be stop. returns true if must stop and false otherwise
void setStopFunction( std::function<bool(const eVector &)> stop_function){_stopFunction=stop_function;}
// Numerical (central-difference) Jacobian of f_z_x at z, written into the matrix argument
void calcDerivates(const eVector & z , Eigen::Matrix<T,Eigen::Dynamic,Eigen::Dynamic> &, F_z_x);
private:
int _maxIters; // iteration budget for solve()
double _minErrorAllowed,_der_epsilon,_tau,_min_step_error_diff; // stopping/derivative/damping parameters
bool _verbose; // print per-iteration diagnostics when true
//-------- internal optimizer state
eVector curr_z,x64; // current parameters and last residual vector
double currErr,prevErr,minErr ; // squared-residual errors (current/previous/best)
Eigen::Matrix<T,Eigen::Dynamic,Eigen::Dynamic> I,J; // identity (sized in init) and Jacobian
double mu,v; // LM damping factor (mu<0 means "not yet initialized") and its growth rate
std::function<void(const eVector &)> _step_callback;
std::function<bool(const eVector &)> _stopFunction;
};
template<typename T>
LevMarq<T>::LevMarq(){
    // Default configuration: up to 1000 iterations, no early-stop thresholds.
    _maxIters = 1000;
    _minErrorAllowed = 0;
    _der_epsilon = 1e-3;
    _verbose = false;
    _tau = 1;
    v = 5;
    _min_step_error_diff = 0;
}
/**
 * @brief Parameterized constructor.
 * @param maxIters maximum number of iterations of the algorithm
 * @param minError error threshold to stop before reaching maxIters
 * @param min_step_error_diff minimum error decrease between iterations; stop below it
 * @param tau initial-damping hint: near 1 means the start is far from the
 *        solution (short first step); near 0 the opposite. Auto-adjusted later.
 * @param der_epsilon increment used for the numerical derivative
 */
template<typename T>
LevMarq<T>::LevMarq(int maxIters,double minError,double min_step_error_diff,double tau ,double der_epsilon ){
    _maxIters = maxIters;
    _minErrorAllowed = minError;
    _der_epsilon = der_epsilon;
    _verbose = false;
    _tau = tau;
    v = 5;
    _min_step_error_diff = min_step_error_diff;
}
/**
 * @brief Reconfigures the optimizer parameters (see the parameterized
 *        constructor for the meaning of each argument).
 * @note Intentionally leaves _verbose and the damping growth rate v untouched.
 */
template<typename T>
void LevMarq<T>::setParams(int maxIters,double minError,double min_step_error_diff,double tau ,double der_epsilon){
    _maxIters = maxIters;
    _minErrorAllowed = minError;
    _der_epsilon = der_epsilon;
    _tau = tau;
    _min_step_error_diff = min_step_error_diff;
}
template<typename T>
void LevMarq<T>:: calcDerivates(const eVector & z , Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic> &J, F_z_x f_z_x)
{
    // Central-difference Jacobian: column i is
    // (f(z + eps*e_i) - f(z - eps*e_i)) / (2*eps).  Columns are independent,
    // so they are computed in parallel.
#pragma omp parallel for
    for (int col = 0; col < z.rows(); col++) {
        eVector zplus(z), zminus(z);
        zplus(col)  += _der_epsilon;
        zminus(col) -= _der_epsilon;
        eVector xplus, xminus;
        f_z_x(zplus, xplus);
        f_z_x(zminus, xminus);
        J.middleCols(col,1) = (xplus - xminus) / (2.f * _der_epsilon);
    }
}
template<typename T>
double LevMarq<T>:: solve( eVector &z, F_z_x f_z_x)throw (std::exception){
    // Delegate to the full solver, estimating the Jacobian numerically.
    auto numeric_jacobian = std::bind(&LevMarq::calcDerivates, this,
                                      std::placeholders::_1, std::placeholders::_2, f_z_x);
    return solve(z, f_z_x, numeric_jacobian);
}
template<typename T>
bool LevMarq<T>:: step( F_z_x f_z_x)throw (std::exception){
    // Single step with a numerically estimated Jacobian.
    auto numeric_jacobian = std::bind(&LevMarq::calcDerivates, this,
                                      std::placeholders::_1, std::placeholders::_2, f_z_x);
    return step(f_z_x, numeric_jacobian);
}
template<typename T>
void LevMarq<T>::init(eVector &z, F_z_x f_z_x )throw (std::exception){
    // Seed the search at z, cache the initial residual and squared error,
    // and size the work matrices.
    curr_z = z;
    I = Eigen::Matrix<T,Eigen::Dynamic,Eigen::Dynamic>::Identity(z.rows(), z.rows());
    f_z_x(curr_z, x64);
    minErr = currErr = prevErr = x64.cwiseProduct(x64).sum();
    J.resize(x64.rows(), z.rows());
    mu = -1;  // sentinel: damping factor is initialized on the first step()
}
#define splm_get_time(a,b) std::chrono::duration_cast<std::chrono::duration<double>>(a-b).count()
/**
 * Performs one Levenberg-Marquardt iteration: builds the normal equations
 * from the current Jacobian and residual, then retries with increasing
 * damping (up to 5 extra attempts) until a step reduces the error.
 * @return true if a step that lowered the error was accepted.
 */
template<typename T>
bool LevMarq<T>::step( F_z_x f_z_x, F_z_J f_J)throw (std::exception){
f_J(curr_z,J);
Eigen::Matrix<T,Eigen::Dynamic,Eigen::Dynamic> Jt=J.transpose();
Eigen::Matrix<T,Eigen::Dynamic,Eigen::Dynamic> JtJ=(Jt*J);
eVector B=-Jt*x64;
if(mu<0){//first time only
// Initial damping: _tau times the largest diagonal element of JtJ.
int max=0;
for(int j=1;j<JtJ.cols();j++) if (JtJ(j,j)>JtJ(max,max)) max=j;
mu=JtJ(max,max)*_tau;
}
double gain=0,prev_mu=0;
int ntries=0;
bool isStepAccepted=false;
do{
//add/update dumping factor to JtJ.
//very efficient in any case, but particularly if initial dump does not produce improvement and must reenter
for(int j=0;j<JtJ.cols();j++) JtJ(j,j) += mu-prev_mu;//update mu
prev_mu=mu;
// Solve the damped normal equations (JtJ + mu*I) * delta = -Jt*residual.
eVector delta= JtJ.ldlt().solve(B);
eVector estimated_z=curr_z+delta;
//compute error
f_z_x(estimated_z,x64);
auto err=x64.cwiseProduct(x64).sum();
// Gain ratio between the observed error change and the change predicted
// by the linearized model; gain>0 means the candidate step is accepted.
auto L=0.5*delta.transpose()*((mu*delta) - B);
gain= (err-prevErr)/ L(0,0) ;
//get gain
if (gain>0){
// Accept the step and shrink the damping factor toward Gauss-Newton.
mu=mu*std::max(double(0.33),1.-pow(2*gain-1,3));
v=5.f;
currErr=err;
curr_z=estimated_z;
isStepAccepted=true;
}
else{ mu=mu*v; v=v*5;}// reject: raise damping and grow the growth factor
}while(gain<=0 && ntries++<5);
if (_verbose) std::cout<<std::setprecision(5) <<"Curr Error="<<currErr<<" AErr(prev-curr)="<<prevErr-currErr<<" gain="<<gain<<" dumping factor="<<mu<<std::endl;
// //check if we must move to the new position or exit
// NOTE(review): after an accepted step currErr<prevErr holds, so this swap
// leaves prevErr with the new (smaller) error -- confirm solve() relies on it.
if ( currErr<prevErr)
std::swap ( currErr,prevErr );
return isStepAccepted;
}
// Copies the current solution into z and returns its error (currErr).
template<typename T>
double LevMarq<T>:: getCurrentSolution(eVector &z)throw (std::exception){
z=curr_z;
return currErr;
}
/**
 * Runs the optimization from the initial solution z. If a user stop function
 * is installed, it alone decides termination; otherwise the loop exits on
 * max iterations, error below threshold, too-small improvement, or a
 * rejected step. On return z holds the final solution; returns its error.
 */
template<typename T>
double LevMarq<T>::solve( eVector &z, F_z_x f_z_x, F_z_J f_J)throw (std::exception){
init(z,f_z_x);
if( _stopFunction){
do{
step(f_z_x,f_J);
if (_step_callback) _step_callback(curr_z);
}while(!_stopFunction(curr_z));
}
else{
//intial error estimation
int mustExit=0;
for ( int i = 0; i < _maxIters && !mustExit; i++ ) {
if (_verbose)std::cerr<<"iteration "<<i<<"/"<<_maxIters<< " ";
bool isStepAccepted=step(f_z_x,f_J);
//check if we must exit
if ( currErr<_minErrorAllowed ) mustExit=1;
if( fabs( prevErr -currErr)<=_min_step_error_diff || !isStepAccepted) mustExit=2;
//exit if error increment
// NOTE(review): the comment above says "error increment" but the test below
// fires when currErr<prevErr (a decrement, given the swap done in step()) --
// verify the intended exit condition.
if (currErr<prevErr )mustExit=3;
// if ( (prevErr-currErr) < 1e-5 ) mustExit=true;
if (_step_callback) _step_callback(curr_z);
}
// std::cout<<"Exit code="<<mustExit<<std::endl;
}
z=curr_z;
return currErr;
}
}
#endif
|
3d7pt_var.c | /*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for struct timeval values, normalizing so the
 * final tv_usec is non-negative.  NOTE: *y is modified during the
 * borrow/carry adjustment.  Returns 1 if the difference is negative
 * (x is earlier than y), otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow whole seconds into y so that x->tv_usec >= y->tv_usec. */
    if (x->tv_usec < y->tv_usec)
    {
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow;
        y->tv_sec += borrow;
    }
    /* Move any excess of a full second from the microsecond difference
     * into the seconds field of y. */
    if (x->tv_usec - y->tv_usec > 1000000)
    {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }
    /* After the adjustments the microsecond difference is non-negative. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    return (x->tv_sec < y->tv_sec) ? 1 : 0;
}
/*
 * Benchmark driver for the order-1, 3D 7-point variable-coefficient
 * stencil.  Usage: prog Nx Ny Nz [Nt].  Allocates a double-buffered grid
 * A[2][Nz][Ny][Nx] plus seven coefficient grids, runs the stencil TESTS
 * times and reports the per-test and minimum wall-clock times.
 */
int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  /* Defaults keep the sizes defined even when too few command-line
   * arguments are given (they were read uninitialized before);
   * the +2 adds a one-point halo in each dimension. */
  int Nx = 34, Ny = 34, Nz = 34, Nt = 10;
  if (argc > 3) {
    Nx = atoi(argv[1])+2;
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  /* allocate the arrays */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2; m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  double ****coef = (double ****) malloc(sizeof(double***)*7);
  for(m=0; m<7; m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  /* tile size information, including extra element to decide the list length */
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  /* The list is modified here before source-to-source transformations */
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 16;
  tile_size[1] = 16;
  tile_size[2] = 24;
  tile_size[3] = 128;
  tile_size[4] = -1;

  /* for timekeeping */
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff = 1.e100;
  const int BASE = 1024;

  /* Initialize the full grids from index 0: the stencil reads i-1/j-1/k-1
   * down to index 0, which the original loops (starting at 1) left
   * uninitialized -- undefined behavior. */
  srand(42);
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      for (k = 0; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }
  for (m=0; m<7; m++) {
    for (i=0; i<Nz; i++) {
      for (j=0; j<Ny; j++) {
        for (k=0; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    for (t = 0; t < Nt-1; t++) {
      for (i = 1; i < Nz-1; i++) {
        for (j = 1; j < Ny-1; j++) {
          for (k = 1; k < Nx-1; k++) {
            A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i  ][j  ][k  ] +
                                  coef[1][i][j][k] * A[t%2][i-1][j  ][k  ] +
                                  coef[2][i][j][k] * A[t%2][i  ][j-1][k  ] +
                                  coef[3][i][j][k] * A[t%2][i  ][j  ][k-1] +
                                  coef[4][i][j][k] * A[t%2][i+1][j  ][k  ] +
                                  coef[5][i][j][k] * A[t%2][i  ][j+1][k  ] +
                                  coef[6][i][j][k] * A[t%2][i  ][j  ][k+1];
          }
        }
      }
    }
#pragma endscop
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    /* Fixed: was min(...), which is undefined -- the macro is MIN. */
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  (void) ts_return; /* negative-difference flag is intentionally unused */

  PRINT_RESULTS(1, "variable no-symmetry")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  /* Free allocated arrays (now also the top-level A/coef pointers and the
   * tile list, which previously leaked). */
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  free(A);
  for(m=0; m<7;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  free(coef);
  free(tile_size);
  return 0;
}
|
psd.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% PPPP SSSSS DDDD %
% P P SS D D %
% PPPP SSS D D %
% P SS D D %
% P SSSSS DDDD %
% %
% %
% Read/Write Adobe Photoshop Image Format %
% %
% Software Design %
% Cristy %
% Leonard Rosenthol %
% July 1992 %
% Dirk Lemstra %
% December 2013 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Photoshop spec @ https://www.adobe.com/devnet-apps/photoshop/fileformatashtml
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/channel.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colormap-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/module.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/policy.h"
#include "MagickCore/profile.h"
#include "MagickCore/property.h"
#include "MagickCore/registry.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/static.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#ifdef MAGICKCORE_ZLIB_DELEGATE
#include <zlib.h>
#endif
#include "psd-private.h"
/*
Define declarations.
*/
#define MaxPSDChannels 56
#define PSDQuantum(x) (((ssize_t) (x)+1) & -2)
/*
Enumerated declarations.
*/
/*
  Per-channel compression methods used in PSD files.
*/
typedef enum
{
Raw = 0,
RLE = 1,
ZipWithoutPrediction = 2,
ZipWithPrediction = 3
} PSDCompressionType;
/*
  Color modes from the PSD file header.
*/
typedef enum
{
BitmapMode = 0,
GrayscaleMode = 1,
IndexedMode = 2,
RGBMode = 3,
CMYKMode = 4,
MultichannelMode = 7,
DuotoneMode = 8,
LabMode = 9
} PSDImageType;
/*
Typedef declarations.
*/
/*
  Type tag and stored byte size of a single layer channel.
*/
typedef struct _ChannelInfo
{
short
type;
size_t
size;
} ChannelInfo;
/*
  A layer's opacity mask: the mask image, its placement, and the background
  value/flags read from the layer record.
*/
typedef struct _MaskInfo
{
Image
*image;
RectangleInfo
page;
unsigned char
background,
flags;
} MaskInfo;
/*
  Everything parsed from one PSD layer record: its channels, blend key,
  decoded image, mask, opacity, placement, name and extra info blob.
*/
typedef struct _LayerInfo
{
ChannelInfo
channel_info[MaxPSDChannels];
char
blendkey[4];
Image
*image;
MaskInfo
mask;
Quantum
opacity;
RectangleInfo
page;
size_t
offset_x,
offset_y;
unsigned char
clipping,
flags,
name[257],
visible;
unsigned short
channels;
StringInfo
*info;
} LayerInfo;
/*
Forward declarations.
*/
static MagickBooleanType
WritePSDImage(const ImageInfo *,Image *,ExceptionInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s P S D %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsPSD() returns MagickTrue if the image format type, identified by the
% magick string, is PSD.
%
% The format of the IsPSD method is:
%
% MagickBooleanType IsPSD(const unsigned char *magick,const size_t length)
%
% A description of each parameter follows:
%
% o magick: compare image format pattern against these bytes.
%
% o length: Specifies the length of the magick string.
%
*/
/*
  Returns MagickTrue when the magick byte signature identifies a PSD file
  (leading "8BPS"), MagickFalse otherwise.
*/
static MagickBooleanType IsPSD(const unsigned char *magick,const size_t length)
{
  if (length >= 4)
    {
      if (LocaleNCompare((const char *) magick,"8BPS",4) == 0)
        return(MagickTrue);
    }
  return(MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPSDImage() reads an Adobe Photoshop image file and returns it. It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the ReadPSDImage method is:
%
% Image *ReadPSDImage(image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Maps an ImageMagick composite operator onto the 4-byte PSD blend-mode key.
  For images flagged LSBEndian the key is returned byte-reversed (e.g.
  "vidi" instead of "idiv"); unknown operators fall back to "norm".
*/
static const char *CompositeOperatorToPSDBlendMode(Image *image)
{
switch (image->compose)
{
case ColorBurnCompositeOp:
return(image->endian == LSBEndian ? "vidi" : "idiv");
case ColorDodgeCompositeOp:
return(image->endian == LSBEndian ? " vid" : "div ");
case ColorizeCompositeOp:
return(image->endian == LSBEndian ? "rloc" : "colr");
case DarkenCompositeOp:
return(image->endian == LSBEndian ? "krad" : "dark");
case DifferenceCompositeOp:
return(image->endian == LSBEndian ? "ffid" : "diff");
case DissolveCompositeOp:
return(image->endian == LSBEndian ? "ssid" : "diss");
case ExclusionCompositeOp:
return(image->endian == LSBEndian ? "dums" : "smud");
case HardLightCompositeOp:
return(image->endian == LSBEndian ? "tiLh" : "hLit");
case HardMixCompositeOp:
return(image->endian == LSBEndian ? "xiMh" : "hMix");
case HueCompositeOp:
return(image->endian == LSBEndian ? " euh" : "hue ");
case LightenCompositeOp:
return(image->endian == LSBEndian ? "etil" : "lite");
case LinearBurnCompositeOp:
return(image->endian == LSBEndian ? "nrbl" : "lbrn");
case LinearDodgeCompositeOp:
return(image->endian == LSBEndian ? "gddl" : "lddg");
case LinearLightCompositeOp:
return(image->endian == LSBEndian ? "tiLl" : "lLit");
case LuminizeCompositeOp:
return(image->endian == LSBEndian ? " mul" : "lum ");
case MultiplyCompositeOp:
return(image->endian == LSBEndian ? " lum" : "mul ");
case OverlayCompositeOp:
return(image->endian == LSBEndian ? "revo" : "over");
case PinLightCompositeOp:
return(image->endian == LSBEndian ? "tiLp" : "pLit");
case SaturateCompositeOp:
return(image->endian == LSBEndian ? " tas" : "sat ");
case ScreenCompositeOp:
return(image->endian == LSBEndian ? "nrcs" : "scrn");
case SoftLightCompositeOp:
return(image->endian == LSBEndian ? "tiLs" : "sLit");
case VividLightCompositeOp:
return(image->endian == LSBEndian ? "tiLv" : "vLit");
case OverCompositeOp:
default:
return(image->endian == LSBEndian ? "mron" : "norm");
}
}
/*
For some reason Photoshop seems to blend semi-transparent pixels with white.
This method reverts the blending. This can be disabled by setting the
option 'psd:alpha-unblend' to off.
*/
/*
  Undoes Photoshop's blending of semi-transparent sRGB pixels with white
  (see the comment above).  Only applies to blended-alpha sRGB images and
  can be disabled with the 'psd:alpha-unblend' option.  Rows are processed
  in parallel; 'status' records any pixel-cache failure.
*/
static MagickBooleanType CorrectPSDAlphaBlend(const ImageInfo *image_info,
Image *image,ExceptionInfo* exception)
{
const char
*option;
MagickBooleanType
status;
ssize_t
y;
if ((image->alpha_trait != BlendPixelTrait) ||
(image->colorspace != sRGBColorspace))
return(MagickTrue);
option=GetImageOption(image_info,"psd:alpha-unblend");
if (IsStringFalse(option) != MagickFalse)
return(MagickTrue);
status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
gamma;
register ssize_t
i;
gamma=QuantumScale*GetPixelAlpha(image, q);
/* Fully opaque or fully transparent pixels need no correction. */
if (gamma != 0.0 && gamma != 1.0)
{
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
/* Invert the white blend on every channel except alpha itself. */
if (channel != AlphaPixelChannel)
q[i]=ClampToQuantum((q[i]-((1.0-gamma)*QuantumRange))/gamma);
}
}
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
status=MagickFalse;
}
return(status);
}
/*
  Maps a PSD compression tag onto the corresponding MagickCore
  CompressionType; both Zip variants collapse to ZipCompression and any
  other tag is treated as uncompressed.
*/
static inline CompressionType ConvertPSDCompression(
  PSDCompressionType compression)
{
  if (compression == RLE)
    return RLECompression;
  if ((compression == ZipWithPrediction) ||
      (compression == ZipWithoutPrediction))
    return ZipCompression;
  return NoCompression;
}
/*
  Scales every pixel's alpha by the layer opacity (revert==MagickFalse), or
  divides it back out (revert!=MagickFalse).  A fully opaque layer is a
  no-op.  Rows are processed in parallel; 'status' records any pixel-cache
  failure.
*/
static MagickBooleanType ApplyPSDLayerOpacity(Image *image,Quantum opacity,
MagickBooleanType revert,ExceptionInfo *exception)
{
MagickBooleanType
status;
ssize_t
y;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" applying layer opacity %.20g", (double) opacity);
if (opacity == OpaqueAlpha)
return(MagickTrue);
/* Ensure the image has an alpha channel before scaling it. */
if (image->alpha_trait != BlendPixelTrait)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
if (revert == MagickFalse)
SetPixelAlpha(image,(Quantum) (QuantumScale*(GetPixelAlpha(image,q))*
opacity),q);
else if (opacity > 0)
SetPixelAlpha(image,(Quantum) (QuantumRange*(GetPixelAlpha(image,q)/
(MagickRealType) opacity)),q);
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
status=MagickFalse;
}
return(status);
}
/*
  Applies (or, with revert, removes) a layer's opacity mask: the mask is
  composited over a background-colored clone at its page offset, then each
  image pixel's alpha is scaled by the mask intensity.  Images without an
  alpha channel are left untouched.
*/
static MagickBooleanType ApplyPSDOpacityMask(Image *image,const Image *mask,
Quantum background,MagickBooleanType revert,ExceptionInfo *exception)
{
Image
*complete_mask;
MagickBooleanType
status;
PixelInfo
color;
ssize_t
y;
if (image->alpha_trait == UndefinedPixelTrait)
return(MagickTrue);
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" applying opacity mask");
/* Build a full-size mask: background color everywhere, with the layer's
   mask composited at its relative page offset. */
complete_mask=CloneImage(image,0,0,MagickTrue,exception);
if (complete_mask == (Image *) NULL)
return(MagickFalse);
complete_mask->alpha_trait=BlendPixelTrait;
GetPixelInfo(complete_mask,&color);
color.red=(MagickRealType) background;
(void) SetImageColor(complete_mask,&color,exception);
status=CompositeImage(complete_mask,mask,OverCompositeOp,MagickTrue,
mask->page.x-image->page.x,mask->page.y-image->page.y,exception);
if (status == MagickFalse)
{
complete_mask=DestroyImage(complete_mask);
return(status);
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register Quantum
*p;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
p=GetAuthenticPixels(complete_mask,0,y,complete_mask->columns,1,exception);
if ((q == (Quantum *) NULL) || (p == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
MagickRealType
alpha,
intensity;
alpha=(MagickRealType) GetPixelAlpha(image,q);
intensity=GetPixelIntensity(complete_mask,p);
if (revert == MagickFalse)
SetPixelAlpha(image,ClampToQuantum(intensity*(QuantumScale*alpha)),q);
else if (intensity > 0)
SetPixelAlpha(image,ClampToQuantum((alpha/intensity)*QuantumRange),q);
q+=GetPixelChannels(image);
p+=GetPixelChannels(complete_mask);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
status=MagickFalse;
}
complete_mask=DestroyImage(complete_mask);
return(status);
}
/*
  Stores the layer's opacity mask image in the image registry under a
  random key and records that key as the "psd:opacity-mask" artifact of the
  layer image, so the mask can be restored when the PSD is written back.
*/
static void PreservePSDOpacityMask(Image *image,LayerInfo* layer_info,
  ExceptionInfo *exception)
{
  char
    *key;

  RandomInfo
    *random_info;

  StringInfo
    *key_info;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " preserving opacity mask");
  random_info=AcquireRandomInfo();
  /*
    Request enough random bytes for the writes below: the previous code
    asked for only 3 bytes (2+1) yet wrote key[8] and key[9] -- a heap
    buffer overflow.  10 bytes (9+1) makes those indexes valid.
  */
  key_info=GetRandomKey(random_info,9+1);
  key=(char *) GetStringInfoDatum(key_info);
  key[8]=(char) layer_info->mask.background;
  key[9]='\0';
  /* Record the mask's absolute placement before parking it in the registry. */
  layer_info->mask.image->page.x+=layer_info->page.x;
  layer_info->mask.image->page.y+=layer_info->page.y;
  (void) SetImageRegistry(ImageRegistryType,(const char *) key,
    layer_info->mask.image,exception);
  (void) SetImageArtifact(layer_info->image,"psd:opacity-mask",
    (const char *) key);
  key_info=DestroyStringInfo(key_info);
  random_info=DestroyRandomInfo(random_info);
}
/*
  Decodes a PackBits-style RLE channel: a length byte >128 introduces a run
  of (257-length) repeats of the next byte, <128 introduces (length+1)
  literal bytes, and 128 is a no-op.  Sub-byte depths (1/2/4) are expanded
  to one output byte per sample.  Returns the number of decoded samples,
  stopping early (without error) when either buffer is exhausted.
*/
static ssize_t DecodePSDPixels(const size_t number_compact_pixels,
const unsigned char *compact_pixels,const ssize_t depth,
const size_t number_pixels,unsigned char *pixels)
{
/* Bail out (returning the samples decoded so far) when input runs dry. */
#define CheckNumberCompactPixels \
if (packets == 0) \
return(i); \
packets--
/* Bail out when the next write would overflow the output buffer. */
#define CheckNumberPixels(count) \
if (((ssize_t) i + count) > (ssize_t) number_pixels) \
return(i); \
i+=count
int
pixel;
register ssize_t
i,
j;
size_t
length;
ssize_t
packets;
packets=(ssize_t) number_compact_pixels;
for (i=0; (packets > 1) && (i < (ssize_t) number_pixels); )
{
packets--;
length=(size_t) (*compact_pixels++);
if (length == 128)
continue;
if (length > 128)
{
/* Run: repeat the next input byte 257-length times. */
length=256-length+1;
CheckNumberCompactPixels;
pixel=(*compact_pixels++);
for (j=0; j < (ssize_t) length; j++)
{
switch (depth)
{
case 1:
{
/* 1-bit: each byte expands to 8 output bytes, set bit -> 0. */
CheckNumberPixels(8);
*pixels++=(pixel >> 7) & 0x01 ? 0U : 255U;
*pixels++=(pixel >> 6) & 0x01 ? 0U : 255U;
*pixels++=(pixel >> 5) & 0x01 ? 0U : 255U;
*pixels++=(pixel >> 4) & 0x01 ? 0U : 255U;
*pixels++=(pixel >> 3) & 0x01 ? 0U : 255U;
*pixels++=(pixel >> 2) & 0x01 ? 0U : 255U;
*pixels++=(pixel >> 1) & 0x01 ? 0U : 255U;
*pixels++=(pixel >> 0) & 0x01 ? 0U : 255U;
break;
}
case 2:
{
CheckNumberPixels(4);
*pixels++=(unsigned char) ((pixel >> 6) & 0x03);
*pixels++=(unsigned char) ((pixel >> 4) & 0x03);
*pixels++=(unsigned char) ((pixel >> 2) & 0x03);
*pixels++=(unsigned char) ((pixel & 0x03) & 0x03);
break;
}
case 4:
{
CheckNumberPixels(2);
*pixels++=(unsigned char) ((pixel >> 4) & 0xff);
*pixels++=(unsigned char) ((pixel & 0x0f) & 0xff);
break;
}
default:
{
CheckNumberPixels(1);
*pixels++=(unsigned char) pixel;
break;
}
}
}
continue;
}
/* Literal: copy the next length+1 input bytes. */
length++;
for (j=0; j < (ssize_t) length; j++)
{
CheckNumberCompactPixels;
switch (depth)
{
case 1:
{
CheckNumberPixels(8);
*pixels++=(*compact_pixels >> 7) & 0x01 ? 0U : 255U;
*pixels++=(*compact_pixels >> 6) & 0x01 ? 0U : 255U;
*pixels++=(*compact_pixels >> 5) & 0x01 ? 0U : 255U;
*pixels++=(*compact_pixels >> 4) & 0x01 ? 0U : 255U;
*pixels++=(*compact_pixels >> 3) & 0x01 ? 0U : 255U;
*pixels++=(*compact_pixels >> 2) & 0x01 ? 0U : 255U;
*pixels++=(*compact_pixels >> 1) & 0x01 ? 0U : 255U;
*pixels++=(*compact_pixels >> 0) & 0x01 ? 0U : 255U;
break;
}
case 2:
{
CheckNumberPixels(4);
*pixels++=(*compact_pixels >> 6) & 0x03;
*pixels++=(*compact_pixels >> 4) & 0x03;
*pixels++=(*compact_pixels >> 2) & 0x03;
*pixels++=(*compact_pixels & 0x03) & 0x03;
break;
}
case 4:
{
CheckNumberPixels(2);
*pixels++=(*compact_pixels >> 4) & 0xff;
*pixels++=(*compact_pixels & 0x0f) & 0xff;
break;
}
default:
{
CheckNumberPixels(1);
*pixels++=(*compact_pixels);
break;
}
}
compact_pixels++;
}
}
return(i);
}
/*
  Releases every per-layer image, mask image and extra-info blob, then the
  layer_info array itself.  Always returns NULL for convenient assignment.
*/
static inline LayerInfo *DestroyLayerInfo(LayerInfo *layer_info,
  const ssize_t number_layers)
{
  ssize_t
    n;

  for (n=0; n < number_layers; n++)
  {
    LayerInfo *layer = layer_info+n;

    if (layer->image != (Image *) NULL)
      layer->image=DestroyImage(layer->image);
    if (layer->mask.image != (Image *) NULL)
      layer->mask.image=DestroyImage(layer->mask.image);
    if (layer->info != (StringInfo *) NULL)
      layer->info=DestroyStringInfo(layer->info);
  }
  return (LayerInfo *) RelinquishMagickMemory(layer_info);
}
/*
  Bytes per channel sample: colormapped images with more than 256 entries
  use 2-byte indexes; otherwise the size follows the bit depth
  (>16 -> 4 bytes, >8 -> 2 bytes, else 1 byte).
*/
static inline size_t GetPSDPacketSize(const Image *image)
{
  if ((image->storage_class == PseudoClass) && (image->colors > 256))
    return(2);
  if (image->depth > 16)
    return(4);
  return(image->depth > 8 ? 2 : 1);
}
/*
  Reads a PSD length field: 32 bits in version-1 (PSD) files, 64 bits in
  version-2 (PSB) files.
*/
static inline MagickSizeType GetPSDSize(const PSDInfo *psd_info,Image *image)
{
  if (psd_info->version != 1)
    return((MagickSizeType) ReadBlobLongLong(image));
  return((MagickSizeType) ReadBlobLong(image));
}
/*
  Bytes in one decoded scanline; 1-bit images pack 8 pixels per byte.
*/
static inline size_t GetPSDRowSize(Image *image)
{
  size_t
    samples;

  samples=(image->depth == 1) ? (image->columns+7)/8 : image->columns;
  return(samples*GetPSDPacketSize(image));
}
/*
  Human-readable name of a PSD color mode, for logging.
*/
static const char *ModeToString(PSDImageType type)
{
  static const struct
  {
    PSDImageType
      mode;

    const char
      *name;
  } modes[] =
  {
    { BitmapMode, "Bitmap" },
    { GrayscaleMode, "Grayscale" },
    { IndexedMode, "Indexed" },
    { RGBMode, "RGB" },
    { CMYKMode, "CMYK" },
    { MultichannelMode, "Multichannel" },
    { DuotoneMode, "Duotone" },
    { LabMode, "L*A*B" }
  };

  size_t
    i;

  for (i=0; i < sizeof(modes)/sizeof(modes[0]); i++)
    if (modes[i].mode == type)
      return modes[i].name;
  return "unknown";
}
/*
  Negates every channel except alpha, restoring the previous channel mask
  afterwards.  Returns the status of the negate operation.
*/
static MagickBooleanType NegateCMYK(Image *image,ExceptionInfo *exception)
{
  ChannelType
    saved_mask;

  MagickBooleanType
    status;

  saved_mask=SetImageChannelMask(image,(ChannelType)(AllChannels &~
    AlphaChannel));
  status=NegateImage(image,MagickFalse,exception);
  (void) SetImageChannelMask(image,saved_mask);
  return(status);
}
/*
  Walks the 8BIM image-resource blocks, copying the whole section into an
  "8bim" profile and interpreting the resources ImageMagick cares about:
  0x03ed (resolution info) and 0x0421 (merged-image flag).  Returns the
  profile, or NULL when the section is too short to hold a block.
  NOTE(review): the result of BlobToStringInfo() is used without a NULL
  check -- confirm allocation failure cannot reach SetStringInfoDatum().
*/
static StringInfo *ParseImageResourceBlocks(PSDInfo *psd_info,Image *image,
const unsigned char *blocks,size_t length)
{
const unsigned char
*p;
ssize_t
offset;
StringInfo
*profile;
unsigned char
name_length;
unsigned int
count;
unsigned short
id,
short_sans;
if (length < 16)
return((StringInfo *) NULL);
profile=BlobToStringInfo((const unsigned char *) NULL,length);
SetStringInfoDatum(profile,blocks);
SetStringInfoName(profile,"8bim");
for (p=blocks; (p >= blocks) && (p < (blocks+length-7)); )
{
if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
break;
p+=4;
p=PushShortPixel(MSBEndian,p,&id);
/* Pascal-style name, padded to an even total length. */
p=PushCharPixel(p,&name_length);
if ((name_length % 2) == 0)
name_length++;
p+=name_length;
if (p > (blocks+length-4))
break;
p=PushLongPixel(MSBEndian,p,&count);
offset=(ssize_t) count;
/* Reject blocks whose payload would run outside the section. */
if (((p+offset) < blocks) || ((p+offset) > (blocks+length)))
break;
switch (id)
{
case 0x03ed:
{
unsigned short
resolution;
/*
Resolution info.
*/
if (offset < 16)
break;
p=PushShortPixel(MSBEndian,p,&resolution);
image->resolution.x=(double) resolution;
(void) FormatImageProperty(image,"tiff:XResolution","%*g",
GetMagickPrecision(),image->resolution.x);
p=PushShortPixel(MSBEndian,p,&short_sans);
p=PushShortPixel(MSBEndian,p,&short_sans);
p=PushShortPixel(MSBEndian,p,&short_sans);
p=PushShortPixel(MSBEndian,p,&resolution);
image->resolution.y=(double) resolution;
(void) FormatImageProperty(image,"tiff:YResolution","%*g",
GetMagickPrecision(),image->resolution.y);
p=PushShortPixel(MSBEndian,p,&short_sans);
p=PushShortPixel(MSBEndian,p,&short_sans);
p=PushShortPixel(MSBEndian,p,&short_sans);
image->units=PixelsPerInchResolution;
break;
}
case 0x0421:
{
/* Version info: byte 4 zero means no merged (composite) image. */
if ((offset > 4) && (*(p+4) == 0))
psd_info->has_merged_image=MagickFalse;
p+=offset;
break;
}
default:
{
p+=offset;
break;
}
}
/* Payloads are padded to even length. */
if ((offset & 0x01) != 0)
p++;
}
return(profile);
}
static CompositeOperator PSDBlendModeToCompositeOperator(const char *mode)
{
if (mode == (const char *) NULL)
return(OverCompositeOp);
if (LocaleNCompare(mode,"norm",4) == 0)
return(OverCompositeOp);
if (LocaleNCompare(mode,"mul ",4) == 0)
return(MultiplyCompositeOp);
if (LocaleNCompare(mode,"diss",4) == 0)
return(DissolveCompositeOp);
if (LocaleNCompare(mode,"diff",4) == 0)
return(DifferenceCompositeOp);
if (LocaleNCompare(mode,"dark",4) == 0)
return(DarkenCompositeOp);
if (LocaleNCompare(mode,"lite",4) == 0)
return(LightenCompositeOp);
if (LocaleNCompare(mode,"hue ",4) == 0)
return(HueCompositeOp);
if (LocaleNCompare(mode,"sat ",4) == 0)
return(SaturateCompositeOp);
if (LocaleNCompare(mode,"colr",4) == 0)
return(ColorizeCompositeOp);
if (LocaleNCompare(mode,"lum ",4) == 0)
return(LuminizeCompositeOp);
if (LocaleNCompare(mode,"scrn",4) == 0)
return(ScreenCompositeOp);
if (LocaleNCompare(mode,"over",4) == 0)
return(OverlayCompositeOp);
if (LocaleNCompare(mode,"hLit",4) == 0)
return(HardLightCompositeOp);
if (LocaleNCompare(mode,"sLit",4) == 0)
return(SoftLightCompositeOp);
if (LocaleNCompare(mode,"smud",4) == 0)
return(ExclusionCompositeOp);
if (LocaleNCompare(mode,"div ",4) == 0)
return(ColorDodgeCompositeOp);
if (LocaleNCompare(mode,"idiv",4) == 0)
return(ColorBurnCompositeOp);
if (LocaleNCompare(mode,"lbrn",4) == 0)
return(LinearBurnCompositeOp);
if (LocaleNCompare(mode,"lddg",4) == 0)
return(LinearDodgeCompositeOp);
if (LocaleNCompare(mode,"lLit",4) == 0)
return(LinearLightCompositeOp);
if (LocaleNCompare(mode,"vLit",4) == 0)
return(VividLightCompositeOp);
if (LocaleNCompare(mode,"pLit",4) == 0)
return(PinLightCompositeOp);
if (LocaleNCompare(mode,"hMix",4) == 0)
return(HardMixCompositeOp);
return(OverCompositeOp);
}
/*
  Reads a fixed-length key string from the blob.  When the blob is not
  big-endian the bytes are reversed in place, so keys always compare in
  big-endian order.  Returns the number of bytes read.
*/
static inline ssize_t ReadPSDString(Image *image,char *p,const size_t length)
{
  ssize_t
    count;

  count=ReadBlob(image,length,(unsigned char *) p);
  if ((count == (ssize_t) length) && (image->endian != MSBEndian))
    {
      char
        *head,
        *tail;

      head=p;
      tail=p+length-1;
      while (head < tail)
      {
        char c=*head;
        *head++=*tail;
        *tail--=c;
      }
    }
  return(count);
}
/*
  Stores one decoded sample into the pixel at q.  For colormapped images
  the sample is a palette index (or, for type!=0, a palette alpha).  For
  direct-class images 'type' selects the target channel: negative values
  address mask/extra channels, 0..4 address R/G/B/black-or-alpha/alpha.
*/
static inline void SetPSDPixel(Image *image,const size_t channels,
const ssize_t type,const size_t packet_size,const Quantum pixel,Quantum *q,
ExceptionInfo *exception)
{
if (image->storage_class == PseudoClass)
{
PixelInfo
*color;
Quantum
index;
index=pixel;
if (packet_size == 1)
index=(Quantum) ScaleQuantumToChar(index);
index=(Quantum) ConstrainColormapIndex(image,(ssize_t) index,
exception);
if (type == 0)
SetPixelIndex(image,index,q);
/* With multiple channels the colormap lookup happens on a later pass. */
if ((type == 0) && (channels > 1))
return;
color=image->colormap+(ssize_t) GetPixelIndex(image,q);
if (type != 0)
color->alpha=(MagickRealType) pixel;
SetPixelViaPixelInfo(image,color,q);
return;
}
switch (type)
{
case -1:
{
SetPixelAlpha(image,pixel,q);
break;
}
case -2:
case 0:
{
SetPixelRed(image,pixel,q);
break;
}
case -3:
case 1:
{
SetPixelGreen(image,pixel,q);
break;
}
case -4:
case 2:
{
SetPixelBlue(image,pixel,q);
break;
}
case 3:
{
/* Channel 3 is black for CMYK, otherwise alpha (when present). */
if (image->colorspace == CMYKColorspace)
SetPixelBlack(image,pixel,q);
else
if (image->alpha_trait != UndefinedPixelTrait)
SetPixelAlpha(image,pixel,q);
break;
}
case 4:
{
/* For RGB-compatible images a 5th channel is ignored. */
if ((IssRGBCompatibleColorspace(image->colorspace) != MagickFalse) &&
(channels > 3))
break;
if (image->alpha_trait != UndefinedPixelTrait)
SetPixelAlpha(image,pixel,q);
break;
}
}
}
/*
  Converts one decoded scanline into image pixels on row 'row': samples are
  read at the image's packet size (8/16/32-bit, big-endian) and dispatched
  to the channel selected by 'type' via SetPSDPixel.  For 1-bit images each
  byte expands to up to 8 pixels.  Returns the sync status of the row.
*/
static MagickBooleanType ReadPSDChannelPixels(Image *image,
const size_t channels,const ssize_t row,const ssize_t type,
const unsigned char *pixels,ExceptionInfo *exception)
{
Quantum
pixel;
register const unsigned char
*p;
register Quantum
*q;
register ssize_t
x;
size_t
packet_size;
p=pixels;
q=GetAuthenticPixels(image,0,row,image->columns,1,exception);
if (q == (Quantum *) NULL)
return MagickFalse;
packet_size=GetPSDPacketSize(image);
for (x=0; x < (ssize_t) image->columns; x++)
{
if (packet_size == 1)
pixel=ScaleCharToQuantum(*p++);
else
if (packet_size == 2)
{
unsigned short
nibble;
p=PushShortPixel(MSBEndian,p,&nibble);
pixel=ScaleShortToQuantum(nibble);
}
else
{
/* 32-bit samples are stored as big-endian floats in [0,1]. */
MagickFloatType
nibble;
p=PushFloatPixel(MSBEndian,p,&nibble);
pixel=ClampToQuantum((MagickRealType) (QuantumRange*nibble));
}
if (image->depth > 1)
{
SetPSDPixel(image,channels,type,packet_size,pixel,q,exception);
q+=GetPixelChannels(image);
}
else
{
/* 1-bit data: expand the byte, set bit -> black (0). */
ssize_t
bit,
number_bits;
number_bits=(ssize_t) image->columns-x;
if (number_bits > 8)
number_bits=8;
for (bit = 0; bit < (ssize_t) number_bits; bit++)
{
SetPSDPixel(image,channels,type,packet_size,(((unsigned char) pixel)
& (0x01 << (7-bit))) != 0 ? 0 : QuantumRange,q,exception);
q+=GetPixelChannels(image);
x++;
}
/* Compensate the loop increment unless the row ended exactly. */
if (x != (ssize_t) image->columns)
x--;
continue;
}
}
return(SyncAuthenticPixels(image,exception));
}
/*
  Reads an uncompressed channel: one row at a time from the blob, decoded
  into pixels by ReadPSDChannelPixels.  Stops (returning MagickFalse) on a
  short read or a pixel-cache failure.
*/
static MagickBooleanType ReadPSDChannelRaw(Image *image,const size_t channels,
const ssize_t type,ExceptionInfo *exception)
{
MagickBooleanType
status;
size_t
row_size;
ssize_t
count,
y;
unsigned char
*pixels;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer data is RAW");
row_size=GetPSDRowSize(image);
pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels));
if (pixels == (unsigned char *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
(void) memset(pixels,0,row_size*sizeof(*pixels));
status=MagickTrue;
for (y=0; y < (ssize_t) image->rows; y++)
{
/* Pessimistically mark failure; reset by a successful decode below. */
status=MagickFalse;
count=ReadBlob(image,row_size,pixels);
if (count != (ssize_t) row_size)
break;
status=ReadPSDChannelPixels(image,channels,y,type,pixels,exception);
if (status == MagickFalse)
break;
}
pixels=(unsigned char *) RelinquishMagickMemory(pixels);
return(status);
}
/*
  Reads the per-row compressed byte counts that precede an RLE-coded
  channel: 16-bit counts in version-1 (PSD) files, 32-bit in version-2
  (PSB).  Returns NULL when the table cannot be allocated.
*/
static inline MagickOffsetType *ReadPSDRLESizes(Image *image,
  const PSDInfo *psd_info,const size_t size)
{
  MagickOffsetType
    *sizes;

  ssize_t
    i;

  sizes=(MagickOffsetType *) AcquireQuantumMemory(size,sizeof(*sizes));
  if (sizes == (MagickOffsetType *) NULL)
    return sizes;
  for (i=0; i < (ssize_t) size; i++)
  {
    if (psd_info->version == 1)
      sizes[i]=(MagickOffsetType) ReadBlobShort(image);
    else
      sizes[i]=(MagickOffsetType) ReadBlobLong(image);
  }
  return sizes;
}
/*
  Reads an RLE-compressed channel: each row's compressed bytes (whose sizes
  were read beforehand into 'sizes') are read from the blob, expanded with
  DecodePSDPixels and stored via ReadPSDChannelPixels.  Rows whose declared
  size exceeds row_size+2048 are rejected as invalid.
*/
static MagickBooleanType ReadPSDChannelRLE(Image *image,const PSDInfo *psd_info,
const ssize_t type,MagickOffsetType *sizes,ExceptionInfo *exception)
{
MagickBooleanType
status;
size_t
length,
row_size;
ssize_t
count,
y;
unsigned char
*compact_pixels,
*pixels;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer data is RLE compressed");
row_size=GetPSDRowSize(image);
pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels));
if (pixels == (unsigned char *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
/* Size the compressed buffer for the largest row. */
length=0;
for (y=0; y < (ssize_t) image->rows; y++)
if ((MagickOffsetType) length < sizes[y])
length=(size_t) sizes[y];
if (length > (row_size+2048)) /* arbitrary number */
{
pixels=(unsigned char *) RelinquishMagickMemory(pixels);
ThrowBinaryException(ResourceLimitError,"InvalidLength",image->filename);
}
compact_pixels=(unsigned char *) AcquireQuantumMemory(length,sizeof(*pixels));
if (compact_pixels == (unsigned char *) NULL)
{
pixels=(unsigned char *) RelinquishMagickMemory(pixels);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
(void) memset(compact_pixels,0,length*sizeof(*compact_pixels));
status=MagickTrue;
for (y=0; y < (ssize_t) image->rows; y++)
{
/* Pessimistically mark failure; reset by a successful decode below. */
status=MagickFalse;
count=ReadBlob(image,(size_t) sizes[y],compact_pixels);
if (count != (ssize_t) sizes[y])
break;
/* depth==1 passes a dummy value so DecodePSDPixels takes the byte path. */
count=DecodePSDPixels((size_t) sizes[y],compact_pixels,
(ssize_t) (image->depth == 1 ? 123456 : image->depth),row_size,pixels);
if (count != (ssize_t) row_size)
break;
status=ReadPSDChannelPixels(image,psd_info->channels,y,type,pixels,
exception);
if (status == MagickFalse)
break;
}
compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
pixels=(unsigned char *) RelinquishMagickMemory(pixels);
return(status);
}
#ifdef MAGICKCORE_ZLIB_DELEGATE
/*
  Undo the horizontal delta prediction applied before ZIP compression for
  8-bit samples: within each row every byte is the delta from its left
  neighbor, so a running prefix sum restores the original values.
  `count` is the total byte count; `row_size` bytes are consumed per row.
*/
static void Unpredict8Bit(const Image *image,unsigned char *pixels,
  const size_t count,const size_t row_size)
{
  unsigned char
    *q;

  size_t
    left;

  q=pixels;
  for (left=count; left > 0; left-=row_size)
  {
    size_t
      n;

    /* columns-1 additions per row; the first byte of a row is absolute. */
    for (n=image->columns; --n; q++)
      q[1]=(unsigned char) (q[1]+q[0]);
    q++;
  }
}
/*
  Undo horizontal delta prediction for 16-bit big-endian samples.  Each
  16-bit value is the delta from its left neighbor; the high byte absorbs
  the carry out of the low-byte addition ((p[1]+p[3]) >> 8).
*/
static void Unpredict16Bit(const Image *image,unsigned char *pixels,
  const size_t count,const size_t row_size)
{
  unsigned char
    *q;

  size_t
    left;

  q=pixels;
  for (left=count; left > 0; left-=row_size)
  {
    size_t
      n;

    for (n=image->columns; --n; q+=2)
    {
      q[2]=(unsigned char) (q[2]+q[0]+((q[1]+q[3]) >> 8));
      q[3]=(unsigned char) (q[3]+q[1]);
    }
    q+=2;
  }
}
/*
  Undo delta prediction for 32-bit samples and de-interleave.  Each
  predicted row stores the four bytes of every sample in planar order
  (all byte 0s, then all byte 1s, ...); after the byte-wise prefix sum the
  row is rewritten to `output_pixels` as interleaved big-endian 32-bit
  samples.  `row_size` is the byte length of one planar row; output is
  image->rows * row_size bytes.
*/
static void Unpredict32Bit(const Image *image,unsigned char *pixels,
  unsigned char *output_pixels,const size_t row_size)
{
  register unsigned char
    *p,
    *q;

  register ssize_t
    y;

  size_t
    offset1,
    offset2,
    offset3,
    remaining;

  unsigned char
    *start;

  /* Plane strides within one row: plane k starts k*columns bytes in. */
  offset1=image->columns;
  offset2=2*offset1;
  offset3=3*offset1;
  p=pixels;
  q=output_pixels;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    start=p;
    /* Byte-wise prefix sum across the whole planar row (row_size bytes). */
    remaining=row_size;
    while (--remaining)
    {
      *(p+1)+=*p;
      p++;
    }
    /* Gather one byte from each of the four planes per output sample. */
    p=start;
    remaining=image->columns;
    while (remaining--)
    {
      *(q++)=*p;
      *(q++)=*(p+offset1);
      *(q++)=*(p+offset2);
      *(q++)=*(p+offset3);
      p++;
    }
    p=start+row_size;
  }
}
/*
  Read one ZIP-compressed channel: inflate `compact_size` bytes from the blob
  into a full-channel buffer, optionally undo delta prediction
  (ZipWithPrediction), then decode row by row into the pixel cache.

  Returns MagickFalse on short read or inflate error; throws on allocation
  failure or when the compressed payload exceeds the blob size.
*/
static MagickBooleanType ReadPSDChannelZip(Image *image,const size_t channels,
  const ssize_t type,const PSDCompressionType compression,
  const size_t compact_size,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  register unsigned char
    *p;

  size_t
    count,
    packet_size,
    row_size;

  register ssize_t
    y;

  unsigned char
    *compact_pixels,
    *pixels;

  z_stream
    stream;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is ZIP compressed");
  if ((MagickSizeType) compact_size > GetBlobSize(image))
    ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile",
      image->filename);
  compact_pixels=(unsigned char *) AcquireQuantumMemory(compact_size,
    sizeof(*compact_pixels));
  if (compact_pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  packet_size=GetPSDPacketSize(image);
  row_size=image->columns*packet_size;
  count=image->rows*row_size;
  pixels=(unsigned char *) AcquireQuantumMemory(count,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    {
      compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  if (ReadBlob(image,compact_size,compact_pixels) != (ssize_t) compact_size)
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
      ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile",
        image->filename);
    }
  memset(&stream,0,sizeof(stream));
  stream.data_type=Z_BINARY;
  stream.next_in=(Bytef *)compact_pixels;
  stream.avail_in=(uInt) compact_size;
  stream.next_out=(Bytef *)pixels;
  stream.avail_out=(uInt) count;
  if (inflateInit(&stream) == Z_OK)
    {
      int
        ret;

      while (stream.avail_out > 0)
      {
        ret=inflate(&stream,Z_SYNC_FLUSH);
        if ((ret != Z_OK) && (ret != Z_STREAM_END))
          {
            (void) inflateEnd(&stream);
            compact_pixels=(unsigned char *) RelinquishMagickMemory(
              compact_pixels);
            pixels=(unsigned char *) RelinquishMagickMemory(pixels);
            return(MagickFalse);
          }
        if (ret == Z_STREAM_END)
          break;
      }
      (void) inflateEnd(&stream);
    }
  if (compression == ZipWithPrediction)
    {
      if (packet_size == 1)
        Unpredict8Bit(image,pixels,count,row_size);
      else if (packet_size == 2)
        Unpredict16Bit(image,pixels,count,row_size);
      else if (packet_size == 4)
        {
          unsigned char
            *output_pixels;

          /* 32-bit prediction also de-interleaves, so it needs a second
             buffer of the same size. */
          output_pixels=(unsigned char *) AcquireQuantumMemory(count,
            sizeof(*output_pixels));
          /* Bug fix: the NULL check previously tested `pixels` (already
             known non-NULL) instead of the buffer just allocated, so an
             allocation failure fell through to Unpredict32Bit writing
             through a NULL pointer. */
          if (output_pixels == (unsigned char *) NULL)
            {
              compact_pixels=(unsigned char *) RelinquishMagickMemory(
                compact_pixels);
              pixels=(unsigned char *) RelinquishMagickMemory(pixels);
              ThrowBinaryException(ResourceLimitError,
                "MemoryAllocationFailed",image->filename);
            }
          Unpredict32Bit(image,pixels,output_pixels,row_size);
          pixels=(unsigned char *) RelinquishMagickMemory(pixels);
          pixels=output_pixels;
        }
    }
  status=MagickTrue;
  p=pixels;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    status=ReadPSDChannelPixels(image,channels,y,type,p,exception);
    if (status == MagickFalse)
      break;
    p+=row_size;
  }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}
#endif
/*
  Read one channel of a layer, dispatching on the compression type.  When
  the channel is a layer mask (type < -1) the data is decoded into a
  separate grayscale image stored in layer_info->mask.image; unsupported or
  disabled masks are skipped by seeking past the channel data.

  The `-2` size adjustments below account for the 2-byte compression marker
  that the caller (ReadPSDLayer) already consumed before invoking us.
*/
static MagickBooleanType ReadPSDChannel(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,LayerInfo* layer_info,
  const size_t channel,const PSDCompressionType compression,
  ExceptionInfo *exception)
{
  Image
    *channel_image,
    *mask;

  MagickOffsetType
    offset;

  MagickBooleanType
    status;

  channel_image=image;
  mask=(Image *) NULL;
  if ((layer_info->channel_info[channel].type < -1) &&
    (layer_info->mask.page.width > 0) && (layer_info->mask.page.height > 0))
    {
      const char
        *option;

      /*
        Ignore mask that is not a user supplied layer mask, if the mask is
        disabled or if the flags have unsupported values.
      */
      option=GetImageOption(image_info,"psd:preserve-opacity-mask");
      if ((layer_info->channel_info[channel].type != -2) ||
          (layer_info->mask.flags > 2) || ((layer_info->mask.flags & 0x02) &&
           (IsStringTrue(option) == MagickFalse)))
        {
          /* Skip the channel payload; size includes the marker bytes. */
          (void) SeekBlob(image,(MagickOffsetType)
            layer_info->channel_info[channel].size-2,SEEK_CUR);
          return(MagickTrue);
        }
      mask=CloneImage(image,layer_info->mask.page.width,
        layer_info->mask.page.height,MagickFalse,exception);
      if (mask != (Image *) NULL)
        {
          (void) ResetImagePixels(mask,exception);
          (void) SetImageType(mask,GrayscaleType,exception);
          /* Decode the channel into the mask image instead of the layer. */
          channel_image=mask;
        }
    }
  offset=TellBlob(image);
  status=MagickFalse;
  switch(compression)
  {
    case Raw:
      status=ReadPSDChannelRaw(channel_image,psd_info->channels,
        (ssize_t) layer_info->channel_info[channel].type,exception);
      break;
    case RLE:
    {
      MagickOffsetType
        *sizes;

      sizes=ReadPSDRLESizes(channel_image,psd_info,channel_image->rows);
      if (sizes == (MagickOffsetType *) NULL)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
      status=ReadPSDChannelRLE(channel_image,psd_info,
        (ssize_t) layer_info->channel_info[channel].type,sizes,exception);
      sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes);
    }
      break;
    case ZipWithPrediction:
    case ZipWithoutPrediction:
#ifdef MAGICKCORE_ZLIB_DELEGATE
      status=ReadPSDChannelZip(channel_image,layer_info->channels,
        (ssize_t) layer_info->channel_info[channel].type,compression,
        layer_info->channel_info[channel].size-2,exception);
#else
      (void) ThrowMagickException(exception,GetMagickModule(),
        MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn",
        "'%s' (ZLIB)",image->filename);
#endif
      break;
    default:
      (void) ThrowMagickException(exception,GetMagickModule(),TypeWarning,
        "CompressionNotSupported","'%.20g'",(double) compression);
      break;
  }
  /* Re-anchor on the recorded channel size even if decoding bailed early. */
  (void) SeekBlob(image,offset+layer_info->channel_info[channel].size-2,
    SEEK_SET);
  if (status == MagickFalse)
    {
      if (mask != (Image *) NULL)
        (void) DestroyImage(mask);
      ThrowBinaryException(CoderError,"UnableToDecompressImage",
        image->filename);
    }
  if (mask != (Image *) NULL)
    {
      if (layer_info->mask.image != (Image *) NULL)
        layer_info->mask.image=DestroyImage(layer_info->mask.image);
      layer_info->mask.image=mask;
    }
  return(status);
}
/*
  Decode one fully-parsed layer record into layer_info->image: set compose
  and hidden attributes, read each channel (each is preceded by a 2-byte
  compression marker), then apply layer opacity, CMYK negation and the
  optional opacity mask.
*/
static MagickBooleanType ReadPSDLayer(Image *image,const ImageInfo *image_info,
  const PSDInfo *psd_info,LayerInfo* layer_info,ExceptionInfo *exception)
{
  char
    message[MagickPathExtent];

  MagickBooleanType
    status;

  PSDCompressionType
    compression;

  ssize_t
    j;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " setting up new layer image");
  if (psd_info->mode != IndexedMode)
    (void) SetImageBackgroundColor(layer_info->image,exception);
  layer_info->image->compose=PSDBlendModeToCompositeOperator(
    layer_info->blendkey);
  if (layer_info->visible == MagickFalse)
    layer_info->image->compose=NoCompositeOp;
  /*
    Set up some hidden attributes for folks that need them.
  */
  (void) FormatLocaleString(message,MagickPathExtent,"%.20g",
    (double) layer_info->page.x);
  (void) SetImageArtifact(layer_info->image,"psd:layer.x",message);
  (void) FormatLocaleString(message,MagickPathExtent,"%.20g",
    (double) layer_info->page.y);
  (void) SetImageArtifact(layer_info->image,"psd:layer.y",message);
  (void) FormatLocaleString(message,MagickPathExtent,"%.20g",(double)
    layer_info->opacity);
  (void) SetImageArtifact(layer_info->image,"psd:layer.opacity",message);
  (void) SetImageProperty(layer_info->image,"label",(char *) layer_info->name,
    exception);
  status=MagickTrue;
  for (j=0; j < (ssize_t) layer_info->channels; j++)
  {
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " reading data for channel %.20g",(double) j);
    /* Each channel starts with its own 2-byte compression marker. */
    compression=(PSDCompressionType) ReadBlobShort(layer_info->image);
    layer_info->image->compression=ConvertPSDCompression(compression);
    /* Channel type -1 is the layer's alpha channel. */
    if (layer_info->channel_info[j].type == -1)
      layer_info->image->alpha_trait=BlendPixelTrait;
    status=ReadPSDChannel(layer_info->image,image_info,psd_info,layer_info,
      (size_t) j,compression,exception);
    if (status == MagickFalse)
      break;
  }
  if (status != MagickFalse)
    status=ApplyPSDLayerOpacity(layer_info->image,layer_info->opacity,
      MagickFalse,exception);
  if ((status != MagickFalse) &&
      (layer_info->image->colorspace == CMYKColorspace))
    status=NegateCMYK(layer_info->image,exception);
  if ((status != MagickFalse) && (layer_info->mask.image != (Image *) NULL))
    {
      const char
        *option;

      layer_info->mask.image->page.x=layer_info->mask.page.x;
      layer_info->mask.image->page.y=layer_info->mask.page.y;
      /* Do not composite the mask when it is disabled */
      if ((layer_info->mask.flags & 0x02) == 0x02)
        layer_info->mask.image->compose=NoCompositeOp;
      else
        status=ApplyPSDOpacityMask(layer_info->image,layer_info->mask.image,
          layer_info->mask.background == 0 ? 0 : QuantumRange,MagickFalse,
          exception);
      option=GetImageOption(image_info,"psd:preserve-opacity-mask");
      if (IsStringTrue(option) != MagickFalse)
        PreservePSDOpacityMask(image,layer_info,exception);
      layer_info->mask.image=DestroyImage(layer_info->mask.image);
    }
  return(status);
}
/*
  Validate a layer's channel list against the minimum the color mode
  requires: every mandatory color channel (R, or RGB, or RGBK) must appear
  exactly via its type code; an alpha channel (-1) is allowed only when an
  extra channel slot exists for it.
*/
static MagickBooleanType CheckPSDChannels(const PSDInfo *psd_info,
  LayerInfo *layer_info)
{
  int
    missing;

  ssize_t
    i;

  if (layer_info->channels < psd_info->min_channels)
    return(MagickFalse);
  /* Build the set of channels that must be present. */
  missing=RedChannel;
  if (psd_info->min_channels >= 3)
    missing|=(GreenChannel | BlueChannel);
  if (psd_info->min_channels >= 4)
    missing|=BlackChannel;
  for (i=0; i < (ssize_t) layer_info->channels; i++)
  {
    short
      type;

    type=layer_info->channel_info[i].type;
    /* Indexed images must carry the index data in channel 0. */
    if ((i == 0) && (psd_info->mode == IndexedMode) && (type != 0))
      return(MagickFalse);
    if (type == -1)
      {
        missing|=AlphaChannel;
        continue;
      }
    if (type < -1)
      continue;  /* layer masks do not satisfy color channels */
    switch (type)
    {
      case 0: missing&=~RedChannel; break;
      case 1: missing&=~GreenChannel; break;
      case 2: missing&=~BlueChannel; break;
      case 3: missing&=~BlackChannel; break;
      default: break;
    }
  }
  if (missing == 0)
    return(MagickTrue);
  if ((missing == AlphaChannel) &&
      (layer_info->channels >= psd_info->min_channels + 1))
    return(MagickTrue);
  return(MagickFalse);
}
/*
  Splice the decoded layer images into the image list after `image`:
  discard entries whose image is NULL, wire up prev/next links and page
  geometry, then release the layer_info array (ownership of the images
  moves to the list).
*/
static void AttachPSDLayers(Image *image,LayerInfo *layer_info,
  ssize_t number_layers)
{
  ssize_t
    source,
    target;

  /* Compact the array in place, dropping empty layers but keeping order. */
  target=0;
  for (source=0; source < number_layers; source++)
    if (layer_info[source].image != (Image *) NULL)
      layer_info[target++]=layer_info[source];
  number_layers=target;
  if (number_layers == 0)
    {
      layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info);
      return;
    }
  for (source=0; source < number_layers; source++)
  {
    if (source > 0)
      layer_info[source].image->previous=layer_info[source-1].image;
    if (source < (number_layers-1))
      layer_info[source].image->next=layer_info[source+1].image;
    layer_info[source].image->page=layer_info[source].page;
  }
  image->next=layer_info[0].image;
  layer_info[0].image->previous=image;
  layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info);
}
/*
  Decide whether scene `index` can be skipped: only when a merged image
  exists as a fallback and the caller restricted the scene range
  (number_scenes != 0) and `index` falls outside [scene, scene+count-1].
*/
static inline MagickBooleanType PSDSkipImage(const PSDInfo *psd_info,
  const ImageInfo *image_info,const size_t index)
{
  if ((psd_info->has_merged_image == MagickFalse) ||
      (image_info->number_scenes == 0))
    return(MagickFalse);
  return(((index < image_info->scene) ||
    (index > image_info->scene+image_info->number_scenes-1)) ?
    MagickTrue : MagickFalse);
}
static void CheckMergedImageAlpha(const PSDInfo *psd_info,Image *image)
{
  size_t
    opaque_channels;

  /*
    The number of layers cannot be used to determine if the merged image
    contains an alpha channel.  So we enable it when the channel count
    exceeds what the color mode itself needs.
  */
  switch (psd_info->mode)
  {
    case GrayscaleMode: opaque_channels=1; break;
    case RGBMode: opaque_channels=3; break;
    case CMYKMode: opaque_channels=4; break;
    default: return;
  }
  if (psd_info->channels > opaque_channels)
    image->alpha_trait=BlendPixelTrait;
}
/*
  Scan the layer's "additional info" blob for a Unicode layer name ("luni"
  key) and, when it is plain ASCII, copy it into layer_info->name.  Each
  entry is: 4-byte signature, 4-byte key, 4-byte big-endian size, payload.
*/
static void ParseAdditionalInfo(LayerInfo *layer_info)
{
  char
    key[5];

  size_t
    remaining_length;

  unsigned char
    *p;

  unsigned int
    size;

  p=GetStringInfoDatum(layer_info->info);
  remaining_length=GetStringInfoLength(layer_info->info);
  while (remaining_length >= 12)
  {
    /* skip over signature */
    p+=4;
    key[0]=(char) (*p++);
    key[1]=(char) (*p++);
    key[2]=(char) (*p++);
    key[3]=(char) (*p++);
    key[4]='\0';
    size=(unsigned int) (*p++) << 24;
    size|=(unsigned int) (*p++) << 16;
    size|=(unsigned int) (*p++) << 8;
    size|=(unsigned int) (*p++);
    remaining_length-=12;
    if ((size_t) size > remaining_length)
      break;
    if (LocaleNCompare(key,"luni",sizeof(key)) == 0)
      {
        unsigned char
          *name;

        unsigned int
          length;

        /*
          Fix: the payload must hold at least the 4-byte character count;
          previously a size < 4 both read past the entry and made the
          unsigned expression `size-4` wrap around below.
        */
        if (size < 4)
          break;
        length=(unsigned int) (*p++) << 24;
        length|=(unsigned int) (*p++) << 16;
        length|=(unsigned int) (*p++) << 8;
        length|=(unsigned int) (*p++);
        /* Overflow-safe form of `length*2 > size-4`: the UTF-16 payload
           (2 bytes per character) must fit inside this entry. */
        if (length > (size-4)/2)
          break;
        if (sizeof(layer_info->name) <= length)
          break;
        name=layer_info->name;
        while (length > 0)
        {
          /* Only ASCII strings are supported */
          if (*p++ != '\0')
            break;
          *name++=*p++;
          length--;
        }
        if (length == 0)
          *name='\0';
        break;
      }
    else
      p+=size;
    remaining_length-=(size_t) size;
  }
}
/*
  Determine the byte length of the layer info section.  When the direct
  size field is zero, probe for 8BIM-tagged sub-blocks: a transparency
  block ("Mt16"/"Mt32"/"Mtrn") marks the merged image as having alpha, and
  a 16/32-bit layer block ("Lr16"/"Lr32") carries the real size.  Returns 0
  when no recognizable layer info is found.
*/
static MagickSizeType GetLayerInfoSize(const PSDInfo *psd_info,Image *image)
{
  char
    type[4];

  MagickSizeType
    size;

  ssize_t
    count;

  size=GetPSDSize(psd_info,image);
  if (size != 0)
    return(size);
  /* Discard the global layer-mask-info length before probing keys. */
  (void) ReadBlobLong(image);
  count=ReadPSDString(image,type,4);
  if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0))
    return(0);
  count=ReadPSDString(image,type,4);
  if ((count == 4) && ((LocaleNCompare(type,"Mt16",4) == 0) ||
      (LocaleNCompare(type,"Mt32",4) == 0) ||
      (LocaleNCompare(type,"Mtrn",4) == 0)))
    {
      /* Transparency block: a non-zero size here is unexpected. */
      size=GetPSDSize(psd_info,image);
      if (size != 0)
        return(0);
      image->alpha_trait=BlendPixelTrait;
      count=ReadPSDString(image,type,4);
      if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0))
        return(0);
      count=ReadPSDString(image,type,4);
    }
  if ((count == 4) && ((LocaleNCompare(type,"Lr16",4) == 0) ||
      (LocaleNCompare(type,"Lr32",4) == 0)))
    size=GetPSDSize(psd_info,image);
  return(size);
}
/*
  Parse the layer-and-mask section: read every layer record (geometry,
  channel table, blend key, flags, mask info, blending ranges, name and
  additional info), allocate an image per non-empty layer, then decode the
  channel data and attach the layers to the image list.  With
  skip_layers != MagickFalse only the alpha-detection side effect is kept.
*/
static MagickBooleanType ReadPSDLayersInternal(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,
  const MagickBooleanType skip_layers,ExceptionInfo *exception)
{
  char
    type[4];

  LayerInfo
    *layer_info;

  MagickSizeType
    size;

  MagickBooleanType
    status;

  register ssize_t
    i;

  ssize_t
    count,
    index,
    j,
    number_layers;

  size=GetLayerInfoSize(psd_info,image);
  if (size == 0)
    {
      CheckMergedImageAlpha(psd_info,image);
      return(MagickTrue);
    }
  layer_info=(LayerInfo *) NULL;
  number_layers=(ssize_t) ReadBlobSignedShort(image);
  if (number_layers < 0)
    {
      /*
        The first alpha channel in the merged result contains the
        transparency data for the merged result.
      */
      number_layers=MagickAbsoluteValue(number_layers);
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " negative layer count corrected for");
      image->alpha_trait=BlendPixelTrait;
    }
  /*
    We only need to know if the image has an alpha channel
  */
  if (skip_layers != MagickFalse)
    return(MagickTrue);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " image contains %.20g layers",(double) number_layers);
  if (number_layers == 0)
    ThrowBinaryException(CorruptImageError,"InvalidNumberOfLayers",
      image->filename);
  layer_info=(LayerInfo *) AcquireQuantumMemory((size_t) number_layers,
    sizeof(*layer_info));
  if (layer_info == (LayerInfo *) NULL)
    {
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " allocation of LayerInfo failed");
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  (void) memset(layer_info,0,(size_t) number_layers*sizeof(*layer_info));
  /*
    Pass 1: parse every layer record header.
  */
  for (i=0; i < number_layers; i++)
  {
    ssize_t
      top,
      left,
      bottom,
      right;

    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " reading layer #%.20g",(double) i+1);
    top=(ssize_t) ReadBlobSignedLong(image);
    left=(ssize_t) ReadBlobSignedLong(image);
    bottom=(ssize_t) ReadBlobSignedLong(image);
    right=(ssize_t) ReadBlobSignedLong(image);
    if ((right < left) || (bottom < top))
      {
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
          image->filename);
      }
    layer_info[i].page.y=top;
    layer_info[i].page.x=left;
    layer_info[i].page.width=(size_t) (right-left);
    layer_info[i].page.height=(size_t) (bottom-top);
    layer_info[i].channels=ReadBlobShort(image);
    if (layer_info[i].channels > MaxPSDChannels)
      {
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        ThrowBinaryException(CorruptImageError,"MaximumChannelsExceeded",
          image->filename);
      }
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " offset(%.20g,%.20g), size(%.20g,%.20g), channels=%.20g",
        (double) layer_info[i].page.x,(double) layer_info[i].page.y,
        (double) layer_info[i].page.height,(double)
        layer_info[i].page.width,(double) layer_info[i].channels);
    /* Channel table: type code and byte size per channel. */
    for (j=0; j < (ssize_t) layer_info[i].channels; j++)
    {
      layer_info[i].channel_info[j].type=(short) ReadBlobShort(image);
      if ((layer_info[i].channel_info[j].type < -4) ||
          (layer_info[i].channel_info[j].type > 4))
        {
          layer_info=DestroyLayerInfo(layer_info,number_layers);
          ThrowBinaryException(CorruptImageError,"NoSuchImageChannel",
            image->filename);
        }
      layer_info[i].channel_info[j].size=(size_t) GetPSDSize(psd_info,
        image);
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " channel[%.20g]: type=%.20g, size=%.20g",(double) j,
          (double) layer_info[i].channel_info[j].type,
          (double) layer_info[i].channel_info[j].size);
    }
    if (CheckPSDChannels(psd_info,&layer_info[i]) == MagickFalse)
      {
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
          image->filename);
      }
    count=ReadPSDString(image,type,4);
    if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0))
      {
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " layer type was %.4s instead of 8BIM", type);
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
          image->filename);
      }
    count=ReadPSDString(image,layer_info[i].blendkey,4);
    if (count != 4)
      {
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
          image->filename);
      }
    layer_info[i].opacity=(Quantum) ScaleCharToQuantum((unsigned char)
      ReadBlobByte(image));
    layer_info[i].clipping=(unsigned char) ReadBlobByte(image);
    layer_info[i].flags=(unsigned char) ReadBlobByte(image);
    /* Flag bit 0x02 means the layer is hidden. */
    layer_info[i].visible=!(layer_info[i].flags & 0x02);
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " blend=%.4s, opacity=%.20g, clipping=%s, flags=%d, visible=%s",
        layer_info[i].blendkey,(double) layer_info[i].opacity,
        layer_info[i].clipping ? "true" : "false",layer_info[i].flags,
        layer_info[i].visible ? "true" : "false");
    (void) ReadBlobByte(image);  /* filler */
    size=ReadBlobLong(image);
    if (size != 0)
      {
        MagickSizeType
          combined_length,
          length;

        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " layer contains additional info");
        length=ReadBlobLong(image);
        combined_length=length+4;
        if (length != 0)
          {
            /*
              Layer mask info.
            */
            layer_info[i].mask.page.y=(ssize_t) ReadBlobSignedLong(image);
            layer_info[i].mask.page.x=(ssize_t) ReadBlobSignedLong(image);
            layer_info[i].mask.page.height=(size_t)
              (ReadBlobSignedLong(image)-layer_info[i].mask.page.y);
            layer_info[i].mask.page.width=(size_t) (
              ReadBlobSignedLong(image)-layer_info[i].mask.page.x);
            layer_info[i].mask.background=(unsigned char) ReadBlobByte(
              image);
            layer_info[i].mask.flags=(unsigned char) ReadBlobByte(image);
            /* Unless bit 0x01 is set, mask coordinates are absolute and
               must be made relative to the layer. */
            if (!(layer_info[i].mask.flags & 0x01))
              {
                layer_info[i].mask.page.y=layer_info[i].mask.page.y-
                  layer_info[i].page.y;
                layer_info[i].mask.page.x=layer_info[i].mask.page.x-
                  layer_info[i].page.x;
              }
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " layer mask: offset(%.20g,%.20g), size(%.20g,%.20g), length=%.20g",
                (double) layer_info[i].mask.page.x,(double)
                layer_info[i].mask.page.y,(double)
                layer_info[i].mask.page.width,(double)
                layer_info[i].mask.page.height,(double) ((MagickOffsetType)
                length)-18);
            /*
              Skip over the rest of the layer mask information.
              NOTE(review): if length < 18 the unsigned expression
              length-18 wraps to a huge value; DiscardBlobBytes should
              then fail, but confirm this cannot over-discard.
            */
            if (DiscardBlobBytes(image,(MagickSizeType) (length-18)) == MagickFalse)
              {
                layer_info=DestroyLayerInfo(layer_info,number_layers);
                ThrowBinaryException(CorruptImageError,
                  "UnexpectedEndOfFile",image->filename);
              }
          }
        length=ReadBlobLong(image);
        combined_length+=length+4;
        if (length != 0)
          {
            /*
              Layer blending ranges info.
            */
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " layer blending ranges: length=%.20g",(double)
                ((MagickOffsetType) length));
            if (DiscardBlobBytes(image,length) == MagickFalse)
              {
                layer_info=DestroyLayerInfo(layer_info,number_layers);
                ThrowBinaryException(CorruptImageError,
                  "UnexpectedEndOfFile",image->filename);
              }
          }
        /*
          Layer name (Pascal string; length is post-incremented so the
          terminating NUL lands just past the copied bytes).
        */
        length=(MagickSizeType) (unsigned char) ReadBlobByte(image);
        combined_length+=length+1;
        if (length > 0)
          (void) ReadBlob(image,(size_t) length++,layer_info[i].name);
        layer_info[i].name[length]='\0';
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " layer name: %s",layer_info[i].name);
        /* Name field (1 + length) is padded to a multiple of 4 bytes. */
        if ((length % 4) != 0)
          {
            length=4-(length % 4);
            combined_length+=length;
            /* Skip over the padding of the layer name */
            if (DiscardBlobBytes(image,length) == MagickFalse)
              {
                layer_info=DestroyLayerInfo(layer_info,number_layers);
                ThrowBinaryException(CorruptImageError,
                  "UnexpectedEndOfFile",image->filename);
              }
          }
        /* Remaining bytes are the "additional info" key/value blocks. */
        length=(MagickSizeType) size-combined_length;
        if (length > 0)
          {
            unsigned char
              *info;

            if (length > GetBlobSize(image))
              {
                layer_info=DestroyLayerInfo(layer_info,number_layers);
                ThrowBinaryException(CorruptImageError,
                  "InsufficientImageDataInFile",image->filename);
              }
            layer_info[i].info=AcquireStringInfo((const size_t) length);
            info=GetStringInfoDatum(layer_info[i].info);
            (void) ReadBlob(image,(const size_t) length,info);
            ParseAdditionalInfo(&layer_info[i]);
          }
      }
  }
  /*
    Pass 2: allocate an image per non-empty layer.
  */
  for (i=0; i < number_layers; i++)
  {
    if ((layer_info[i].page.width == 0) || (layer_info[i].page.height == 0))
      {
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " layer data is empty");
        if (layer_info[i].info != (StringInfo *) NULL)
          layer_info[i].info=DestroyStringInfo(layer_info[i].info);
        continue;
      }
    /*
      Allocate layered image.
    */
    layer_info[i].image=CloneImage(image,layer_info[i].page.width,
      layer_info[i].page.height,MagickFalse,exception);
    if (layer_info[i].image == (Image *) NULL)
      {
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " allocation of image for layer %.20g failed",(double) i);
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
      }
    if (layer_info[i].info != (StringInfo *) NULL)
      {
        (void) SetImageProfile(layer_info[i].image,"psd:additional-info",
          layer_info[i].info,exception);
        layer_info[i].info=DestroyStringInfo(layer_info[i].info);
      }
  }
  if (image_info->ping != MagickFalse)
    {
      AttachPSDLayers(image,layer_info,number_layers);
      return(MagickTrue);
    }
  /*
    Pass 3: decode channel data for layers that are not skipped.
  */
  status=MagickTrue;
  index=0;
  for (i=0; i < number_layers; i++)
  {
    if ((layer_info[i].image == (Image *) NULL) ||
        (PSDSkipImage(psd_info, image_info,++index) != MagickFalse))
      {
        /* Skip the raw channel payloads of this layer. */
        for (j=0; j < (ssize_t) layer_info[i].channels; j++)
        {
          if (DiscardBlobBytes(image,(MagickSizeType)
              layer_info[i].channel_info[j].size) == MagickFalse)
            {
              layer_info=DestroyLayerInfo(layer_info,number_layers);
              ThrowBinaryException(CorruptImageError,
                "UnexpectedEndOfFile",image->filename);
            }
        }
        continue;
      }
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " reading data for layer %.20g",(double) i);
    status=ReadPSDLayer(image,image_info,psd_info,&layer_info[i],
      exception);
    if (status == MagickFalse)
      break;
    status=SetImageProgress(image,LoadImagesTag,(MagickOffsetType) i,
      (MagickSizeType) number_layers);
    if (status == MagickFalse)
      break;
  }
  if (status != MagickFalse)
    AttachPSDLayers(image,layer_info,number_layers);
  else
    layer_info=DestroyLayerInfo(layer_info,number_layers);
  return(status);
}
/*
  Public entry point for reading PSD layers: consult the coder security
  policy first and silently succeed (without reading) when PSD read rights
  are not authorized.
*/
ModuleExport MagickBooleanType ReadPSDLayers(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,ExceptionInfo *exception)
{
  if (IsRightsAuthorized(CoderPolicyDomain,ReadPolicyRights,"PSD") ==
      MagickFalse)
    return(MagickTrue);
  return(ReadPSDLayersInternal(image,image_info,psd_info,MagickFalse,
    exception));
}
/*
  Read the merged (composite) image that follows the layer section.  Only
  Raw and RLE compression are supported here; channels are stored planar,
  one after another.  A two-channel image treats the second channel as
  alpha.  Skipped entirely when the caller asked for a scene other than 0.
*/
static MagickBooleanType ReadPSDMergedImage(const ImageInfo *image_info,
  Image *image,const PSDInfo *psd_info,ExceptionInfo *exception)
{
  MagickOffsetType
    *sizes;

  MagickBooleanType
    status;

  PSDCompressionType
    compression;

  register ssize_t
    i;

  if ((image_info->number_scenes != 0) && (image_info->scene != 0))
    return(MagickTrue);
  compression=(PSDCompressionType) ReadBlobMSBShort(image);
  image->compression=ConvertPSDCompression(compression);
  if (compression != Raw && compression != RLE)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        TypeWarning,"CompressionNotSupported","'%.20g'",(double) compression);
      return(MagickFalse);
    }
  sizes=(MagickOffsetType *) NULL;
  if (compression == RLE)
    {
      /* One row-size entry per row per channel, all up front. */
      sizes=ReadPSDRLESizes(image,psd_info,image->rows*psd_info->channels);
      if (sizes == (MagickOffsetType *) NULL)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
    }
  status=MagickTrue;
  for (i=0; i < (ssize_t) psd_info->channels; i++)
  {
    ssize_t
      type;

    type=i;
    /* Gray+alpha: channel 1 is the alpha channel (type -1). */
    if ((type == 1) && (psd_info->channels == 2))
      type=-1;
    if (compression == RLE)
      status=ReadPSDChannelRLE(image,psd_info,type,sizes+(i*image->rows),
        exception);
    else
      status=ReadPSDChannelRaw(image,psd_info->channels,type,exception);
    if (status != MagickFalse)
      status=SetImageProgress(image,LoadImagesTag,(MagickOffsetType) i,
        psd_info->channels);
    if (status == MagickFalse)
      break;
  }
  if ((status != MagickFalse) && (image->colorspace == CMYKColorspace))
    status=NegateCMYK(image,exception);
  if (status != MagickFalse)
    status=CorrectPSDAlphaBlend(image_info,image,exception);
  sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes);
  return(status);
}
static Image *ReadPSDImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
Image
*image;
MagickBooleanType
skip_layers;
MagickOffsetType
offset;
MagickSizeType
length;
MagickBooleanType
status;
PSDInfo
psd_info;
register ssize_t
i;
size_t
image_list_length;
ssize_t
count;
StringInfo
*profile;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=AcquireImage(image_info,exception);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Read image header.
*/
image->endian=MSBEndian;
count=ReadBlob(image,4,(unsigned char *) psd_info.signature);
psd_info.version=ReadBlobMSBShort(image);
if ((count != 4) || (LocaleNCompare(psd_info.signature,"8BPS",4) != 0) ||
((psd_info.version != 1) && (psd_info.version != 2)))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
(void) ReadBlob(image,6,psd_info.reserved);
psd_info.channels=ReadBlobMSBShort(image);
if (psd_info.channels < 1)
ThrowReaderException(CorruptImageError,"MissingImageChannel");
if (psd_info.channels > MaxPSDChannels)
ThrowReaderException(CorruptImageError,"MaximumChannelsExceeded");
psd_info.rows=ReadBlobMSBLong(image);
psd_info.columns=ReadBlobMSBLong(image);
if ((psd_info.version == 1) && ((psd_info.rows > 30000) ||
(psd_info.columns > 30000)))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
psd_info.depth=ReadBlobMSBShort(image);
if ((psd_info.depth != 1) && (psd_info.depth != 8) &&
(psd_info.depth != 16) && (psd_info.depth != 32))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
psd_info.mode=ReadBlobMSBShort(image);
if ((psd_info.mode == IndexedMode) && (psd_info.channels > 3))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Image is %.20g x %.20g with channels=%.20g, depth=%.20g, mode=%s",
(double) psd_info.columns,(double) psd_info.rows,(double)
psd_info.channels,(double) psd_info.depth,ModeToString((PSDImageType)
psd_info.mode));
if (EOFBlob(image) != MagickFalse)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
/*
Initialize image.
*/
image->depth=psd_info.depth;
image->columns=psd_info.columns;
image->rows=psd_info.rows;
status=SetImageExtent(image,image->columns,image->rows,exception);
if (status == MagickFalse)
return(DestroyImageList(image));
status=ResetImagePixels(image,exception);
if (status == MagickFalse)
return(DestroyImageList(image));
psd_info.min_channels=3;
if (psd_info.mode == LabMode)
(void) SetImageColorspace(image,LabColorspace,exception);
if (psd_info.mode == CMYKMode)
{
psd_info.min_channels=4;
(void) SetImageColorspace(image,CMYKColorspace,exception);
}
else
if ((psd_info.mode == BitmapMode) || (psd_info.mode == GrayscaleMode) ||
(psd_info.mode == DuotoneMode))
{
if (psd_info.depth != 32)
{
status=AcquireImageColormap(image,MagickMin((size_t)
(psd_info.depth < 16 ? 256 : 65536), MaxColormapSize),exception);
if (status == MagickFalse)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Image colormap allocated");
}
psd_info.min_channels=1;
(void) SetImageColorspace(image,GRAYColorspace,exception);
}
else
if (psd_info.mode == IndexedMode)
psd_info.min_channels=1;
if (psd_info.channels < psd_info.min_channels)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
/*
Read PSD raster colormap only present for indexed and duotone images.
*/
length=ReadBlobMSBLong(image);
if ((psd_info.mode == IndexedMode) && (length < 3))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if (length != 0)
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" reading colormap");
if ((psd_info.mode == DuotoneMode) || (psd_info.depth == 32))
{
/*
Duotone image data; the format of this data is undocumented.
32 bits per pixel; the colormap is ignored.
*/
(void) SeekBlob(image,(const MagickOffsetType) length,SEEK_CUR);
}
else
{
size_t
number_colors;
/*
Read PSD raster colormap.
*/
number_colors=(size_t) length/3;
if (number_colors > 65536)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if (AcquireImageColormap(image,number_colors,exception) == MagickFalse)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
for (i=0; i < (ssize_t) image->colors; i++)
image->colormap[i].red=(MagickRealType) ScaleCharToQuantum(
(unsigned char) ReadBlobByte(image));
for (i=0; i < (ssize_t) image->colors; i++)
image->colormap[i].green=(MagickRealType) ScaleCharToQuantum(
(unsigned char) ReadBlobByte(image));
for (i=0; i < (ssize_t) image->colors; i++)
image->colormap[i].blue=(MagickRealType) ScaleCharToQuantum(
(unsigned char) ReadBlobByte(image));
image->alpha_trait=UndefinedPixelTrait;
}
}
if ((image->depth == 1) && (image->storage_class != PseudoClass))
ThrowReaderException(CorruptImageError, "ImproperImageHeader");
psd_info.has_merged_image=MagickTrue;
profile=(StringInfo *) NULL;
length=ReadBlobMSBLong(image);
if (length != 0)
{
unsigned char
*blocks;
/*
Image resources block.
*/
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" reading image resource blocks - %.20g bytes",(double)
((MagickOffsetType) length));
if (length > GetBlobSize(image))
ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
blocks=(unsigned char *) AcquireQuantumMemory((size_t) length,
sizeof(*blocks));
if (blocks == (unsigned char *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
count=ReadBlob(image,(size_t) length,blocks);
if ((count != (ssize_t) length) || (length < 4) ||
(LocaleNCompare((char *) blocks,"8BIM",4) != 0))
{
blocks=(unsigned char *) RelinquishMagickMemory(blocks);
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
}
profile=ParseImageResourceBlocks(&psd_info,image,blocks,(size_t) length);
blocks=(unsigned char *) RelinquishMagickMemory(blocks);
}
/*
Layer and mask block.
*/
length=GetPSDSize(&psd_info,image);
if (length == 8)
{
length=ReadBlobMSBLong(image);
length=ReadBlobMSBLong(image);
}
offset=TellBlob(image);
skip_layers=MagickFalse;
if ((image_info->number_scenes == 1) && (image_info->scene == 0) &&
(psd_info.has_merged_image != MagickFalse))
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" read composite only");
skip_layers=MagickTrue;
}
if (length == 0)
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" image has no layers");
}
else
{
if (ReadPSDLayersInternal(image,image_info,&psd_info,skip_layers,
exception) != MagickTrue)
{
if (profile != (StringInfo *) NULL)
profile=DestroyStringInfo(profile);
(void) CloseBlob(image);
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Skip the rest of the layer and mask information.
*/
(void) SeekBlob(image,offset+length,SEEK_SET);
}
/*
If we are only "pinging" the image, then we're done - so return.
*/
if (EOFBlob(image) != MagickFalse)
{
if (profile != (StringInfo *) NULL)
profile=DestroyStringInfo(profile);
ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile");
}
if (image_info->ping != MagickFalse)
{
if (profile != (StringInfo *) NULL)
profile=DestroyStringInfo(profile);
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
/*
Read the precombined layer, present for PSD < 4 compatibility.
*/
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" reading the precombined layer");
image_list_length=GetImageListLength(image);
if ((psd_info.has_merged_image != MagickFalse) || (image_list_length == 1))
psd_info.has_merged_image=(MagickBooleanType) ReadPSDMergedImage(
image_info,image,&psd_info,exception);
if ((psd_info.has_merged_image == MagickFalse) && (image_list_length == 1) &&
(length != 0))
{
(void) SeekBlob(image,offset,SEEK_SET);
status=ReadPSDLayersInternal(image,image_info,&psd_info,MagickFalse,
exception);
if (status != MagickTrue)
{
if (profile != (StringInfo *) NULL)
profile=DestroyStringInfo(profile);
(void) CloseBlob(image);
image=DestroyImageList(image);
return((Image *) NULL);
}
image_list_length=GetImageListLength(image);
}
if (psd_info.has_merged_image == MagickFalse)
{
Image
*merged;
if (image_list_length == 1)
{
if (profile != (StringInfo *) NULL)
profile=DestroyStringInfo(profile);
ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
}
image->background_color.alpha=(MagickRealType) TransparentAlpha;
image->background_color.alpha_trait=BlendPixelTrait;
(void) SetImageBackgroundColor(image,exception);
merged=MergeImageLayers(image,FlattenLayer,exception);
if (merged == (Image *) NULL)
{
(void) CloseBlob(image);
image=DestroyImageList(image);
return((Image *) NULL);
}
ReplaceImageInList(&image,merged);
}
if (profile != (StringInfo *) NULL)
{
Image
*next;
i=0;
next=image;
while (next != (Image *) NULL)
{
if (PSDSkipImage(&psd_info,image_info,i++) == MagickFalse)
(void) SetImageProfile(next,GetStringInfoName(profile),profile,
exception);
next=next->next;
}
profile=DestroyStringInfo(profile);
}
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e g i s t e r P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RegisterPSDImage() adds properties for the PSD image format to
% the list of supported formats. The properties include the image format
% tag, a method to read and/or write the format, whether the format
% supports the saving of more than one frame to the same file or blob,
% whether the format supports native in-memory I/O, and a brief
% description of the format.
%
% The format of the RegisterPSDImage method is:
%
% size_t RegisterPSDImage(void)
%
*/
ModuleExport size_t RegisterPSDImage(void)
{
MagickInfo
*entry;
entry=AcquireMagickInfo("PSD","PSB","Adobe Large Document Format");
entry->decoder=(DecodeImageHandler *) ReadPSDImage;
entry->encoder=(EncodeImageHandler *) WritePSDImage;
entry->magick=(IsImageFormatHandler *) IsPSD;
entry->flags|=CoderDecoderSeekableStreamFlag;
entry->flags|=CoderEncoderSeekableStreamFlag;
(void) RegisterMagickInfo(entry);
entry=AcquireMagickInfo("PSD","PSD","Adobe Photoshop bitmap");
entry->decoder=(DecodeImageHandler *) ReadPSDImage;
entry->encoder=(EncodeImageHandler *) WritePSDImage;
entry->magick=(IsImageFormatHandler *) IsPSD;
entry->flags|=CoderDecoderSeekableStreamFlag;
entry->flags|=CoderEncoderSeekableStreamFlag;
(void) RegisterMagickInfo(entry);
return(MagickImageCoderSignature);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n r e g i s t e r P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnregisterPSDImage() removes format registrations made by the
% PSD module from the list of supported formats.
%
% The format of the UnregisterPSDImage method is:
%
% UnregisterPSDImage(void)
%
*/
ModuleExport void UnregisterPSDImage(void)
{
  /*
    Remove both format registrations made by RegisterPSDImage().
  */
  (void) UnregisterMagickInfo("PSB");
  (void) UnregisterMagickInfo("PSD");
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePSDImage() writes an image in the Adobe Photoshop encoded image format.
%
% The format of the WritePSDImage method is:
%
% MagickBooleanType WritePSDImage(const ImageInfo *image_info,Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image_info: the image info.
%
% o image: The image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline ssize_t SetPSDOffset(const PSDInfo *psd_info,Image *image,
  const size_t offset)
{
  /*
    Write a PSD row-byte-count field: 16-bit big-endian for version 1
    (PSD), 32-bit big-endian for version 2 (PSB).
  */
  if (psd_info->version != 1)
    return(WriteBlobMSBLong(image,(unsigned int) offset));
  return(WriteBlobMSBShort(image,(unsigned short) offset));
}
static inline ssize_t WritePSDOffset(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size,const MagickOffsetType offset)
{
  MagickOffsetType
    saved_position;

  ssize_t
    written;

  /*
    Patch a previously reserved offset slot at 'offset' with 'size',
    then restore the current write position of the stream.
  */
  saved_position=TellBlob(image);
  (void) SeekBlob(image,offset,SEEK_SET);
  if (psd_info->version != 1)
    written=WriteBlobMSBLong(image,(unsigned int) size);
  else
    written=WriteBlobMSBShort(image,(unsigned short) size);
  (void) SeekBlob(image,saved_position,SEEK_SET);
  return(written);
}
static inline ssize_t SetPSDSize(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size)
{
  /*
    Write a PSD length field: 32 bits for version 1 (PSD), 64 bits for
    version 2 (PSB).
  */
  if (psd_info->version != 1)
    return(WriteBlobLongLong(image,size));
  return(WriteBlobLong(image,(unsigned int) size));
}
static inline ssize_t WritePSDSize(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size,const MagickOffsetType offset)
{
  MagickOffsetType
    saved_position;

  ssize_t
    written;

  /*
    Patch a previously reserved size field at 'offset' with 'size', then
    restore the current write position of the stream.
  */
  saved_position=TellBlob(image);
  (void) SeekBlob(image,offset,SEEK_SET);
  written=SetPSDSize(psd_info,image,size);
  (void) SeekBlob(image,saved_position,SEEK_SET);
  return(written);
}
static size_t PSDPackbitsEncodeImage(Image *image,const size_t length,
  const unsigned char *pixels,unsigned char *compact_pixels,
  ExceptionInfo *exception)
{
  int
    count;

  register ssize_t
    i,
    j;

  register unsigned char
    *q;

  unsigned char
    *packbits;

  /*
    Compress 'length' bytes of raw pixel data into 'compact_pixels' using
    Packbits run-length encoding; returns the number of bytes written
    (including the trailing EOD marker).  The caller must size
    'compact_pixels' for worst-case expansion (see AcquireCompactPixels).
    Returns 0 on allocation failure (via ThrowBinaryException).
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(pixels != (unsigned char *) NULL);
  assert(compact_pixels != (unsigned char *) NULL);
  /* scratch buffer for one literal-run packet (1 length byte + 127 data) */
  packbits=(unsigned char *) AcquireQuantumMemory(128UL,sizeof(*packbits));
  if (packbits == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  q=compact_pixels;
  /* i counts the raw bytes still to encode */
  for (i=(ssize_t) length; i != 0; )
  {
    switch (i)
    {
      case 1:
      {
        /* one byte left: literal run of length 1 */
        i--;
        *q++=(unsigned char) 0;
        *q++=(*pixels);
        break;
      }
      case 2:
      {
        /* two bytes left: literal run of length 2 */
        i-=2;
        *q++=(unsigned char) 1;
        *q++=(*pixels);
        *q++=pixels[1];
        break;
      }
      case 3:
      {
        i-=3;
        if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
          {
            /* three identical bytes: packed (repeat) run of length 3 */
            *q++=(unsigned char) ((256-3)+1);
            *q++=(*pixels);
            break;
          }
        /* otherwise a literal run of length 3 */
        *q++=(unsigned char) 2;
        *q++=(*pixels);
        *q++=pixels[1];
        *q++=pixels[2];
        break;
      }
      default:
      {
        if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
          {
            /*
              Packed run: extend while bytes repeat, up to the 127-byte
              packet limit.
            */
            count=3;
            while (((ssize_t) count < i) && (*pixels == *(pixels+count)))
            {
              count++;
              if (count >= 127)
                break;
            }
            i-=count;
            /* run-length packets use a negative count: (256-count)+1 */
            *q++=(unsigned char) ((256-count)+1);
            *q++=(*pixels);
            pixels+=count;
            break;
          }
        /*
          Literal run: copy bytes until a 3-byte repeat begins, the
          packet limit (127) is hit, or fewer than 3 bytes remain.
        */
        count=0;
        while ((*(pixels+count) != *(pixels+count+1)) ||
               (*(pixels+count+1) != *(pixels+count+2)))
        {
          packbits[count+1]=pixels[count];
          count++;
          if (((ssize_t) count >= (i-3)) || (count >= 127))
            break;
        }
        i-=count;
        /* packbits[0] is the literal count-1 header byte */
        *packbits=(unsigned char) (count-1);
        for (j=0; j <= (ssize_t) count; j++)
          *q++=packbits[j];
        pixels+=count;
        break;
      }
    }
  }
  *q++=(unsigned char) 128; /* EOD marker */
  packbits=(unsigned char *) RelinquishMagickMemory(packbits);
  return((size_t) (q-compact_pixels));
}
static size_t WriteCompressionStart(const PSDInfo *psd_info,Image *image,
  const Image *next_image,const CompressionType compression,
  const ssize_t channels)
{
  size_t
    count;

  ssize_t
    channel,
    row;

  /*
    Emit the 2-byte compression marker.  For RLE, additionally reserve one
    scanline byte-count slot per row per channel; the real counts are
    patched in later via WritePSDOffset.  Returns the bytes written.
  */
  if (compression == RLECompression)
    {
      count=(size_t) WriteBlobShort(image,RLE);
      for (channel=0; channel < channels; channel++)
        for (row=0; row < (ssize_t) next_image->rows; row++)
          count+=SetPSDOffset(psd_info,image,0);
      return(count);
    }
#ifdef MAGICKCORE_ZLIB_DELEGATE
  if (compression == ZipCompression)
    return((size_t) WriteBlobShort(image,ZipWithoutPrediction));
#endif
  /* anything else (including Zip without zlib support) falls back to Raw */
  return((size_t) WriteBlobShort(image,Raw));
}
static size_t WritePSDChannel(const PSDInfo *psd_info,
  const ImageInfo *image_info,Image *image,Image *next_image,
  const QuantumType quantum_type, unsigned char *compact_pixels,
  MagickOffsetType size_offset,const MagickBooleanType separate,
  const CompressionType compression,ExceptionInfo *exception)
{
  MagickBooleanType
    monochrome;

  QuantumInfo
    *quantum_info;

  register const Quantum
    *p;

  register ssize_t
    i;

  size_t
    count,
    length;

  ssize_t
    y;

  unsigned char
    *pixels;

#ifdef MAGICKCORE_ZLIB_DELEGATE
  int
    flush,
    level;

  unsigned char
    *compressed_pixels;

  z_stream
    stream;

  compressed_pixels=(unsigned char *) NULL;
  flush=Z_NO_FLUSH;
#endif
  /*
    Write one channel (selected by quantum_type) of next_image to the blob,
    row by row, using the requested compression.  Returns the number of
    bytes written, or 0 on failure.  When 'separate' is set the channel
    gets its own compression marker and its RLE row counts are reserved
    here; otherwise size_offset points at row-count slots reserved by the
    caller.
  */
  count=0;
  if (separate != MagickFalse)
    {
      /* +2 skips the compression marker we are about to write */
      size_offset=TellBlob(image)+2;
      count+=WriteCompressionStart(psd_info,image,next_image,compression,1);
    }
  /* PSD supports 8- or 16-bit channel data only */
  if (next_image->depth > 8)
    next_image->depth=16;
  monochrome=IsImageMonochrome(image) && (image->depth == 1) ?
    MagickTrue : MagickFalse;
  quantum_info=AcquireQuantumInfo(image_info,next_image);
  if (quantum_info == (QuantumInfo *) NULL)
    return(0);
  pixels=(unsigned char *) GetQuantumPixels(quantum_info);
#ifdef MAGICKCORE_ZLIB_DELEGATE
  if (compression == ZipCompression)
    {
      compressed_pixels=(unsigned char *) AcquireQuantumMemory(
        MagickMinBufferExtent,sizeof(*compressed_pixels));
      if (compressed_pixels == (unsigned char *) NULL)
        {
          quantum_info=DestroyQuantumInfo(quantum_info);
          return(0);
        }
      memset(&stream,0,sizeof(stream));
      stream.data_type=Z_BINARY;
      level=Z_DEFAULT_COMPRESSION;
      /* qualities 1..9 map directly onto zlib compression levels */
      if ((image_info->quality > 0 && image_info->quality < 10))
        level=(int) image_info->quality;
      if (deflateInit(&stream,level) != Z_OK)
        {
          quantum_info=DestroyQuantumInfo(quantum_info);
          compressed_pixels=(unsigned char *) RelinquishMagickMemory(
            compressed_pixels);
          return(0);
        }
    }
#endif
  for (y=0; y < (ssize_t) next_image->rows; y++)
  {
    p=GetVirtualPixels(next_image,0,y,next_image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    length=ExportQuantumPixels(next_image,(CacheView *) NULL,quantum_info,
      quantum_type,pixels,exception);
    if (monochrome != MagickFalse)
      /* bilevel PSD stores inverted values: 0 is white */
      for (i=0; i < (ssize_t) length; i++)
        pixels[i]=(~pixels[i]);
    if (compression == RLECompression)
      {
        length=PSDPackbitsEncodeImage(image,length,pixels,compact_pixels,
          exception);
        count+=WriteBlob(image,length,compact_pixels);
        /* patch this row's byte count into its reserved slot */
        size_offset+=WritePSDOffset(psd_info,image,length,size_offset);
      }
#ifdef MAGICKCORE_ZLIB_DELEGATE
    else if (compression == ZipCompression)
      {
        stream.avail_in=(uInt) length;
        stream.next_in=(Bytef *) pixels;
        /* flush the deflate stream on the final row */
        if (y == (ssize_t) next_image->rows-1)
          flush=Z_FINISH;
        do {
            stream.avail_out=(uInt) MagickMinBufferExtent;
            stream.next_out=(Bytef *) compressed_pixels;
            if (deflate(&stream,flush) == Z_STREAM_ERROR)
              break;
            length=(size_t) MagickMinBufferExtent-stream.avail_out;
            if (length > 0)
              count+=WriteBlob(image,length,compressed_pixels);
        } while (stream.avail_out == 0);
      }
#endif
    else
      count+=WriteBlob(image,length,pixels);
  }
#ifdef MAGICKCORE_ZLIB_DELEGATE
  if (compression == ZipCompression)
    {
      (void) deflateEnd(&stream);
      compressed_pixels=(unsigned char *) RelinquishMagickMemory(
        compressed_pixels);
    }
#endif
  quantum_info=DestroyQuantumInfo(quantum_info);
  return(count);
}
static unsigned char *AcquireCompactPixels(const Image *image,
  ExceptionInfo *exception)
{
  size_t
    packet_size;

  unsigned char
    *compact_pixels;

  /*
    Allocate a scratch buffer sized for the worst-case Packbits expansion
    of one scanline (RLE can grow incompressible data; 9*columns+1 packets
    is a safe upper bound).  Returns NULL, with an exception recorded, on
    failure.
  */
  packet_size=image->depth > 8UL ? 2UL : 1UL;
  /* guard the 9*columns multiplication against size_t overflow before the
     allocator's own count*quantum overflow check can see it */
  if (image->columns >= (((size_t) -1)/9))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return((unsigned char *) NULL);
    }
  compact_pixels=(unsigned char *) AcquireQuantumMemory((9*
    image->columns)+1,packet_size*sizeof(*compact_pixels));
  if (compact_pixels == (unsigned char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
    }
  return(compact_pixels);
}
static size_t WritePSDChannels(const PSDInfo *psd_info,
  const ImageInfo *image_info,Image *image,Image *next_image,
  MagickOffsetType size_offset,const MagickBooleanType separate,
  ExceptionInfo *exception)
{
  CompressionType
    compression;

  Image
    *mask;

  MagickOffsetType
    rows_offset;

  size_t
    channels,
    count,
    length,
    offset_length;

  unsigned char
    *compact_pixels;

  /*
    Write all channels of next_image.  'separate' selects the layer format
    (each channel has its own compression marker and its size is patched
    into the channel-size slot at size_offset); otherwise the merged-image
    format is used (one shared compression marker, RLE row counts advanced
    via rows_offset).  Returns the total bytes written, or 0 on failure.
  */
  count=0;
  offset_length=0;
  rows_offset=0;
  compact_pixels=(unsigned char *) NULL;
  compression=next_image->compression;
  if (image_info->compression != UndefinedCompression)
    compression=image_info->compression;
  if (compression == RLECompression)
    {
      compact_pixels=AcquireCompactPixels(next_image,exception);
      if (compact_pixels == (unsigned char *) NULL)
        return(0);
    }
  channels=1;
  if (separate == MagickFalse)
    {
      /* merged image: count channels, write shared compression marker and
         reserve the RLE row-count table for all channels up front */
      if ((next_image->storage_class != PseudoClass) ||
          (IsImageGray(next_image) != MagickFalse))
        {
          if (IsImageGray(next_image) == MagickFalse)
            channels=(size_t) (next_image->colorspace == CMYKColorspace ? 4 :
              3);
          if (next_image->alpha_trait != UndefinedPixelTrait)
            channels++;
        }
      rows_offset=TellBlob(image)+2;
      count+=WriteCompressionStart(psd_info,image,next_image,compression,
        (ssize_t) channels);
      /* bytes occupied by one channel's row-count table */
      offset_length=(next_image->rows*(psd_info->version == 1 ? 2 : 4));
    }
  /* skip the channel-id short preceding each reserved channel size */
  size_offset+=2;
  if ((next_image->storage_class == PseudoClass) &&
      (IsImageGray(next_image) == MagickFalse))
    {
      /* colormapped image: single index channel */
      length=WritePSDChannel(psd_info,image_info,image,next_image,
        IndexQuantum,compact_pixels,rows_offset,separate,compression,
        exception);
      if (separate != MagickFalse)
        size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
      else
        rows_offset+=offset_length;
      count+=length;
    }
  else
    {
      if (IsImageGray(next_image) != MagickFalse)
        {
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            GrayQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
        }
      else
        {
          /* PSD stores CMYK inverted; negate before writing */
          if (next_image->colorspace == CMYKColorspace)
            (void) NegateCMYK(next_image,exception);
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            RedQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            GreenQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            BlueQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          if (next_image->colorspace == CMYKColorspace)
            {
              length=WritePSDChannel(psd_info,image_info,image,next_image,
                BlackQuantum,compact_pixels,rows_offset,separate,compression,
                exception);
              if (separate != MagickFalse)
                size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
              else
                rows_offset+=offset_length;
              count+=length;
            }
        }
      if (next_image->alpha_trait != UndefinedPixelTrait)
        {
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            AlphaQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
        }
    }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  /* undo the CMYK negation applied above */
  if (next_image->colorspace == CMYKColorspace)
    (void) NegateCMYK(next_image,exception);
  if (separate != MagickFalse)
    {
      const char
        *property;

      /* optionally write the layer's opacity mask as an extra channel */
      property=GetImageArtifact(next_image,"psd:opacity-mask");
      if (property != (const char *) NULL)
        {
          mask=(Image *) GetImageRegistry(ImageRegistryType,property,
            exception);
          if (mask != (Image *) NULL)
            {
              if (compression == RLECompression)
                {
                  compact_pixels=AcquireCompactPixels(mask,exception);
                  if (compact_pixels == (unsigned char *) NULL)
                    return(0);
                }
              length=WritePSDChannel(psd_info,image_info,image,mask,
                RedQuantum,compact_pixels,rows_offset,MagickTrue,compression,
                exception);
              (void) WritePSDSize(psd_info,image,length,size_offset);
              count+=length;
              compact_pixels=(unsigned char *) RelinquishMagickMemory(
                compact_pixels);
            }
        }
    }
  return(count);
}
static size_t WritePascalString(Image *image,const char *value,size_t padding)
{
  size_t
    count,
    length;

  register ssize_t
    i;

  /*
    Write a Pascal string: one length byte followed by at most 255
    characters, then zero-padded so the total (length byte included) is a
    multiple of 'padding'.  Returns the number of bytes written.
  */
  count=0;
  length=strlen(value);  /* hoisted: was evaluated twice */
  if (length > 255UL)
    length=255UL;
  if (length == 0)
    count+=WriteBlobByte(image,0);
  else
    {
      count+=WriteBlobByte(image,(unsigned char) length);
      count+=WriteBlob(image,length,(const unsigned char *) value);
    }
  length++;  /* account for the length byte itself */
  if ((length % padding) == 0)
    return(count);
  for (i=0; i < (ssize_t) (padding-(length % padding)); i++)
    count+=WriteBlobByte(image,0);
  return(count);
}
static void WriteResolutionResourceBlock(Image *image)
{
  double
    x_resolution,
    y_resolution;

  unsigned short
    units;

  /*
    Emit the 8BIM resolution resource (id 0x03ED): horizontal and vertical
    resolution as 16.16 fixed-point values, each followed by its display
    unit (1 = pixels/inch, 2 = pixels/cm).
  */
  if (image->units == PixelsPerCentimeterResolution)
    {
      /* PSD resolution is stored in pixels/inch; convert from cm */
      x_resolution=2.54*65536.0*image->resolution.x+0.5;
      y_resolution=2.54*65536.0*image->resolution.y+0.5;
      units=2;
    }
  else
    {
      x_resolution=65536.0*image->resolution.x+0.5;
      y_resolution=65536.0*image->resolution.y+0.5;
      units=1;
    }
  (void) WriteBlob(image,4,(const unsigned char *) "8BIM");
  (void) WriteBlobMSBShort(image,0x03ED);
  (void) WriteBlobMSBShort(image,0);
  (void) WriteBlobMSBLong(image,16); /* resource size */
  /* NOTE(review): 0.5 is added both during conversion above and again
     here, so a second rounding step is applied -- confirm intentional */
  (void) WriteBlobMSBLong(image,(unsigned int) (x_resolution+0.5));
  (void) WriteBlobMSBShort(image,units); /* horizontal resolution unit */
  (void) WriteBlobMSBShort(image,units); /* width unit */
  (void) WriteBlobMSBLong(image,(unsigned int) (y_resolution+0.5));
  (void) WriteBlobMSBShort(image,units); /* vertical resolution unit */
  (void) WriteBlobMSBShort(image,units); /* height unit */
}
static inline size_t WriteChannelSize(const PSDInfo *psd_info,Image *image,
  const signed short channel)
{
  size_t
    bytes;

  /*
    Write a channel id followed by a zero placeholder for its data length;
    the real length is patched in later with WritePSDSize.  Returns the
    number of bytes written.
  */
  bytes=(size_t) WriteBlobShort(image,(const unsigned short) channel);
  bytes+=SetPSDSize(psd_info,image,0);
  return(bytes);
}
static void RemoveICCProfileFromResourceBlock(StringInfo *bim_profile)
{
  register const unsigned char
    *p;

  size_t
    length;

  unsigned char
    *datum;

  unsigned int
    count,
    long_sans;

  unsigned short
    id,
    short_sans;

  /*
    Strip the embedded ICC profile (8BIM resource id 0x040f) from a
    Photoshop resource block, editing the StringInfo in place and
    shortening it; the ICC profile is written separately by the encoder.
  */
  length=GetStringInfoLength(bim_profile);
  if (length < 16)
    return;
  datum=GetStringInfoDatum(bim_profile);
  for (p=datum; (p >= datum) && (p < (datum+length-16)); )
  {
    register unsigned char
      *q;

    q=(unsigned char *) p;
    /* every resource entry begins with an "8BIM" signature */
    if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
      break;
    /* entry header: 4-byte signature (read as long), id, name stub, size */
    p=PushLongPixel(MSBEndian,p,&long_sans);
    p=PushShortPixel(MSBEndian,p,&id);
    p=PushShortPixel(MSBEndian,p,&short_sans);
    p=PushLongPixel(MSBEndian,p,&count);
    if (id == 0x0000040f)
      {
        ssize_t
          quantum;

        /* quantum = even-padded resource size + 12-byte entry header */
        quantum=PSDQuantum(count)+12;
        if ((quantum >= 12) && (quantum < (ssize_t) length))
          {
            if ((q+quantum < (datum+length-16)))
              (void) memmove(q,q+quantum,length-quantum-(q-datum));
            SetStringInfoLength(bim_profile,length-quantum);
          }
        break;
      }
    p+=count;
    if ((count & 0x01) != 0)
      p++;  /* resource data is padded to an even byte count */
  }
}
static void RemoveResolutionFromResourceBlock(StringInfo *bim_profile)
{
  register const unsigned char
    *p;

  size_t
    length;

  unsigned char
    *datum;

  unsigned int
    count,
    long_sans;

  unsigned short
    id,
    short_sans;

  /*
    Strip the resolution resource (8BIM id 0x03ed) from a Photoshop
    resource block, editing the StringInfo in place; a fresh resolution
    resource is written by WriteResolutionResourceBlock instead.
  */
  length=GetStringInfoLength(bim_profile);
  if (length < 16)
    return;
  datum=GetStringInfoDatum(bim_profile);
  for (p=datum; (p >= datum) && (p < (datum+length-16)); )
  {
    register unsigned char
      *q;

    ssize_t
      cnt;

    q=(unsigned char *) p;
    /* every resource entry begins with an "8BIM" signature */
    if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
      return;
    /* entry header: signature (read as long), id, name stub, data size */
    p=PushLongPixel(MSBEndian,p,&long_sans);
    p=PushShortPixel(MSBEndian,p,&id);
    p=PushShortPixel(MSBEndian,p,&short_sans);
    p=PushLongPixel(MSBEndian,p,&count);
    /* cnt is the even-padded resource data size */
    cnt=PSDQuantum(count);
    if (cnt < 0)
      return;
    if ((id == 0x000003ed) && (cnt < (ssize_t) (length-12)) &&
        ((ssize_t) length-(cnt+12)-(q-datum)) > 0)
      {
        /* close the gap over header (12 bytes) plus padded data */
        (void) memmove(q,q+cnt+12,length-(cnt+12)-(q-datum));
        SetStringInfoLength(bim_profile,length-(cnt+12));
        break;
      }
    p+=count;
    if ((count & 0x01) != 0)
      p++;  /* resource data is padded to an even byte count */
  }
}
static const StringInfo *GetAdditionalInformation(const ImageInfo *image_info,
  Image *image,ExceptionInfo *exception)
{
#define PSDKeySize 5
#define PSDAllowedLength 36

  char
    key[PSDKeySize];

  /* Whitelist of keys from: https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/ */
  const char
    allowed[PSDAllowedLength][PSDKeySize] = {
      "blnc", "blwh", "brit", "brst", "clbl", "clrL", "curv", "expA", "FMsk",
      "GdFl", "grdm", "hue ", "hue2", "infx", "knko", "lclr", "levl", "lnsr",
      "lfx2", "luni", "lrFX", "lspf", "lyid", "lyvr", "mixr", "nvrt", "phfl",
      "post", "PtFl", "selc", "shpa", "sn2P", "SoCo", "thrs", "tsly", "vibA"
    },
    *option;

  const StringInfo
    *info;

  MagickBooleanType
    found;

  register size_t
    i;

  size_t
    remaining_length,
    length;

  StringInfo
    *profile;

  unsigned char
    *p;

  unsigned int
    size;

  /*
    Return the "psd:additional-info" profile to embed in layer records,
    filtered according to the psd:additional-info option: "all" keeps the
    profile unchanged, "selective" keeps only whitelisted keys (editing the
    profile in place), anything else discards it and returns NULL.
  */
  info=GetImageProfile(image,"psd:additional-info");
  if (info == (const StringInfo *) NULL)
    return((const StringInfo *) NULL);
  option=GetImageOption(image_info,"psd:additional-info");
  if (LocaleCompare(option,"all") == 0)
    return(info);
  if (LocaleCompare(option,"selective") != 0)
    {
      profile=RemoveImageProfile(image,"psd:additional-info");
      return(DestroyStringInfo(profile));
    }
  length=GetStringInfoLength(info);
  p=GetStringInfoDatum(info);
  remaining_length=length;
  length=0;
  /* each entry: 4-byte signature, 4-byte key, 4-byte big-endian size */
  while (remaining_length >= 12)
  {
    /* skip over signature */
    p+=4;
    key[0]=(char) (*p++);
    key[1]=(char) (*p++);
    key[2]=(char) (*p++);
    key[3]=(char) (*p++);
    key[4]='\0';
    size=(unsigned int) (*p++) << 24;
    size|=(unsigned int) (*p++) << 16;
    size|=(unsigned int) (*p++) << 8;
    size|=(unsigned int) (*p++);
    size=size & 0xffffffff;
    remaining_length-=12;
    if ((size_t) size > remaining_length)
      return((const StringInfo *) NULL);  /* truncated/corrupt entry */
    found=MagickFalse;
    for (i=0; i < PSDAllowedLength; i++)
    {
      if (LocaleNCompare(key,allowed[i],PSDKeySize) != 0)
        continue;
      found=MagickTrue;
      break;
    }
    remaining_length-=(size_t) size;
    if (found == MagickFalse)
      {
        /* drop this entry: shift the remainder over it in place */
        if (remaining_length > 0)
          p=(unsigned char *) memmove(p-12,p+size,remaining_length);
        continue;
      }
    length+=(size_t) size+12;
    p+=size;
  }
  profile=RemoveImageProfile(image,"psd:additional-info");
  if (length == 0)
    return(DestroyStringInfo(profile));
  /* shrink to the bytes kept, then re-register the filtered profile */
  SetStringInfoLength(profile,(const size_t) length);
  (void) SetImageProfile(image,"psd:additional-info",info,exception);
  return(profile);
}
static MagickBooleanType WritePSDLayersInternal(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,size_t *layers_size,
  ExceptionInfo *exception)
{
  char
    layer_name[MagickPathExtent];

  const char
    *property;

  const StringInfo
    *info;

  Image
    *base_image,
    *next_image;

  MagickBooleanType
    status;

  MagickOffsetType
    *layer_size_offsets,
    size_offset;

  register ssize_t
    i;

  size_t
    layer_count,
    layer_index,
    length,
    name_length,
    rounded_size,
    size;

  /*
    Write the PSD layer-info section: layer count, one record per layer
    (bounds, channel table, blend mode, opacity, mask, name, additional
    info), then each layer's channel data.  The section's total size is
    patched back into the slot reserved at the start.  If layers_size is
    non-NULL, the unrounded section size is returned through it.
  */
  status=MagickTrue;
  base_image=GetNextImageInList(image);
  if (base_image == (Image *) NULL)
    base_image=image;
  size=0;
  /* reserve the layer-info section size; patched at the end */
  size_offset=TellBlob(image);
  (void) SetPSDSize(psd_info,image,0);
  layer_count=0;
  for (next_image=base_image; next_image != NULL; )
  {
    layer_count++;
    next_image=GetNextImageInList(next_image);
  }
  /* a negative layer count signals that the first alpha channel holds the
     merged image's transparency */
  if (image->alpha_trait != UndefinedPixelTrait)
    size+=WriteBlobShort(image,-(unsigned short) layer_count);
  else
    size+=WriteBlobShort(image,(unsigned short) layer_count);
  layer_size_offsets=(MagickOffsetType *) AcquireQuantumMemory(
    (size_t) layer_count,sizeof(MagickOffsetType));
  if (layer_size_offsets == (MagickOffsetType *) NULL)
    ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
  layer_index=0;
  for (next_image=base_image; next_image != NULL; )
  {
    Image
      *mask;

    unsigned char
      default_color;

    unsigned short
      channels,
      total_channels;

    mask=(Image *) NULL;
    property=GetImageArtifact(next_image,"psd:opacity-mask");
    default_color=0;
    if (property != (const char *) NULL)
      {
        mask=(Image *) GetImageRegistry(ImageRegistryType,property,exception);
        default_color=(unsigned char) (strlen(property) == 9 ? 255 : 0);
      }
    /* layer bounds: top, left, bottom, right */
    size+=WriteBlobSignedLong(image,(signed int) next_image->page.y);
    size+=WriteBlobSignedLong(image,(signed int) next_image->page.x);
    size+=WriteBlobSignedLong(image,(signed int) (next_image->page.y+
      next_image->rows));
    size+=WriteBlobSignedLong(image,(signed int) (next_image->page.x+
      next_image->columns));
    channels=1;
    if ((next_image->storage_class != PseudoClass) &&
        (IsImageGray(next_image) == MagickFalse))
      channels=(unsigned short) (next_image->colorspace == CMYKColorspace ? 4 :
        3);
    total_channels=channels;
    if (next_image->alpha_trait != UndefinedPixelTrait)
      total_channels++;
    if (mask != (Image *) NULL)
      total_channels++;
    size+=WriteBlobShort(image,total_channels);
    /* remember where this layer's channel-size table starts so the real
       sizes can be patched in after the channel data is written */
    layer_size_offsets[layer_index++]=TellBlob(image);
    for (i=0; i < (ssize_t) channels; i++)
      size+=WriteChannelSize(psd_info,image,(signed short) i);
    /* channel id -1 is alpha, -2 is the user-supplied layer mask */
    if (next_image->alpha_trait != UndefinedPixelTrait)
      size+=WriteChannelSize(psd_info,image,-1);
    if (mask != (Image *) NULL)
      size+=WriteChannelSize(psd_info,image,-2);
    size+=WriteBlobString(image,image->endian == LSBEndian ? "MIB8" :"8BIM");
    size+=WriteBlobString(image,CompositeOperatorToPSDBlendMode(next_image));
    property=GetImageArtifact(next_image,"psd:layer.opacity");
    if (property != (const char *) NULL)
      {
        Quantum
          opacity;

        opacity=(Quantum) StringToInteger(property);
        size+=WriteBlobByte(image,ScaleQuantumToChar(opacity));
        (void) ApplyPSDLayerOpacity(next_image,opacity,MagickTrue,exception);
      }
    else
      size+=WriteBlobByte(image,255);
    size+=WriteBlobByte(image,0);  /* clipping: base */
    size+=WriteBlobByte(image,(const unsigned char)
      (next_image->compose == NoCompositeOp ? 1 << 0x02 : 1)); /* layer properties - visible, etc. */
    size+=WriteBlobByte(image,0);  /* filler */
    info=GetAdditionalInformation(image_info,next_image,exception);
    property=(const char *) GetImageProperty(next_image,"label",exception);
    if (property == (const char *) NULL)
      {
        /* unlabeled layers get a synthetic "L<index>" name */
        (void) FormatLocaleString(layer_name,MagickPathExtent,"L%.20g",
          (double) layer_index);
        property=layer_name;
      }
    /* extra-data length: padded name + mask record + additional info */
    name_length=strlen(property)+1;
    if ((name_length % 4) != 0)
      name_length+=(4-(name_length % 4));
    if (info != (const StringInfo *) NULL)
      name_length+=GetStringInfoLength(info);
    name_length+=8;
    if (mask != (Image *) NULL)
      name_length+=20;
    size+=WriteBlobLong(image,(unsigned int) name_length);
    if (mask == (Image *) NULL)
      size+=WriteBlobLong(image,0);  /* no layer-mask record */
    else
      {
        if (mask->compose != NoCompositeOp)
          (void) ApplyPSDOpacityMask(next_image,mask,ScaleCharToQuantum(
            default_color),MagickTrue,exception);
        /* mask bounds are stored in canvas coordinates */
        mask->page.y+=image->page.y;
        mask->page.x+=image->page.x;
        size+=WriteBlobLong(image,20);  /* mask record length */
        size+=WriteBlobSignedLong(image,(const signed int) mask->page.y);
        size+=WriteBlobSignedLong(image,(const signed int) mask->page.x);
        size+=WriteBlobSignedLong(image,(const signed int) (mask->rows+
          mask->page.y));
        size+=WriteBlobSignedLong(image,(const signed int) (mask->columns+
          mask->page.x));
        size+=WriteBlobByte(image,default_color);
        size+=WriteBlobByte(image,(const unsigned char)
          (mask->compose == NoCompositeOp ? 2 : 0));  /* mask flags */
        size+=WriteBlobMSBShort(image,0);  /* padding */
      }
    size+=WriteBlobLong(image,0);  /* blending-ranges length */
    size+=WritePascalString(image,property,4);
    if (info != (const StringInfo *) NULL)
      size+=WriteBlob(image,GetStringInfoLength(info),
        GetStringInfoDatum(info));
    next_image=GetNextImageInList(next_image);
  }
  /*
    Now the image data!
  */
  next_image=base_image;
  layer_index=0;
  while (next_image != NULL)
  {
    length=WritePSDChannels(psd_info,image_info,image,next_image,
      layer_size_offsets[layer_index++],MagickTrue,exception);
    if (length == 0)
      {
        status=MagickFalse;
        break;
      }
    size+=length;
    next_image=GetNextImageInList(next_image);
  }
  /*
    Write the total size
  */
  if (layers_size != (size_t*) NULL)
    *layers_size=size;
  /* the section size recorded in the file is rounded up to an even count */
  if ((size/2) != ((size+1)/2))
    rounded_size=size+1;
  else
    rounded_size=size;
  (void) WritePSDSize(psd_info,image,rounded_size,size_offset);
  layer_size_offsets=(MagickOffsetType *) RelinquishMagickMemory(
    layer_size_offsets);
  /*
    Remove the opacity mask from the registry
  */
  next_image=base_image;
  while (next_image != (Image *) NULL)
  {
    property=GetImageArtifact(next_image,"psd:opacity-mask");
    if (property != (const char *) NULL)
      (void) DeleteImageRegistry(property);
    next_image=GetNextImageInList(next_image);
  }
  return(status);
}
ModuleExport MagickBooleanType WritePSDLayers(Image * image,
  const ImageInfo *image_info,const PSDInfo *psd_info,ExceptionInfo *exception)
{
  /*
    Public entry point for writing PSD layer records.  When the security
    policy denies write rights to the PSD coder, the layers are skipped
    and success is reported.
  */
  if (IsRightsAuthorized(CoderPolicyDomain,WritePolicyRights,"PSD") ==
      MagickFalse)
    return(MagickTrue);
  return(WritePSDLayersInternal(image,image_info,psd_info,(size_t *) NULL,
    exception));
}
/*
  WritePSDImage() encodes IMAGE into Adobe Photoshop format (PSD, or the
  large-document PSB variant when requested or when the canvas exceeds
  30000 pixels in either dimension).  The sections are written strictly in
  file order: file header, color mode data (palette), image resources
  (resolution, optional 8BIM and ICC profiles), layer information, the
  global mask placeholder, and finally the composite image data.
  Returns MagickTrue on success, MagickFalse otherwise.
*/
static MagickBooleanType WritePSDImage(const ImageInfo *image_info,
Image *image,ExceptionInfo *exception)
{
const StringInfo
*icc_profile;
MagickBooleanType
status;
PSDInfo
psd_info;
register ssize_t
i;
size_t
length,
num_channels,
packet_size;
StringInfo
*bim_profile;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
if (status == MagickFalse)
return(status);
/* NOTE(review): packet_size is computed here but never read again in this
   function — candidate for removal; confirm against the full file. */
packet_size=(size_t) (image->depth > 8 ? 6 : 3);
if (image->alpha_trait != UndefinedPixelTrait)
packet_size+=image->depth > 8 ? 2 : 1;
/* Version 2 selects the PSB (big document) format; required beyond the
   30000-pixel PSD limit. */
psd_info.version=1;
if ((LocaleCompare(image_info->magick,"PSB") == 0) ||
(image->columns > 30000) || (image->rows > 30000))
psd_info.version=2;
/*
File header: signature, version, 6 reserved bytes, channel count,
canvas size, depth and color mode.
*/
(void) WriteBlob(image,4,(const unsigned char *) "8BPS");
(void) WriteBlobMSBShort(image,psd_info.version); /* version */
for (i=1; i <= 6; i++)
(void) WriteBlobByte(image, 0); /* 6 bytes of reserved */
/* When the image has a color profile it won't be converted to gray scale */
if ((GetImageProfile(image,"icc") == (StringInfo *) NULL) &&
(SetImageGray(image,exception) != MagickFalse))
num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL);
else
if ((image_info->type != TrueColorType) && (image_info->type !=
TrueColorAlphaType) && (image->storage_class == PseudoClass))
num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL);
else
{
if (image->storage_class == PseudoClass)
(void) SetImageStorageClass(image,DirectClass,exception);
if (image->colorspace != CMYKColorspace)
num_channels=(image->alpha_trait != UndefinedPixelTrait ? 4UL : 3UL);
else
num_channels=(image->alpha_trait != UndefinedPixelTrait ? 5UL : 4UL);
}
(void) WriteBlobMSBShort(image,(unsigned short) num_channels);
(void) WriteBlobMSBLong(image,(unsigned int) image->rows);
(void) WriteBlobMSBLong(image,(unsigned int) image->columns);
if (IsImageGray(image) != MagickFalse)
{
MagickBooleanType
monochrome;
/*
Write depth & mode.
*/
monochrome=IsImageMonochrome(image) && (image->depth == 1) ?
MagickTrue : MagickFalse;
(void) WriteBlobMSBShort(image,(unsigned short)
(monochrome != MagickFalse ? 1 : image->depth > 8 ? 16 : 8));
(void) WriteBlobMSBShort(image,(unsigned short)
(monochrome != MagickFalse ? BitmapMode : GrayscaleMode));
}
else
{
(void) WriteBlobMSBShort(image,(unsigned short) (image->storage_class ==
PseudoClass ? 8 : image->depth > 8 ? 16 : 8));
/* Anything not explicitly destined for CMYK is normalized to sRGB. */
if (((image_info->colorspace != UndefinedColorspace) ||
(image->colorspace != CMYKColorspace)) &&
(image_info->colorspace != CMYKColorspace))
{
(void) TransformImageColorspace(image,sRGBColorspace,exception);
(void) WriteBlobMSBShort(image,(unsigned short)
(image->storage_class == PseudoClass ? IndexedMode : RGBMode));
}
else
{
if (image->colorspace != CMYKColorspace)
(void) TransformImageColorspace(image,CMYKColorspace,exception);
(void) WriteBlobMSBShort(image,CMYKMode);
}
}
/*
Color mode data section: empty (length 0) unless an indexed palette is
required, in which case a fixed 768-byte planar RGB colormap is written
(256 reds, 256 greens, 256 blues, zero-padded past image->colors).
*/
if ((IsImageGray(image) != MagickFalse) ||
(image->storage_class == DirectClass) || (image->colors > 256))
(void) WriteBlobMSBLong(image,0);
else
{
/*
Write PSD raster colormap.
*/
(void) WriteBlobMSBLong(image,768);
for (i=0; i < (ssize_t) image->colors; i++)
(void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum(
image->colormap[i].red)));
for ( ; i < 256; i++)
(void) WriteBlobByte(image,0);
for (i=0; i < (ssize_t) image->colors; i++)
(void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum(
image->colormap[i].green)));
for ( ; i < 256; i++)
(void) WriteBlobByte(image,0);
for (i=0; i < (ssize_t) image->colors; i++)
(void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum(
image->colormap[i].blue)));
for ( ; i < 256; i++)
(void) WriteBlobByte(image,0);
}
/*
Image resource block.
*/
length=28; /* 0x03EB */
bim_profile=(StringInfo *) GetImageProfile(image,"8bim");
icc_profile=GetImageProfile(image,"icc");
if (bim_profile != (StringInfo *) NULL)
{
/* Work on a clone: the ICC and resolution resources are stripped from the
   8BIM blob because they are written separately below. */
bim_profile=CloneStringInfo(bim_profile);
if (icc_profile != (StringInfo *) NULL)
RemoveICCProfileFromResourceBlock(bim_profile);
RemoveResolutionFromResourceBlock(bim_profile);
length+=PSDQuantum(GetStringInfoLength(bim_profile));
}
if (icc_profile != (const StringInfo *) NULL)
length+=PSDQuantum(GetStringInfoLength(icc_profile))+12;
(void) WriteBlobMSBLong(image,(unsigned int) length);
WriteResolutionResourceBlock(image);
if (bim_profile != (StringInfo *) NULL)
{
(void) WriteBlob(image,GetStringInfoLength(bim_profile),
GetStringInfoDatum(bim_profile));
bim_profile=DestroyStringInfo(bim_profile);
}
if (icc_profile != (StringInfo *) NULL)
{
/* ICC profile resource: signature, id 0x040F (the 32-bit constant is
   intentionally truncated to a short), empty name, data, pad to even. */
(void) WriteBlob(image,4,(const unsigned char *) "8BIM");
(void) WriteBlobMSBShort(image,0x0000040F);
(void) WriteBlobMSBShort(image,0);
(void) WriteBlobMSBLong(image,(unsigned int) GetStringInfoLength(
icc_profile));
(void) WriteBlob(image,GetStringInfoLength(icc_profile),
GetStringInfoDatum(icc_profile));
if ((ssize_t) GetStringInfoLength(icc_profile) != PSDQuantum(GetStringInfoLength(icc_profile)))
(void) WriteBlobByte(image,0);
}
/*
Layer information: a zero placeholder is written first, the layers are
emitted, then the real section size is patched back in at size_offset.
*/
if (status != MagickFalse)
{
MagickOffsetType
size_offset;
size_t
size;
size_offset=TellBlob(image);
(void) SetPSDSize(&psd_info,image,0);
status=WritePSDLayersInternal(image,image_info,&psd_info,&size,
exception);
/* NOTE(review): the updated size_offset is never read afterwards —
   verify the accumulation is intentional. */
size_offset+=WritePSDSize(&psd_info,image,size+
(psd_info.version == 1 ? 8 : 12),size_offset);
}
(void) WriteBlobMSBLong(image,0); /* user mask data */
/*
Write composite image.
*/
if (status != MagickFalse)
{
CompressionType
compression;
/* Zip is not supported for the composite; temporarily force RLE and
   restore the image's original compression afterwards. */
compression=image->compression;
if (image_info->compression != UndefinedCompression)
image->compression=image_info->compression;
if (image->compression == ZipCompression)
image->compression=RLECompression;
if (WritePSDChannels(&psd_info,image_info,image,image,0,MagickFalse,
exception) == 0)
status=MagickFalse;
image->compression=compression;
}
(void) CloseBlob(image);
return(status);
}
|
attack_mp.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <math.h>
#include <gsl/gsl_matrix.h>
#include <gsl/gsl_rng.h>
#include <gsl/gsl_randist.h>
#include <gsl/gsl_blas.h>
#include <gsl/gsl_cdf.h>
#include <omp.h>
#include "util.h"
/*
 * Driver for the co-occurrence matrix recovery attack.
 *
 * Command line (9 arguments):
 *   obs_prefix bg_prefix N_doc_bg N_kw N_obs N_iter m q out_prefix
 * For each of 10 rounds it reads a background and an observed matrix
 * (file name = prefix + round number), then performs 10 attack runs,
 * writing one result file per (round, run) pair.
 *
 * Fixes over the previous version:
 *  - argc is validated before argv[1..9] are dereferenced;
 *  - matrix_obs was allocated once per inner run (100 times) but freed
 *    only once after both loops, leaking 99 matrices; it is now
 *    allocated and freed inside the run loop;
 *  - sprintf into fixed 40-byte buffers could overflow when the prefixes
 *    come from argv; replaced with snprintf into larger buffers;
 *  - true_index and permutation were never freed.
 */
int main(int argc, char *argv[]) {
    if (argc < 10) {
        fprintf(stderr,
            "usage: %s obs_prefix bg_prefix N_doc_bg N_kw N_obs N_iter m q out_prefix\n",
            argv[0]);
        return 1;
    }
    char* input_fileName1 = argv[1];  /* observed-matrix file prefix */
    char* input_fileName2 = argv[2];  /* background-matrix file prefix */
    int N_doc_bg = atoi(argv[3]);     /* documents in the background set */
    int N_kw = atoi(argv[4]);         /* number of keywords */
    int N_obs = atoi(argv[5]);        /* number of observed keywords */
    int N_iter = atoi(argv[6]);       /* annealing iterations per run */
    int m = atoi(argv[7]);
    double p = 0.89;                  /* fixed true-positive rate */
    double q = atof(argv[8]);         /* false-positive rate */
    char* output_fileName = argv[9];  /* result file prefix */
    int N_doc = 480000/2;             /* documents in the target collection */
    int* matrix = (int*) malloc(sizeof(int) * N_obs * N_obs);
    int* matrix_bg = (int*) malloc(sizeof(int) * N_kw * N_kw);
    int* matrix_padded = (int*) malloc(sizeof(int) * N_obs * N_obs);
    int* true_index = (int*) malloc(sizeof(int) * N_kw);
    int* permutation = (int*) malloc(sizeof(int) * N_obs);
    // Setup
    for (int round = 0; round < 10; round++)
    {
        char input_fileName1_extend[256];
        char input_fileName2_extend[256];
        /* snprintf: the prefixes come from argv and may exceed the buffer. */
        snprintf(input_fileName1_extend, sizeof input_fileName1_extend,
                 "%s%d", input_fileName1, round);
        snprintf(input_fileName2_extend, sizeof input_fileName2_extend,
                 "%s%d", input_fileName2, round);
        struct timeval tv1,tv2;
        gettimeofday(&tv1, NULL);
        read_matrix(&true_index, &matrix_bg, 1.0*N_doc/N_doc_bg, N_kw, input_fileName2_extend);
        read_matrix(&true_index, &matrix, 1.0, N_obs, input_fileName1_extend);
        gettimeofday(&tv2, NULL);
        printf("Reading done: %f.\n", (double) (tv2.tv_usec - tv1.tv_usec) / 1000000 + (double) (tv2.tv_sec - tv1.tv_sec));
        fflush(stdout);
        for (int iter = 0; iter < 10; iter++)
        {
            printf("Run %d\n", iter);
            /* Fresh observed matrix per run; freed at the end of the run
               (previously leaked: only the last one was freed). */
            gsl_matrix* matrix_obs = gsl_matrix_alloc(N_obs, N_obs);
            gettimeofday(&tv1, NULL);
            pad_matrix(&matrix_padded, &matrix, m, p, q, N_obs, N_doc);
            gettimeofday(&tv2, NULL);
            printf("Padding done: %f.\n", (double) (tv2.tv_usec - tv1.tv_usec) / 1000000 + (double) (tv2.tv_sec - tv1.tv_sec));
            fflush(stdout);
            gettimeofday(&tv1, NULL);
            observe_matrix(matrix_obs, &matrix_padded, N_obs);
            gettimeofday(&tv2, NULL);
            printf("Observed matrix generated: %f.\n", (double) (tv2.tv_usec - tv1.tv_usec) / 1000000 + (double) (tv2.tv_sec - tv1.tv_sec));
            fflush(stdout);
            // Permute observed matrix randomly and attack
            gettimeofday(&tv1, NULL);
            attack(matrix_obs, &matrix_bg, &permutation, m, p, q, N_kw, N_obs, N_doc, N_iter);
            gettimeofday(&tv2, NULL);
            printf("Main attack done: %f.\n", (double) ((tv2.tv_usec - tv1.tv_usec) / 1000000 + (double) (tv2.tv_sec - tv1.tv_sec)) / N_iter);
            fflush(stdout);
            char output_fileName_full[256];
            snprintf(output_fileName_full, sizeof output_fileName_full,
                     "%s%d-%d", output_fileName, round, iter);
            print_result(output_fileName_full, &permutation, &true_index, N_obs);
            gsl_matrix_free(matrix_obs);
        }
    }
    free(matrix);
    free(matrix_bg);
    free(matrix_padded);
    free(true_index);   /* previously leaked */
    free(permutation);  /* previously leaked */
    return(0);
}
/*
 * Log-likelihood that entry (idx1, idx2) of the observed co-occurrence
 * matrix arises from background entry ((*permutation)[idx1],
 * (*permutation)[idx2]), under a Gaussian approximation whose mean and
 * variance differ for diagonal and off-diagonal entries.  The probability
 * mass of the single integer bin [k-0.5, k+0.5] is taken; if it
 * underflows to zero the score is clamped to -500.
 */
double log_score(int idx1, int idx2, gsl_matrix* matrix_obs, int** matrix, int** permutation, int m, double p, double q, int N_kw, int N_doc)
{
    const int map1 = (*permutation)[idx1];
    const int map2 = (*permutation)[idx2];
    /* Background co-occurrence count for the mapped keyword pair. */
    const int cooc = (*matrix)[map1*N_kw + map2];
    const int obs = (int) gsl_matrix_get(matrix_obs, idx1, idx2);
    double centered, sd;
    if (idx1 == idx2)
    {
        /* Diagonal: true matches (rate p) plus false positives (rate q). */
        int N1 = m * cooc;
        double N1_mean = p * N1;
        double N1_var = p * (1-p) * N1 + (m * N_doc - N1) * N1 * 1.0 / N_doc / m;
        int N2 = m * (N_doc - cooc);
        double N2_mean = q * N2;
        double N2_var = q * (1-q) * N2 + (m * N_doc - N2) * N2 * 1.0 / N_doc / m;
        double N3_var = 1.0 * m / N_doc * cooc * (N_doc - cooc);
        centered = floor(obs - N1_mean - N2_mean);
        sd = sqrt(N1_var + N2_var + N3_var);
    }
    else
    {
        /* Off-diagonal: both keywords match (p*p), exactly one matches
           (p*q), or neither matches (q*q). */
        int N1 = m * cooc;
        double N1_mean = p * p * N1;
        double N1_var = p * p * (1-p) * (1-p)* N1;
        int N2 = m * ((*matrix)[map1*N_kw + map1] + (*matrix)[map2*N_kw + map2] - 2*cooc);
        double N2_mean = p * q * N2;
        double N2_var = p * q * (1-p) * (1-q) * N2;
        int N3 = m * N_doc - N1 - N2;
        double N3_mean = q * q * N3;
        double N3_var = q * q * (1-q) * (1-q) * N3;
        double N4_var = 1.0 * m / N_doc * cooc * (N_doc - cooc);
        centered = floor(obs - N1_mean - N2_mean - N3_mean);
        sd = sqrt(N1_var + N2_var + N3_var + N4_var);
    }
    /* Mass of the integer bin around the observed count. */
    double score = gsl_cdf_gaussian_P(centered + 0.5, sd)
                 - gsl_cdf_gaussian_P(centered - 0.5, sd);
    return (score == 0) ? -500.0 : log(score);
}
/*
 * Recover the keyword permutation by simulated annealing.
 *
 * Starting from solution_initial(), each iteration proposes a move via
 * permutation_generation() (a swap/reassignment written into
 * permutation_tmp), recomputes the affected score rows in parallel, and
 * accepts the move with the Metropolis probability exp(score_diff/temp).
 * temp decays geometrically (x0.995); the loop exits early after the
 * annealing has been stuck for N_iter/20 consecutive proposals.
 *
 * Fixes over the previous version:
 *  - permutation_tmp and permutation_inv were allocated but never freed
 *    (leaked once per call);
 *  - the progress report computed `iter % (N_iter / 10)`, which divides
 *    by zero when N_iter < 10.
 */
void attack(gsl_matrix* matrix_obs, int** matrix, int** permutation, int m, double p, double q, int N_kw, int N_obs, int N_doc, int N_iter)
{
    // Initialise data structures
    double* score_matrix = (double*) malloc(sizeof(double) * N_kw * N_kw);
    double* score_row1 = (double*) malloc(sizeof(double) * N_kw);
    double* score_row2 = (double*) malloc(sizeof(double) * N_kw);
    int* permutation_tmp = (int*) malloc(sizeof(int) * N_obs);
    int* permutation_inv = (int*) malloc(sizeof(int) * N_kw);
    // Initialising RNG
    const gsl_rng_type * T;
    gsl_rng * r;
    gsl_rng_env_setup();
    T = gsl_rng_default;
    r = gsl_rng_alloc (T);
    struct timeval tv1,tv2;
    gettimeofday(&tv1, NULL);
    solution_initial(permutation, matrix_obs, matrix, m, p, q, N_kw, N_obs, N_doc);
    for (int ii = 0; ii < N_obs; ii++)
        permutation_tmp[ii] = (*permutation)[ii];
    /* permutation_inv maps background index -> observed index; -1 = unused. */
    for (int ii = 0; ii < N_kw; ii++)
        permutation_inv[ii] = -1;
    for (int ii = 0; ii < N_obs; ii++)
        permutation_inv[permutation_tmp[ii]] = ii;
    // Compute initial score
#pragma omp parallel for shared(score_matrix, matrix_obs, matrix)
    for (int ii = 0; ii < N_obs * N_obs; ii++)
        score_matrix[ii] = log_score((int) (ii / N_obs), ii % N_obs, matrix_obs, matrix, permutation, m, p, q, N_kw, N_doc);
    gettimeofday(&tv2, NULL);
    printf("Initial score computed: %f.\n", (double) (tv2.tv_usec - tv1.tv_usec) / 1000000 + (double) (tv2.tv_sec - tv1.tv_sec));
    // Iterations of simulated annealing
    double temp = (double) N_kw;
    int N_stuck = 0;
    for (int iter = 0; iter < N_iter; iter++)
    {
        /* Status report, ten times over the run (guard avoids /0 for small N_iter). */
        if ((N_iter >= 10) && (iter % (N_iter / 10) == 0))
        {
            gettimeofday(&tv1, NULL);
            printf("Iteration: %d, %d, %d.\n", iter, N_stuck, (int) (tv1.tv_sec - tv2.tv_sec));
            fflush(stdout);
            gettimeofday(&tv2, NULL);
        }
        /* Early termination: setting iter = N_iter still runs this iteration
           to completion and then leaves the loop. */
        if (N_stuck >= (N_iter / 20))
            iter = N_iter;
        int idx1, idx2;
        permutation_generation(&idx1, &idx2, &permutation_tmp, permutation, &permutation_inv, matrix_obs, matrix, m, p, q, N_kw, N_obs, N_doc);
        if (idx1 == idx2)
        {
            /* Degenerate proposal: nothing to evaluate. */
            N_stuck++;
            continue;
        }
        /* Re-score the row(s) touched by the proposal. */
        int ii = 0;
#pragma omp parallel for shared(score_row1)
        for (ii = 0; ii < N_obs; ii++)
            score_row1[ii] = log_score(idx1, ii, matrix_obs, matrix, &permutation_tmp, m, p, q, N_kw, N_doc);
        if (idx2 >= 0)
#pragma omp parallel for shared(score_row2)
        for (ii = 0; ii < N_obs; ii++)
            score_row2[ii] = log_score(idx2, ii, matrix_obs, matrix, &permutation_tmp, m, p, q, N_kw, N_doc);
        double score_diff = 0;
        for (int ii = 0; ii < N_obs; ii++)
            score_diff += score_row1[ii];
        for (int ii = 0; ii < N_obs; ii++)
            score_diff -= score_matrix[idx1*N_obs + ii];
        if (idx2 >= 0)
        {
            for (int ii = 0; ii < N_obs; ii++)
                score_diff += score_row2[ii];
            for (int ii = 0; ii < N_obs; ii++)
                score_diff -= score_matrix[idx2*N_obs + ii];
        }
        // compute difference in score, with exponentiation
        score_diff = score_diff / temp;
        if (score_diff < -40)
            score_diff = 0;         /* exp underflow: never accept */
        else if (score_diff > 0)
            score_diff = 1.01;      /* improvement: always accept */
        else
            score_diff = exp(score_diff);
        if (gsl_ran_flat(r, 0, 1) < score_diff)
        {
            // Accept: fold the recomputed rows/columns into the score matrix
            for (int ii = 0; ii < N_obs; ii++)
                score_matrix[idx1*N_obs + ii] = score_row1[ii];
            for (int ii = 0; ii < N_obs; ii++)
                score_matrix[ii*N_obs + idx1] = score_row1[ii];
            if (idx2 >= 0)
            {
                for (int ii = 0; ii < N_obs; ii++)
                    score_matrix[idx2*N_obs + ii] = score_row2[ii];
                for (int ii = 0; ii < N_obs; ii++)
                    score_matrix[ii*N_obs + idx2] = score_row2[ii];
            }
            // Commit the proposal into the permutation and its inverse
            permutation_inv[(*permutation)[idx1]] = -1;
            (*permutation)[idx1] = permutation_tmp[idx1];
            permutation_inv[permutation_tmp[idx1]] = idx1;
            if (idx2 >= 0)
            {
                (*permutation)[idx2] = permutation_tmp[idx2];
                permutation_inv[permutation_tmp[idx2]] = idx2;
            }
            N_stuck = 0;
        }
        else
        {
            // Reject: roll the tentative assignment back
            permutation_tmp[idx1] = (*permutation)[idx1];
            if (idx2 >= 0)
                permutation_tmp[idx2] = (*permutation)[idx2];
            N_stuck += 1;
        }
        temp *= 0.995;
    }
    free(score_matrix);
    free(score_row1);
    free(score_row2);
    free(permutation_tmp);  /* fix: was leaked */
    free(permutation_inv);  /* fix: was leaked */
    gsl_rng_free(r);
}
/*
 * Count how many entries of the recovered permutation equal the ground
 * truth, write that count to OUTPUT_FILENAME (overwriting it), and echo
 * a summary line to stdout.
 *
 * Fixes over the previous version: the unused local `count2` is removed
 * and the fopen() result is checked before use.
 */
void print_result(char* output_fileName, int** permutation, int** true_index, int N_obs)
{
    int count = 0;
    for (int ii = 0; ii < N_obs; ii++)
        if ((*permutation)[ii] == (*true_index)[ii])
            count++;
    FILE* fp = fopen(output_fileName, "w");
    if (fp == NULL)
        fprintf(stderr, "print_result: cannot open %s\n", output_fileName);
    else
    {
        fprintf(fp, "%d\n", count);
        fclose(fp);
    }
    printf("Success: %d/%d.\n", count, N_obs);
}
/*
 * Write the number of correctly recovered permutation entries to
 * OUTPUT_FILENAME (overwriting it).  Same file format as print_result()
 * but without the stdout summary line.
 *
 * Fix over the previous version: the fopen() result is checked before use.
 */
void print_full_result(char* output_fileName, int** permutation, int** true_index, int N_obs)
{
    int count = 0;
    for (int ii = 0; ii < N_obs; ii++)
        if ((*permutation)[ii] == (*true_index)[ii])
            count++;
    FILE* fp = fopen(output_fileName, "w");
    if (fp == NULL)
    {
        fprintf(stderr, "print_full_result: cannot open %s\n", output_fileName);
        return;
    }
    fprintf(fp, "%d\n", count);
    fclose(fp);
}
trsm_x_coo_n_lo_row.c | #include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#include <memory.h>
/*
  Sparse triangular solve with multiple right-hand sides for a COO matrix:
  computes y such that L * y = alpha * x column by column, where L is the
  lower triangle of A (entries with col < row) plus A's diagonal
  (non-unit, per the _n_lo naming — confirm against the dispatch tables).
  x and y are row-major with leading dimensions ldx and ldy.

  Assumes every row has a diagonal entry in A; a missing diagonal leaves
  diag[r] zero and the final division produces inf/NaN.
  Cost is O(columns * m * nnz): the whole nonzero list is rescanned for
  every row of every column.
*/
alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_COO *A, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, ALPHA_Number *y, const ALPHA_INT ldy)
{
ALPHA_INT m = A->rows;
/* NOTE(review): runtime-sized VLA on the stack; a large m can overflow
   the stack — consider heap allocation. */
ALPHA_Number diag[m];
memset(diag, '\0', m * sizeof(ALPHA_Number));
int num_thread = alpha_get_thread_num();
/* Gather the diagonal entries.  Safe to run in parallel only if the COO
   holds at most one entry per (row,col) pair — TODO confirm duplicates
   are not possible here. */
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_thread)
#endif
for (ALPHA_INT r = 0; r < A->nnz; r++)
{
if(A->row_indx[r] == A->col_indx[r])
{
diag[A->row_indx[r]] = A->values[r];
}
}
/* Each right-hand-side column is solved independently, so the outer loop
   parallelizes without conflicts; within a column the forward
   substitution is inherently sequential in r. */
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_thread)
#endif
for(ALPHA_INT out_y_col = 0; out_y_col < columns; out_y_col++)
{
for (ALPHA_INT r = 0; r < m; r++)
{
/* temp = sum over strictly-lower entries of row r: L[r][c] * y[c]. */
ALPHA_Number temp;
alpha_setzero(temp);
for (ALPHA_INT cr = 0; cr < A->nnz; cr++)
{
int row = A->row_indx[cr];
int col = A->col_indx[cr];
if(row == r && col < r)
alpha_madde(temp, A->values[cr], y[col * ldy + out_y_col]);
}
/* y[r] = (alpha * x[r] - temp) / diag[r]  (forward substitution). */
ALPHA_Number t;
alpha_mul(t, alpha, x[r * ldx + out_y_col]);
alpha_sub(t, t, temp);
alpha_div(y[r * ldy + out_y_col], t, diag[r]);
}
}
return ALPHA_SPARSE_STATUS_SUCCESS;
}
|
ast-dump-openmp-begin-declare-variant_9.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -verify -ast-dump %s | FileCheck %s --check-prefix=C
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -verify -ast-dump %s -x c++| FileCheck %s --check-prefix=CXX
// expected-no-diagnostics
int also_before(void) { // base definition; a vendor(llvm) variant is declared below
return 0;
}
#pragma omp begin declare variant match(implementation={vendor(llvm)}) // functions in this region become variants of same-named base functions (no new lines may be added to this file: the CHECK expectations reference exact line/column positions)
int also_after(void) { // variant; also introduces an implicit declaration of the base 'also_after'
return 1;
}
int also_before(void) { // variant of the base function defined above
return 2;
}
#pragma omp end declare variant
int also_after(void) { // real base definition; per the CHECK lines it merges with the implicit declaration ('prev')
return 0;
}
void foo(void);
typedef int(*fd)(void); // function-pointer type used to exercise address-taking and calls through pointers
int main(void) {
// Should return 0.
fd fns[2];
fns[0] = &also_before; // per the DeclRefExpr CHECK lines, resolves to the base also_before
fns[1] = also_after; // likewise resolves to the base also_after
return (foo(), also_after)() + // all three calls reach the base definitions (each returns 0)
(fns[0])() +
(1[fns])(); // 1[fns] == fns[1]; call through the pointer
}
// Make sure:
// - we see the specialization in the AST
// - we pick the right callees
// C: |-FunctionDecl [[ADDR_0:0x[a-z0-9]*]] <{{.*}}, line:7:1> line:5:5 used also_before 'int ({{.*}})'
// C-NEXT: | |-CompoundStmt [[ADDR_1:0x[a-z0-9]*]] <col:23, line:7:1>
// C-NEXT: | | `-ReturnStmt [[ADDR_2:0x[a-z0-9]*]] <line:6:3, col:10>
// C-NEXT: | | `-IntegerLiteral [[ADDR_3:0x[a-z0-9]*]] <col:10> 'int' 0
// C-NEXT: | `-OMPDeclareVariantAttr [[ADDR_4:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(llvm)}
// C-NEXT: | `-DeclRefExpr [[ADDR_5:0x[a-z0-9]*]] <line:13:1> 'int ({{.*}})' Function [[ADDR_6:0x[a-z0-9]*]] 'also_before[implementation={vendor(llvm)}]' 'int ({{.*}})'
// C-NEXT: |-FunctionDecl [[ADDR_7:0x[a-z0-9]*]] <line:10:1, col:20> col:5 implicit used also_after 'int ({{.*}})'
// C-NEXT: | `-OMPDeclareVariantAttr [[ADDR_8:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(llvm)}
// C-NEXT: | `-DeclRefExpr [[ADDR_9:0x[a-z0-9]*]] <col:1> 'int ({{.*}})' Function [[ADDR_10:0x[a-z0-9]*]] 'also_after[implementation={vendor(llvm)}]' 'int ({{.*}})'
// C-NEXT: |-FunctionDecl [[ADDR_10]] <col:1, line:12:1> line:10:1 also_after[implementation={vendor(llvm)}] 'int ({{.*}})'
// C-NEXT: | `-CompoundStmt [[ADDR_11:0x[a-z0-9]*]] <col:22, line:12:1>
// C-NEXT: | `-ReturnStmt [[ADDR_12:0x[a-z0-9]*]] <line:11:3, col:10>
// C-NEXT: | `-IntegerLiteral [[ADDR_13:0x[a-z0-9]*]] <col:10> 'int' 1
// C-NEXT: |-FunctionDecl [[ADDR_6]] <line:13:1, line:15:1> line:13:1 also_before[implementation={vendor(llvm)}] 'int ({{.*}})'
// C-NEXT: | `-CompoundStmt [[ADDR_14:0x[a-z0-9]*]] <col:23, line:15:1>
// C-NEXT: | `-ReturnStmt [[ADDR_15:0x[a-z0-9]*]] <line:14:3, col:10>
// C-NEXT: | `-IntegerLiteral [[ADDR_16:0x[a-z0-9]*]] <col:10> 'int' 2
// C-NEXT: |-FunctionDecl [[ADDR_17:0x[a-z0-9]*]] prev [[ADDR_7]] <line:18:1, line:20:1> line:18:5 used also_after 'int ({{.*}})'
// C-NEXT: | |-CompoundStmt [[ADDR_18:0x[a-z0-9]*]] <col:22, line:20:1>
// C-NEXT: | | `-ReturnStmt [[ADDR_19:0x[a-z0-9]*]] <line:19:3, col:10>
// C-NEXT: | | `-IntegerLiteral [[ADDR_20:0x[a-z0-9]*]] <col:10> 'int' 0
// C-NEXT: | `-OMPDeclareVariantAttr [[ADDR_21:0x[a-z0-9]*]] <<invalid sloc>> Inherited Implicit implementation={vendor(llvm)}
// C-NEXT: | `-DeclRefExpr [[ADDR_9]] <line:10:1> 'int ({{.*}})' Function [[ADDR_10]] 'also_after[implementation={vendor(llvm)}]' 'int ({{.*}})'
// C-NEXT: |-FunctionDecl [[ADDR_22:0x[a-z0-9]*]] <line:22:1, col:14> col:6 used foo 'void ({{.*}})'
// C-NEXT: |-TypedefDecl [[ADDR_23:0x[a-z0-9]*]] <line:23:1, col:22> col:14 referenced fd 'int (*)({{.*}})'
// C-NEXT: | `-PointerType [[ADDR_24:0x[a-z0-9]*]] 'int (*)({{.*}})'
// C-NEXT: | `-ParenType [[ADDR_25:0x[a-z0-9]*]] 'int ({{.*}})' sugar
// C-NEXT: | `-FunctionProtoType [[ADDR_26:0x[a-z0-9]*]] 'int ({{.*}})' cdecl
// C-NEXT: | `-BuiltinType [[ADDR_27:0x[a-z0-9]*]] 'int'
// C-NEXT: `-FunctionDecl [[ADDR_28:0x[a-z0-9]*]] <line:24:1, line:32:1> line:24:5 main 'int ({{.*}})'
// C-NEXT: `-CompoundStmt [[ADDR_29:0x[a-z0-9]*]] <col:16, line:32:1>
// C-NEXT: |-DeclStmt [[ADDR_30:0x[a-z0-9]*]] <line:26:3, col:12>
// C-NEXT: | `-VarDecl [[ADDR_31:0x[a-z0-9]*]] <col:3, col:11> col:6 used fns 'fd[2]'
// C-NEXT: |-BinaryOperator [[ADDR_32:0x[a-z0-9]*]] <line:27:3, col:13> 'fd':'int (*)({{.*}})' '='
// C-NEXT: | |-ArraySubscriptExpr [[ADDR_33:0x[a-z0-9]*]] <col:3, col:8> 'fd':'int (*)({{.*}})' lvalue
// C-NEXT: | | |-ImplicitCastExpr [[ADDR_34:0x[a-z0-9]*]] <col:3> 'fd *' <ArrayToPointerDecay>
// C-NEXT: | | | `-DeclRefExpr [[ADDR_35:0x[a-z0-9]*]] <col:3> 'fd[2]' {{.*}}Var [[ADDR_31]] 'fns' 'fd[2]'
// C-NEXT: | | `-IntegerLiteral [[ADDR_36:0x[a-z0-9]*]] <col:7> 'int' 0
// C-NEXT: | `-UnaryOperator [[ADDR_37:0x[a-z0-9]*]] <col:12, col:13> 'int (*)({{.*}})' prefix '&' cannot overflow
// C-NEXT: | `-DeclRefExpr [[ADDR_38:0x[a-z0-9]*]] <col:13> 'int ({{.*}})' Function [[ADDR_0]] 'also_before' 'int ({{.*}})'
// C-NEXT: |-BinaryOperator [[ADDR_39:0x[a-z0-9]*]] <line:28:3, col:12> 'fd':'int (*)({{.*}})' '='
// C-NEXT: | |-ArraySubscriptExpr [[ADDR_40:0x[a-z0-9]*]] <col:3, col:8> 'fd':'int (*)({{.*}})' lvalue
// C-NEXT: | | |-ImplicitCastExpr [[ADDR_41:0x[a-z0-9]*]] <col:3> 'fd *' <ArrayToPointerDecay>
// C-NEXT: | | | `-DeclRefExpr [[ADDR_42:0x[a-z0-9]*]] <col:3> 'fd[2]' {{.*}}Var [[ADDR_31]] 'fns' 'fd[2]'
// C-NEXT: | | `-IntegerLiteral [[ADDR_43:0x[a-z0-9]*]] <col:7> 'int' 1
// C-NEXT: | `-ImplicitCastExpr [[ADDR_44:0x[a-z0-9]*]] <col:12> 'int (*)({{.*}})' <FunctionToPointerDecay>
// C-NEXT: | `-DeclRefExpr [[ADDR_45:0x[a-z0-9]*]] <col:12> 'int ({{.*}})' Function [[ADDR_17]] 'also_after' 'int ({{.*}})'
// C-NEXT: `-ReturnStmt [[ADDR_46:0x[a-z0-9]*]] <line:29:3, line:31:19>
// C-NEXT: `-BinaryOperator [[ADDR_47:0x[a-z0-9]*]] <line:29:10, line:31:19> 'int' '+'
// C-NEXT: |-BinaryOperator [[ADDR_48:0x[a-z0-9]*]] <line:29:10, line:30:19> 'int' '+'
// C-NEXT: | |-CallExpr [[ADDR_49:0x[a-z0-9]*]] <line:29:10, col:30> 'int'
// C-NEXT: | | `-ParenExpr [[ADDR_50:0x[a-z0-9]*]] <col:10, col:28> 'int (*)({{.*}})'
// C-NEXT: | | `-BinaryOperator [[ADDR_51:0x[a-z0-9]*]] <col:11, col:18> 'int (*)({{.*}})' ','
// C-NEXT: | | |-CallExpr [[ADDR_52:0x[a-z0-9]*]] <col:11, col:15> 'void'
// C-NEXT: | | | `-ImplicitCastExpr [[ADDR_53:0x[a-z0-9]*]] <col:11> 'void (*)({{.*}})' <FunctionToPointerDecay>
// C-NEXT: | | | `-DeclRefExpr [[ADDR_54:0x[a-z0-9]*]] <col:11> 'void ({{.*}})' Function [[ADDR_22]] 'foo' 'void ({{.*}})'
// C-NEXT: | | `-ImplicitCastExpr [[ADDR_55:0x[a-z0-9]*]] <col:18> 'int (*)({{.*}})' <FunctionToPointerDecay>
// C-NEXT: | | `-DeclRefExpr [[ADDR_56:0x[a-z0-9]*]] <col:18> 'int ({{.*}})' Function [[ADDR_17]] 'also_after' 'int ({{.*}})'
// C-NEXT: | `-CallExpr [[ADDR_57:0x[a-z0-9]*]] <line:30:10, col:19> 'int'
// C-NEXT: | `-ImplicitCastExpr [[ADDR_58:0x[a-z0-9]*]] <col:10, col:17> 'fd':'int (*)({{.*}})' <LValueToRValue>
// C-NEXT: | `-ParenExpr [[ADDR_59:0x[a-z0-9]*]] <col:10, col:17> 'fd':'int (*)({{.*}})' lvalue
// C-NEXT: | `-ArraySubscriptExpr [[ADDR_60:0x[a-z0-9]*]] <col:11, col:16> 'fd':'int (*)({{.*}})' lvalue
// C-NEXT: | |-ImplicitCastExpr [[ADDR_61:0x[a-z0-9]*]] <col:11> 'fd *' <ArrayToPointerDecay>
// C-NEXT: | | `-DeclRefExpr [[ADDR_62:0x[a-z0-9]*]] <col:11> 'fd[2]' {{.*}}Var [[ADDR_31]] 'fns' 'fd[2]'
// C-NEXT: | `-IntegerLiteral [[ADDR_63:0x[a-z0-9]*]] <col:15> 'int' 0
// C-NEXT: `-CallExpr [[ADDR_64:0x[a-z0-9]*]] <line:31:10, col:19> 'int'
// C-NEXT: `-ImplicitCastExpr [[ADDR_65:0x[a-z0-9]*]] <col:10, col:17> 'fd':'int (*)({{.*}})' <LValueToRValue>
// C-NEXT: `-ParenExpr [[ADDR_66:0x[a-z0-9]*]] <col:10, col:17> 'fd':'int (*)({{.*}})' lvalue
// C-NEXT: `-ArraySubscriptExpr [[ADDR_67:0x[a-z0-9]*]] <col:11, col:16> 'fd':'int (*)({{.*}})' lvalue
// C-NEXT: |-IntegerLiteral [[ADDR_68:0x[a-z0-9]*]] <col:11> 'int' 1
// C-NEXT: `-ImplicitCastExpr [[ADDR_69:0x[a-z0-9]*]] <col:13> 'fd *' <ArrayToPointerDecay>
// C-NEXT: `-DeclRefExpr [[ADDR_70:0x[a-z0-9]*]] <col:13> 'fd[2]' {{.*}}Var [[ADDR_31]] 'fns' 'fd[2]'
// CXX: |-FunctionDecl [[ADDR_0:0x[a-z0-9]*]] <{{.*}}, line:7:1> line:5:5 used also_before 'int ({{.*}})'
// CXX-NEXT: | |-CompoundStmt [[ADDR_1:0x[a-z0-9]*]] <col:23, line:7:1>
// CXX-NEXT: | | `-ReturnStmt [[ADDR_2:0x[a-z0-9]*]] <line:6:3, col:10>
// CXX-NEXT: | | `-IntegerLiteral [[ADDR_3:0x[a-z0-9]*]] <col:10> 'int' 0
// CXX-NEXT: | `-OMPDeclareVariantAttr [[ADDR_4:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(llvm)}
// CXX-NEXT: | `-DeclRefExpr [[ADDR_5:0x[a-z0-9]*]] <line:13:1> 'int ({{.*}})' Function [[ADDR_6:0x[a-z0-9]*]] 'also_before[implementation={vendor(llvm)}]' 'int ({{.*}})'
// CXX-NEXT: |-FunctionDecl [[ADDR_7:0x[a-z0-9]*]] <line:10:1, col:20> col:5 implicit used also_after 'int ({{.*}})'
// CXX-NEXT: | `-OMPDeclareVariantAttr [[ADDR_8:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(llvm)}
// CXX-NEXT: | `-DeclRefExpr [[ADDR_9:0x[a-z0-9]*]] <col:1> 'int ({{.*}})' Function [[ADDR_10:0x[a-z0-9]*]] 'also_after[implementation={vendor(llvm)}]' 'int ({{.*}})'
// CXX-NEXT: |-FunctionDecl [[ADDR_10]] <col:1, line:12:1> line:10:1 also_after[implementation={vendor(llvm)}] 'int ({{.*}})'
// CXX-NEXT: | `-CompoundStmt [[ADDR_11:0x[a-z0-9]*]] <col:22, line:12:1>
// CXX-NEXT: | `-ReturnStmt [[ADDR_12:0x[a-z0-9]*]] <line:11:3, col:10>
// CXX-NEXT: | `-IntegerLiteral [[ADDR_13:0x[a-z0-9]*]] <col:10> 'int' 1
// CXX-NEXT: |-FunctionDecl [[ADDR_6]] <line:13:1, line:15:1> line:13:1 also_before[implementation={vendor(llvm)}] 'int ({{.*}})'
// CXX-NEXT: | `-CompoundStmt [[ADDR_14:0x[a-z0-9]*]] <col:23, line:15:1>
// CXX-NEXT: | `-ReturnStmt [[ADDR_15:0x[a-z0-9]*]] <line:14:3, col:10>
// CXX-NEXT: | `-IntegerLiteral [[ADDR_16:0x[a-z0-9]*]] <col:10> 'int' 2
// CXX-NEXT: |-FunctionDecl [[ADDR_17:0x[a-z0-9]*]] prev [[ADDR_7]] <line:18:1, line:20:1> line:18:5 used also_after 'int ({{.*}})'
// CXX-NEXT: | |-CompoundStmt [[ADDR_18:0x[a-z0-9]*]] <col:22, line:20:1>
// CXX-NEXT: | | `-ReturnStmt [[ADDR_19:0x[a-z0-9]*]] <line:19:3, col:10>
// CXX-NEXT: | | `-IntegerLiteral [[ADDR_20:0x[a-z0-9]*]] <col:10> 'int' 0
// CXX-NEXT: | `-OMPDeclareVariantAttr [[ADDR_21:0x[a-z0-9]*]] <<invalid sloc>> Inherited Implicit implementation={vendor(llvm)}
// CXX-NEXT: | `-DeclRefExpr [[ADDR_9]] <line:10:1> 'int ({{.*}})' Function [[ADDR_10]] 'also_after[implementation={vendor(llvm)}]' 'int ({{.*}})'
// CXX-NEXT: |-FunctionDecl [[ADDR_22:0x[a-z0-9]*]] <line:22:1, col:14> col:6 used foo 'void ({{.*}})'
// CXX-NEXT: |-TypedefDecl [[ADDR_23:0x[a-z0-9]*]] <line:23:1, col:22> col:14 referenced fd 'int (*)({{.*}})'
// CXX-NEXT: | `-PointerType [[ADDR_24:0x[a-z0-9]*]] 'int (*)({{.*}})'
// CXX-NEXT: | `-ParenType [[ADDR_25:0x[a-z0-9]*]] 'int ({{.*}})' sugar
// CXX-NEXT: | `-FunctionProtoType [[ADDR_26:0x[a-z0-9]*]] 'int ({{.*}})' cdecl
// CXX-NEXT: | `-BuiltinType [[ADDR_27:0x[a-z0-9]*]] 'int'
// CXX-NEXT: `-FunctionDecl [[ADDR_28:0x[a-z0-9]*]] <line:24:1, line:32:1> line:24:5 main 'int ({{.*}})'
// CXX-NEXT: `-CompoundStmt [[ADDR_29:0x[a-z0-9]*]] <col:16, line:32:1>
// CXX-NEXT: |-DeclStmt [[ADDR_30:0x[a-z0-9]*]] <line:26:3, col:12>
// CXX-NEXT: | `-VarDecl [[ADDR_31:0x[a-z0-9]*]] <col:3, col:11> col:6 used fns 'fd[2]'
// CXX-NEXT: |-BinaryOperator [[ADDR_32:0x[a-z0-9]*]] <line:27:3, col:13> 'fd':'int (*)({{.*}})' {{.*}}'='
// CXX-NEXT: | |-ArraySubscriptExpr [[ADDR_33:0x[a-z0-9]*]] <col:3, col:8> 'fd':'int (*)({{.*}})' lvalue
// CXX-NEXT: | | |-ImplicitCastExpr [[ADDR_34:0x[a-z0-9]*]] <col:3> 'fd *' <ArrayToPointerDecay>
// CXX-NEXT: | | | `-DeclRefExpr [[ADDR_35:0x[a-z0-9]*]] <col:3> 'fd[2]' {{.*}}Var [[ADDR_31]] 'fns' 'fd[2]'
// CXX-NEXT: | | `-IntegerLiteral [[ADDR_36:0x[a-z0-9]*]] <col:7> 'int' 0
// CXX-NEXT: | `-UnaryOperator [[ADDR_37:0x[a-z0-9]*]] <col:12, col:13> 'int (*)({{.*}})' prefix '&' cannot overflow
// CXX-NEXT: | `-DeclRefExpr [[ADDR_38:0x[a-z0-9]*]] <col:13> 'int ({{.*}})' {{.*}}Function [[ADDR_0]] 'also_before' 'int ({{.*}})'
// CXX-NEXT: |-BinaryOperator [[ADDR_39:0x[a-z0-9]*]] <line:28:3, col:12> 'fd':'int (*)({{.*}})' {{.*}}'='
// CXX-NEXT: | |-ArraySubscriptExpr [[ADDR_40:0x[a-z0-9]*]] <col:3, col:8> 'fd':'int (*)({{.*}})' lvalue
// CXX-NEXT: | | |-ImplicitCastExpr [[ADDR_41:0x[a-z0-9]*]] <col:3> 'fd *' <ArrayToPointerDecay>
// CXX-NEXT: | | | `-DeclRefExpr [[ADDR_42:0x[a-z0-9]*]] <col:3> 'fd[2]' {{.*}}Var [[ADDR_31]] 'fns' 'fd[2]'
// CXX-NEXT: | | `-IntegerLiteral [[ADDR_43:0x[a-z0-9]*]] <col:7> 'int' 1
// CXX-NEXT: | `-ImplicitCastExpr [[ADDR_44:0x[a-z0-9]*]] <col:12> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CXX-NEXT: | `-DeclRefExpr [[ADDR_45:0x[a-z0-9]*]] <col:12> 'int ({{.*}})' {{.*}}Function [[ADDR_17]] 'also_after' 'int ({{.*}})'
// CXX-NEXT: `-ReturnStmt [[ADDR_46:0x[a-z0-9]*]] <line:29:3, line:31:19>
// CXX-NEXT: `-BinaryOperator [[ADDR_47:0x[a-z0-9]*]] <line:29:10, line:31:19> 'int' '+'
// CXX-NEXT: |-BinaryOperator [[ADDR_48:0x[a-z0-9]*]] <line:29:10, line:30:19> 'int' '+'
// CXX-NEXT: | |-CallExpr [[ADDR_49:0x[a-z0-9]*]] <line:29:10, col:30> 'int'
// CXX-NEXT: | | `-ImplicitCastExpr [[ADDR_50:0x[a-z0-9]*]] <col:10, col:28> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CXX-NEXT: | | `-ParenExpr [[ADDR_51:0x[a-z0-9]*]] <col:10, col:28> 'int ({{.*}})' lvalue
// CXX-NEXT: | | `-BinaryOperator [[ADDR_52:0x[a-z0-9]*]] <col:11, col:18> 'int ({{.*}})' {{.*}}','
// CXX-NEXT: | | |-CallExpr [[ADDR_53:0x[a-z0-9]*]] <col:11, col:15> 'void'
// CXX-NEXT: | | | `-ImplicitCastExpr [[ADDR_54:0x[a-z0-9]*]] <col:11> 'void (*)({{.*}})' <FunctionToPointerDecay>
// CXX-NEXT: | | | `-DeclRefExpr [[ADDR_55:0x[a-z0-9]*]] <col:11> 'void ({{.*}})' {{.*}}Function [[ADDR_22]] 'foo' 'void ({{.*}})'
// CXX-NEXT: | | `-DeclRefExpr [[ADDR_56:0x[a-z0-9]*]] <col:18> 'int ({{.*}})' {{.*}}Function [[ADDR_17]] 'also_after' 'int ({{.*}})'
// CXX-NEXT: | `-CallExpr [[ADDR_57:0x[a-z0-9]*]] <line:30:10, col:19> 'int'
// CXX-NEXT: | `-ImplicitCastExpr [[ADDR_58:0x[a-z0-9]*]] <col:10, col:17> 'fd':'int (*)({{.*}})' <LValueToRValue>
// CXX-NEXT: | `-ParenExpr [[ADDR_59:0x[a-z0-9]*]] <col:10, col:17> 'fd':'int (*)({{.*}})' lvalue
// CXX-NEXT: | `-ArraySubscriptExpr [[ADDR_60:0x[a-z0-9]*]] <col:11, col:16> 'fd':'int (*)({{.*}})' lvalue
// CXX-NEXT: | |-ImplicitCastExpr [[ADDR_61:0x[a-z0-9]*]] <col:11> 'fd *' <ArrayToPointerDecay>
// CXX-NEXT: | | `-DeclRefExpr [[ADDR_62:0x[a-z0-9]*]] <col:11> 'fd[2]' {{.*}}Var [[ADDR_31]] 'fns' 'fd[2]'
// CXX-NEXT: | `-IntegerLiteral [[ADDR_63:0x[a-z0-9]*]] <col:15> 'int' 0
// CXX-NEXT: `-CallExpr [[ADDR_64:0x[a-z0-9]*]] <line:31:10, col:19> 'int'
// CXX-NEXT: `-ImplicitCastExpr [[ADDR_65:0x[a-z0-9]*]] <col:10, col:17> 'fd':'int (*)({{.*}})' <LValueToRValue>
// CXX-NEXT: `-ParenExpr [[ADDR_66:0x[a-z0-9]*]] <col:10, col:17> 'fd':'int (*)({{.*}})' lvalue
// CXX-NEXT: `-ArraySubscriptExpr [[ADDR_67:0x[a-z0-9]*]] <col:11, col:16> 'fd':'int (*)({{.*}})' lvalue
// CXX-NEXT: |-IntegerLiteral [[ADDR_68:0x[a-z0-9]*]] <col:11> 'int' 1
// CXX-NEXT: `-ImplicitCastExpr [[ADDR_69:0x[a-z0-9]*]] <col:13> 'fd *' <ArrayToPointerDecay>
// CXX-NEXT: `-DeclRefExpr [[ADDR_70:0x[a-z0-9]*]] <col:13> 'fd[2]' {{.*}}Var [[ADDR_31]] 'fns' 'fd[2]'
|
GB_full_add_template.c | //------------------------------------------------------------------------------
// GB_full_add_template: phase2 for C=A+B, C<M>=A+B, C<!M>=A+B, C is full
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// C is full. The mask M is not present (otherwise, C would be sparse,
// hypersparse, or bitmap). All of these methods are asymptotically optimal.
// ------------------------------------------
// C = A + B
// ------------------------------------------
// full . sparse full
// full . bitmap full
// full . full sparse
// full . full bitmap
// full . full full
// If C is iso and full, this phase has nothing to do.
#ifndef GB_ISO_ADD
{
// p: linear index into C (and into A/B when they are full). Each OpenMP
// "parallel for" below uses it as its loop index, which OpenMP privatizes.
int64_t p ;
ASSERT (M == NULL) ;
ASSERT (A_is_full || B_is_full) ;
ASSERT (C_sparsity == GxB_FULL) ;
if (A_is_full && B_is_full)
{
//----------------------------------------------------------------------
// Method30: C, A, B are all full
//----------------------------------------------------------------------
// Single dense scan: every entry of C is A(i,j) + B(i,j).
#pragma omp parallel for num_threads(C_nthreads) schedule(static)
for (p = 0 ; p < cnz ; p++)
{
// C (i,j) = A (i,j) + B (i,j)
GB_LOAD_A (aij, Ax, p, A_iso) ;
GB_LOAD_B (bij, Bx, p, B_iso) ;
GB_BINOP (GB_CX (p), aij, bij, p % vlen, p / vlen) ;
}
}
else if (A_is_full)
{
//----------------------------------------------------------------------
// C and A are full; B is hypersparse, sparse, or bitmap
//----------------------------------------------------------------------
if (B_is_bitmap)
{
//------------------------------------------------------------------
// Method31: C and A are full; B is bitmap
//------------------------------------------------------------------
// Dense scan over C; Bb[p] selects whether B contributes at p.
#pragma omp parallel for num_threads(C_nthreads) schedule(static)
for (p = 0 ; p < cnz ; p++)
{
if (Bb [p])
{
// C (i,j) = A (i,j) + B (i,j)
GB_LOAD_A (aij, Ax, p, A_iso) ;
GB_LOAD_B (bij, Bx, p, B_iso) ;
GB_BINOP (GB_CX (p), aij, bij, p % vlen, p / vlen) ;
}
else
{
#ifdef GB_EWISEUNION
{
// C (i,j) = A(i,j) + beta
GB_LOAD_A (aij, Ax, p, A_iso) ;
GB_BINOP (GB_CX (p), aij, beta_scalar,
p % vlen, p / vlen);
}
#else
{
// C (i,j) = A (i,j)
GB_COPY_A_TO_C (GB_CX (p), Ax, p, A_iso) ;
}
#endif
}
}
}
else
{
//------------------------------------------------------------------
// Method32: C and A are full; B is sparse or hypersparse
//------------------------------------------------------------------
// Two passes: first fill all of C from A alone (or A+beta for the
// ewise-union case), then overlay A+B at the entries present in B.
#pragma omp parallel for num_threads(C_nthreads) schedule(static)
for (p = 0 ; p < cnz ; p++)
{
#ifdef GB_EWISEUNION
{
// C (i,j) = A(i,j) + beta
GB_LOAD_A (aij, Ax, p, A_iso) ;
GB_BINOP (GB_CX (p), aij, beta_scalar,
p % vlen, p / vlen) ;
}
#else
{
// C (i,j) = A (i,j)
GB_COPY_A_TO_C (GB_CX (p), Ax, p, A_iso) ;
}
#endif
}
// Second pass: iterate over the entries of B only.
// NOTE(review): taskid, B_ntasks, B_nthreads, and the *_Bslice arrays
// are declared by the including file (GB_SLICE_MATRIX) — confirm there.
GB_SLICE_MATRIX (B, 8, chunk) ;
#pragma omp parallel for num_threads(B_nthreads) schedule(dynamic,1)
for (taskid = 0 ; taskid < B_ntasks ; taskid++)
{
int64_t kfirst = kfirst_Bslice [taskid] ;
int64_t klast = klast_Bslice [taskid] ;
for (int64_t k = kfirst ; k <= klast ; k++)
{
// find the part of B(:,k) for this task
int64_t j = GBH (Bh, k) ;
int64_t pB_start, pB_end ;
// NOTE(review): GB_get_pA is reused here to slice B; presumably it
// is generic over the sliced matrix — confirm in GB_ek_slice.h.
GB_get_pA (&pB_start, &pB_end, taskid, k, kfirst,
klast, pstart_Bslice, Bp, vlen) ;
int64_t pC_start = j * vlen ;
// traverse over B(:,j), the kth vector of B
for (int64_t pB = pB_start ; pB < pB_end ; pB++)
{
// C (i,j) = A (i,j) + B (i,j)
int64_t i = Bi [pB] ;
int64_t p = pC_start + i ;
GB_LOAD_A (aij, Ax, p , A_iso) ;
GB_LOAD_B (bij, Bx, pB, B_iso) ;
GB_BINOP (GB_CX (p), aij, bij, i, j) ;
}
}
}
}
}
else
{
//----------------------------------------------------------------------
// C and B are full; A is hypersparse, sparse, or bitmap
//----------------------------------------------------------------------
// Mirror image of the A_is_full case above, with the roles of A and B
// swapped (and alpha_scalar used for the ewise-union fill).
if (A_is_bitmap)
{
//------------------------------------------------------------------
// Method33: C and B are full; A is bitmap
//------------------------------------------------------------------
#pragma omp parallel for num_threads(C_nthreads) schedule(static)
for (p = 0 ; p < cnz ; p++)
{
if (Ab [p])
{
// C (i,j) = A (i,j) + B (i,j)
GB_LOAD_A (aij, Ax, p, A_iso) ;
GB_LOAD_B (bij, Bx, p, B_iso) ;
GB_BINOP (GB_CX (p), aij, bij, p % vlen, p / vlen) ;
}
else
{
#ifdef GB_EWISEUNION
{
// C (i,j) = alpha + B(i,j)
GB_LOAD_B (bij, Bx, p, B_iso) ;
GB_BINOP (GB_CX (p), alpha_scalar,
bij, p % vlen, p / vlen);
}
#else
{
// C (i,j) = B (i,j)
GB_COPY_B_TO_C (GB_CX (p), Bx, p, B_iso) ;
}
#endif
}
}
}
else
{
//------------------------------------------------------------------
// Method34: C and B are full; A is hypersparse or sparse
//------------------------------------------------------------------
// Pass 1: fill C from B alone (or alpha+B for ewise-union).
#pragma omp parallel for num_threads(C_nthreads) schedule(static)
for (p = 0 ; p < cnz ; p++)
{
#ifdef GB_EWISEUNION
{
// C (i,j) = alpha + B(i,j)
GB_LOAD_B (bij, Bx, p, B_iso) ;
GB_BINOP (GB_CX (p), alpha_scalar, bij,
p % vlen, p / vlen) ;
}
#else
{
// C (i,j) = B (i,j)
GB_COPY_B_TO_C (GB_CX (p), Bx, p, B_iso) ;
}
#endif
}
// Pass 2: overlay A+B at the entries present in A.
GB_SLICE_MATRIX (A, 8, chunk) ;
#pragma omp parallel for num_threads(A_nthreads) schedule(dynamic,1)
for (taskid = 0 ; taskid < A_ntasks ; taskid++)
{
int64_t kfirst = kfirst_Aslice [taskid] ;
int64_t klast = klast_Aslice [taskid] ;
for (int64_t k = kfirst ; k <= klast ; k++)
{
// find the part of A(:,k) for this task
int64_t j = GBH (Ah, k) ;
int64_t pA_start, pA_end ;
GB_get_pA (&pA_start, &pA_end, taskid, k, kfirst,
klast, pstart_Aslice, Ap, vlen) ;
int64_t pC_start = j * vlen ;
// traverse over A(:,j), the kth vector of A
for (int64_t pA = pA_start ; pA < pA_end ; pA++)
{
// C (i,j) = A (i,j) + B (i,j)
int64_t i = Ai [pA] ;
int64_t p = pC_start + i ;
GB_LOAD_A (aij, Ax, pA, A_iso) ;
GB_LOAD_B (bij, Bx, p , B_iso) ;
GB_BINOP (GB_CX (p), aij, bij, i, j) ;
}
}
}
}
}
}
#endif
|
post_utilities.h | #ifndef POST_UTILITIES_H
#define POST_UTILITIES_H
#include "utilities/timer.h"
#include "includes/define.h"
#include "includes/variables.h"
#include "custom_utilities/create_and_destroy.h"
#include "custom_utilities/GeometryFunctions.h"
#include "custom_elements/Particle_Contact_Element.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#include "utilities/openmp_utils.h"
#include <limits>
#include <iostream>
#include <iomanip>
#include <cmath>
namespace Kratos {
class PostUtilities {
public:
typedef ModelPart::ElementsContainerType ElementsArrayType;
typedef ModelPart::NodesContainerType NodesContainerType;
KRATOS_CLASS_POINTER_DEFINITION(PostUtilities);
/// Default constructor.
PostUtilities() {};
/// Destructor.
virtual ~PostUtilities() {};
void AddModelPartToModelPart(ModelPart& rCompleteModelPart, ModelPart& rModelPartToAdd)
{
////WATCH OUT! This function respects the existing Id's!
KRATOS_TRY;
//preallocate the memory needed
int tot_nodes = rCompleteModelPart.Nodes().size() + rModelPartToAdd.GetCommunicator().LocalMesh().Nodes().size();
int tot_elements = rCompleteModelPart.Elements().size() + rModelPartToAdd.GetCommunicator().LocalMesh().Elements().size();
rCompleteModelPart.Nodes().reserve(tot_nodes);
rCompleteModelPart.Elements().reserve(tot_elements);
for (ModelPart::NodesContainerType::ptr_iterator node_it = rModelPartToAdd.GetCommunicator().LocalMesh().Nodes().ptr_begin(); node_it != rModelPartToAdd.GetCommunicator().LocalMesh().Nodes().ptr_end(); node_it++)
{
rCompleteModelPart.Nodes().push_back(*node_it);
}
for (ModelPart::ElementsContainerType::ptr_iterator elem_it = rModelPartToAdd.GetCommunicator().LocalMesh().Elements().ptr_begin(); elem_it != rModelPartToAdd.GetCommunicator().LocalMesh().Elements().ptr_end(); elem_it++)
{
rCompleteModelPart.Elements().push_back(*elem_it);
}
KRATOS_CATCH("");
}
void AddSpheresNotBelongingToClustersToMixModelPart(ModelPart& rCompleteModelPart, ModelPart& rModelPartToAdd)
{
////WATCH OUT! This function respects the existing Id's!
KRATOS_TRY;
//preallocate the memory needed
int tot_size = rCompleteModelPart.Nodes().size();
for (ModelPart::NodesContainerType::ptr_iterator node_it = rModelPartToAdd.GetCommunicator().LocalMesh().Nodes().ptr_begin(); node_it != rModelPartToAdd.GetCommunicator().LocalMesh().Nodes().ptr_end(); node_it++)
{
ModelPart::NodeIterator i_iterator = node_it;
Node < 3 > & i = *i_iterator;
if (i.IsNot(DEMFlags::BELONGS_TO_A_CLUSTER)) {tot_size += 1;}
}
rCompleteModelPart.Nodes().reserve(tot_size);
rCompleteModelPart.Elements().reserve(tot_size);
for (ModelPart::NodesContainerType::ptr_iterator node_it = rModelPartToAdd.GetCommunicator().LocalMesh().Nodes().ptr_begin(); node_it != rModelPartToAdd.GetCommunicator().LocalMesh().Nodes().ptr_end(); node_it++)
{
ModelPart::NodeIterator i_iterator = node_it;
Node < 3 > & i = *i_iterator;
if (i.IsNot(DEMFlags::BELONGS_TO_A_CLUSTER)) {rCompleteModelPart.Nodes().push_back(*node_it);}
}
for (ModelPart::ElementsContainerType::ptr_iterator elem_it = rModelPartToAdd.GetCommunicator().LocalMesh().Elements().ptr_begin(); elem_it != rModelPartToAdd.GetCommunicator().LocalMesh().Elements().ptr_end(); elem_it++)
{
Node < 3 >& i = (*elem_it)->GetGeometry()[0];
if (i.IsNot(DEMFlags::BELONGS_TO_A_CLUSTER)) {rCompleteModelPart.Elements().push_back(*elem_it);}
}
KRATOS_CATCH("");
}
array_1d<double,3> VelocityTrap(ModelPart& rModelPart, const array_1d<double,3>& low_point, const array_1d<double,3>& high_point) {
ElementsArrayType& pElements = rModelPart.GetCommunicator().LocalMesh().Elements();
double velocity_X = 0.0, velocity_Y = 0.0, velocity_Z = 0.0;
int number_of_elements = 0;
using MultipleReduction = CombinedReduction<
SumReduction<double>,
SumReduction<double>,
SumReduction<double>,
SumReduction<int>>;
std::tie(velocity_X,velocity_Y,velocity_Z,number_of_elements) = block_for_each<MultipleReduction>(pElements, [&](ModelPart::ElementType& rElement){
array_1d<double,3> coor = rElement.GetGeometry()[0].Coordinates();
double local_sum_x = 0.0;
double local_sum_y = 0.0;
double local_sum_z = 0.0;
int local_sum_elem = 0;
if (coor[0] >= low_point[0] && coor[0] <= high_point[0] &&
coor[1] >= low_point[1] && coor[1] <= high_point[1] &&
coor[2] >= low_point[2] && coor[2] <= high_point[2]) {
local_sum_x += rElement.GetGeometry()[0].FastGetSolutionStepValue(VELOCITY_X);
local_sum_y += rElement.GetGeometry()[0].FastGetSolutionStepValue(VELOCITY_Y);
local_sum_z += rElement.GetGeometry()[0].FastGetSolutionStepValue(VELOCITY_Z);
local_sum_elem++;
}
for (int i = 0; i < 3; ++i) {
KRATOS_ERROR_IF(high_point[i] < low_point[i]) << "Check the limits of the Velocity Trap Box. Maximum coordinates smaller than minimum coordinates." << std::endl;
}
return std::make_tuple(local_sum_x, local_sum_y, local_sum_z, local_sum_elem); // note that these may have different types
});
if (number_of_elements) {
velocity_X /= number_of_elements;
velocity_Y /= number_of_elements;
velocity_Z /= number_of_elements;
}
array_1d<double,3> velocity;
velocity[0] = velocity_X;
velocity[1] = velocity_Y;
velocity[2] = velocity_Z;
return velocity;
}//VelocityTrap
void IntegrationOfForces(ModelPart::NodesContainerType& mesh_nodes , array_1d<double, 3>& total_forces,
array_1d<double, 3>& rotation_center, array_1d<double, 3>& total_moment) {
for (ModelPart::NodesContainerType::ptr_iterator node_pointer_it = mesh_nodes.ptr_begin();
node_pointer_it != mesh_nodes.ptr_end(); ++node_pointer_it) {
const array_1d<double, 3>& contact_forces_summed_at_structure_point = (*node_pointer_it)->FastGetSolutionStepValue(CONTACT_FORCES);
noalias(total_forces) += contact_forces_summed_at_structure_point;
array_1d<double, 3> vector_from_structure_center_to_structure_point;
noalias(vector_from_structure_center_to_structure_point) = (*node_pointer_it)->Coordinates() - rotation_center;
array_1d<double, 3> moment_to_add;
GeometryFunctions::CrossProduct(vector_from_structure_center_to_structure_point, contact_forces_summed_at_structure_point, moment_to_add);
noalias(total_moment) += moment_to_add;
}
}
void IntegrationOfElasticForces(ModelPart::NodesContainerType& mesh_nodes, array_1d<double, 3>& total_forces) {
for (ModelPart::NodesContainerType::ptr_iterator node_pointer_it = mesh_nodes.ptr_begin(); node_pointer_it != mesh_nodes.ptr_end(); ++node_pointer_it) {
const array_1d<double, 3> elastic_forces_added_up_at_node = (*node_pointer_it)->FastGetSolutionStepValue(ELASTIC_FORCES);
noalias(total_forces) += elastic_forces_added_up_at_node;
}
}
array_1d<double, 3> ComputePoisson(ModelPart& rModelPart) {
ElementsArrayType& pElements = rModelPart.GetCommunicator().LocalMesh().Elements();
double total_poisson_value = 0.0;
unsigned int number_of_spheres_to_evaluate_poisson = 0;
array_1d<double, 3> return_data = ZeroVector(3);
// TODO: Add OpenMP code
for (unsigned int k = 0; k < pElements.size(); k++) {
ElementsArrayType::iterator it = pElements.ptr_begin() + k;
Element* raw_p_element = &(*it);
SphericParticle* p_sphere = dynamic_cast<SphericParticle*>(raw_p_element);
double& particle_poisson_value = p_sphere->GetGeometry()[0].FastGetSolutionStepValue(POISSON_VALUE);
particle_poisson_value = 0.0;
double epsilon_XY = 0.0;
double epsilon_Z = 0.0;
unsigned int number_of_neighbors_per_sphere_to_evaluate_poisson = 0;
array_1d<double, 3> other_to_me_vector;
array_1d<double, 3> initial_other_to_me_vector;
unsigned int number_of_neighbors = p_sphere->mNeighbourElements.size();
for (unsigned int i = 0; i < number_of_neighbors; i++) {
if (p_sphere->mNeighbourElements[i] == NULL) continue;
noalias(other_to_me_vector) = p_sphere->GetGeometry()[0].Coordinates() - p_sphere->mNeighbourElements[i]->GetGeometry()[0].Coordinates();
noalias(initial_other_to_me_vector) = p_sphere->GetGeometry()[0].GetInitialPosition() - p_sphere->mNeighbourElements[i]->GetGeometry()[0].GetInitialPosition();
double initial_distance_XY = sqrt(initial_other_to_me_vector[0] * initial_other_to_me_vector[0] + initial_other_to_me_vector[1] * initial_other_to_me_vector[1]);
double initial_distance_Z = initial_other_to_me_vector[2];
if (initial_distance_XY && initial_distance_Z) {
epsilon_XY = -1 + sqrt(other_to_me_vector[0] * other_to_me_vector[0] + other_to_me_vector[1] * other_to_me_vector[1]) / initial_distance_XY;
epsilon_Z = -1 + fabs(other_to_me_vector[2] / initial_distance_Z);
} else continue;
if (epsilon_Z) { // Should it be added here 'if p_sphere->Id() < p_sphere->mNeighbourElements[i]->Id()'?
if (((-epsilon_XY / epsilon_Z) > 0.5) || ((-epsilon_XY / epsilon_Z) < 0.0)) continue; // TODO: Check this
particle_poisson_value -= epsilon_XY / epsilon_Z;
number_of_neighbors_per_sphere_to_evaluate_poisson++;
} else continue;
}
if (number_of_neighbors_per_sphere_to_evaluate_poisson) {
particle_poisson_value /= number_of_neighbors_per_sphere_to_evaluate_poisson;
number_of_spheres_to_evaluate_poisson++;
total_poisson_value += particle_poisson_value;
}
}
if (number_of_spheres_to_evaluate_poisson) total_poisson_value /= number_of_spheres_to_evaluate_poisson;
return_data[0] = total_poisson_value;
return return_data;
} //ComputePoisson
array_1d<double, 3> ComputePoisson2D(ModelPart& rModelPart) { // TODO: Adjust this function to the new changes made in the 3D version
ElementsArrayType& pElements = rModelPart.GetCommunicator().LocalMesh().Elements();
double total_poisson_value = 0.0;
unsigned int number_of_bonds_to_evaluate_poisson = 0;
array_1d<double, 3> return_data = ZeroVector(3);
double total_epsilon_y_value = 0.0;
// TODO: Add OpenMP code
for (unsigned int k = 0; k < pElements.size(); k++) {
ElementsArrayType::iterator it = pElements.ptr_begin() + k;
Element* raw_p_element = &(*it);
SphericParticle* p_sphere = dynamic_cast<SphericParticle*>(raw_p_element);
double& particle_poisson_value = p_sphere->GetGeometry()[0].FastGetSolutionStepValue(POISSON_VALUE);
particle_poisson_value = 0.0;
double epsilon_X = 0.0;
double epsilon_Y = 0.0;
unsigned int number_of_neighbors_to_evaluate_poisson = 0;
array_1d<double, 3> other_to_me_vector;
array_1d<double, 3> initial_other_to_me_vector;
double average_sphere_epsilon_y_value = 0.0;
unsigned int number_of_neighbors = p_sphere->mNeighbourElements.size();
for (unsigned int i = 0; i < number_of_neighbors; i++)
{
if (p_sphere->mNeighbourElements[i] == NULL) continue;
noalias(other_to_me_vector) = p_sphere->GetGeometry()[0].Coordinates() - p_sphere->mNeighbourElements[i]->GetGeometry()[0].Coordinates();
noalias(initial_other_to_me_vector) = p_sphere->GetGeometry()[0].GetInitialPosition() - p_sphere->mNeighbourElements[i]->GetGeometry()[0].GetInitialPosition();
double initial_distance_X = initial_other_to_me_vector[0];
double initial_distance_Y = initial_other_to_me_vector[1];
if (initial_distance_X && initial_distance_Y) {
epsilon_X = -1 + fabs(other_to_me_vector[0] / initial_distance_X);
epsilon_Y = -1 + fabs(other_to_me_vector[1] / initial_distance_Y);
}
if (epsilon_Y) {
particle_poisson_value -= epsilon_X / epsilon_Y;
number_of_neighbors_to_evaluate_poisson++;
total_poisson_value -= epsilon_X / epsilon_Y;
number_of_bonds_to_evaluate_poisson++;
}
average_sphere_epsilon_y_value += epsilon_Y;
}
if (number_of_neighbors_to_evaluate_poisson) particle_poisson_value /= number_of_neighbors_to_evaluate_poisson;
total_epsilon_y_value += average_sphere_epsilon_y_value / number_of_neighbors;
}
if (number_of_bonds_to_evaluate_poisson) total_poisson_value /= number_of_bonds_to_evaluate_poisson;
total_epsilon_y_value /= pElements.size();
return_data[0] = total_poisson_value;
return_data[1] = total_epsilon_y_value;
return return_data;
} //ComputePoisson2D
void ComputeEulerAngles(ModelPart& rSpheresModelPart, ModelPart& rClusterModelPart) {
ProcessInfo& r_process_info = rSpheresModelPart.GetProcessInfo();
bool if_trihedron_option = (bool) r_process_info[TRIHEDRON_OPTION];
typedef ModelPart::NodesContainerType NodesArrayType;
NodesArrayType& pSpheresNodes = rSpheresModelPart.GetCommunicator().LocalMesh().Nodes();
NodesArrayType& pClusterNodes = rClusterModelPart.GetCommunicator().LocalMesh().Nodes();
#pragma omp parallel for
for (int k = 0; k < (int) pSpheresNodes.size(); k++) {
ModelPart::NodeIterator i_iterator = pSpheresNodes.ptr_begin() + k;
Node < 3 > & i = *i_iterator;
array_1d<double, 3 >& rotated_angle = i.FastGetSolutionStepValue(PARTICLE_ROTATION_ANGLE);
if (if_trihedron_option && i.IsNot(DEMFlags::BELONGS_TO_A_CLUSTER)) {
array_1d<double, 3 >& EulerAngles = i.FastGetSolutionStepValue(EULER_ANGLES);
GeometryFunctions::EulerAnglesFromRotationAngle(EulerAngles, rotated_angle);
} // if_trihedron_option && Not BELONGS_TO_A_CLUSTER
}//for Node
#pragma omp parallel for
for (int k = 0; k < (int) pClusterNodes.size(); k++) {
ModelPart::NodeIterator i_iterator = pClusterNodes.ptr_begin() + k;
Node < 3 > & i = *i_iterator;
Quaternion<double>& Orientation = i.FastGetSolutionStepValue(ORIENTATION);
array_1d<double, 3 >& EulerAngles = i.FastGetSolutionStepValue(EULER_ANGLES);
GeometryFunctions::QuaternionToGiDEulerAngles(Orientation, EulerAngles);
}//for Node
} //ComputeEulerAngles
double QuasiStaticAdimensionalNumber(ModelPart& rParticlesModelPart, ModelPart& rContactModelPart, const ProcessInfo& r_process_info) {
ElementsArrayType& pParticleElements = rParticlesModelPart.GetCommunicator().LocalMesh().Elements();
array_1d<double,3> particle_forces;
const array_1d<double,3>& gravity = r_process_info[GRAVITY];
double total_force = block_for_each<MaxReduction<double>>(pParticleElements, [&](ModelPart::ElementType& rParticleElement) -> double {
Element::GeometryType& geom = rParticleElement.GetGeometry();
double local_force = 0.0;
const auto& n0 = geom[0];
if (n0.IsNot(DEMFlags::FIXED_VEL_X) &&
n0.IsNot(DEMFlags::FIXED_VEL_Y) &&
n0.IsNot(DEMFlags::FIXED_VEL_Z)){
particle_forces = n0.FastGetSolutionStepValue(TOTAL_FORCES);
double mass = n0.FastGetSolutionStepValue(NODAL_MASS);
particle_forces[0] += mass * gravity[0];
particle_forces[1] += mass * gravity[1];
particle_forces[2] += mass * gravity[2];
double module = 0.0;
GeometryFunctions::module(particle_forces, module);
local_force += module;
}
return local_force;
}); // note that the value to be reduced should be returned, in this case local_force.
ElementsArrayType& pContactElements = rContactModelPart.GetCommunicator().LocalMesh().Elements();
array_1d<double,3> contact_forces;
double total_elastic_force = block_for_each<MaxReduction<double>>(pContactElements, [&](ModelPart::ElementType& rContactElement) -> double {
Element::GeometryType& geom = rContactElement.GetGeometry();
double local_force = 0.0;
if (geom[0].IsNot(DEMFlags::FIXED_VEL_X) &&
geom[0].IsNot(DEMFlags::FIXED_VEL_Y) &&
geom[0].IsNot(DEMFlags::FIXED_VEL_Z)){
contact_forces = rContactElement.GetValue(LOCAL_CONTACT_FORCE);
double module = 0.0;
GeometryFunctions::module(contact_forces, module);
local_force += module;
}
return local_force;
}); // note that the value to be reduced should be returned, in this case local_force.
double adimensional_value = 0.0;
if (total_elastic_force != 0.0) {
adimensional_value = total_force/total_elastic_force;
}
else {
KRATOS_ERROR << "There are no elastic forces= " << total_elastic_force << std::endl;
}
return adimensional_value;
}//QuasiStaticAdimensionalNumber
}; // Class PostUtilities
} // namespace Kratos.
#endif // POST_UTILITIES_H
|
GB_unaryop__lnot_fp32_int64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_fp32_int64
// op(A') function: GB_tran__lnot_fp32_int64
// C type: float
// A type: int64_t
// cast: float cij = (float) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
int64_t
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, x) \
float z = (float) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_FP32 || GxB_NO_INT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply the unary operator to each of the anz entries of A,
// using nthreads OpenMP threads. Returns GrB_NO_VALUE when this kernel is
// compiled out (GB_DISABLE), GrB_SUCCESS otherwise.
GrB_Info GB_unop__lnot_fp32_int64
(
    float *restrict Cx,
    const int64_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t k = 0 ; k < anz ; k++)
    {
        // Same computation as GB_CAST_OP (k, k), written out explicitly:
        int64_t aij = Ax [k] ;      // aij = Ax [k]
        float z = (float) aij ;     // typecast int64_t -> float
        Cx [k] = !(z != 0) ;        // Cx [k] = lnot (z)
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')) : transpose A, typecast, and apply the unary operator.
// The actual work is done by the generic transpose template, which consumes
// the GB_GETA / GB_CASTING / GB_OP macros defined at the top of this file.
// Returns GrB_NO_VALUE when this kernel is compiled out (GB_DISABLE).
GrB_Info GB_tran__lnot_fp32_int64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// phase 2 of the two-phase transpose: row counts were computed elsewhere.
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
ewald.h | #ifndef ewald_h
#define ewald_h
#include "logger.h"
#include "types.h"
namespace exafmm_laplace {
//! Ewald summation for the periodic Laplace kernel: the short-range part is
//! evaluated in real space (erfc-screened P2P over nearby cells) and the
//! long-range part in wave space (DFT over a truncated k-lattice).
//! NOTE(review): mk_task_group / create_taskc / wait_tasks are project task
//! macros — presumably a tasking layer or serial fallback; confirm in types.h.
class Ewald {
//! Wave structure for Ewald summation
struct Wave {
vec3 K; //!< 3-D wave number vector
real_t REAL; //!< real part of wave
real_t IMAG; //!< imaginary part of wave
};
typedef std::vector<Wave> Waves; //!< Vector of Wave types
typedef typename Waves::iterator W_iter; //!< Iterator of Wave types
private:
const int ksize; //!< Number of waves in Ewald summation
const real_t alpha; //!< Scaling parameter for Ewald summation
const real_t sigma; //!< Scaling parameter for Ewald summation
const real_t cutoff; //!< Cutoff distance
const vec3 cycle; //!< Periodic cycle
private:
//! Forward DFT: accumulate each wave's amplitude from all bodies (parallel over waves)
void dft(Waves & waves, Bodies & bodies) const {
vec3 scale;
for (int d=0; d<3; d++) scale[d]= 2 * M_PI / cycle[d]; // Scale conversion
#pragma omp parallel for
for (int w=0; w<int(waves.size()); w++) { // Loop over waves
W_iter W=waves.begin()+w; // Wave iterator
W->REAL = W->IMAG = 0; // Initialize waves
for (B_iter B=bodies.begin(); B!=bodies.end(); B++) { // Loop over bodies
real_t th = 0; // Initialize phase
for (int d=0; d<3; d++) th += W->K[d] * B->X[d] * scale[d];// Determine phase
W->REAL += B->SRC * std::cos(th); // Accumulate real component
W->IMAG += B->SRC * std::sin(th); // Accumulate imaginary component
} // End loop over bodies
} // End loop over waves
}
//! Inverse DFT: accumulate potential and force on each body from all waves (parallel over bodies)
void idft(Waves & waves, Bodies & bodies) const {
vec3 scale;
for (int d=0; d<3; d++) scale[d] = 2 * M_PI / cycle[d]; // Scale conversion
#pragma omp parallel for
for (int b=0; b<int(bodies.size()); b++) { // Loop over bodies
B_iter B=bodies.begin()+b; // Body iterator
kvec4 TRG = kreal_t(0); // Initialize target values
for (W_iter W=waves.begin(); W!=waves.end(); W++) { // Loop over waves
real_t th = 0; // Initialize phase
for (int d=0; d<3; d++) th += W->K[d] * B->X[d] * scale[d];// Determine phase
real_t dtmp = W->REAL * std::sin(th) - W->IMAG * std::cos(th);// Temporary value
TRG[0] += W->REAL * std::cos(th) + W->IMAG * std::sin(th);// Accumulate potential
for (int d=0; d<3; d++) TRG[d+1] -= dtmp * W->K[d]; // Accumulate force
} // End loop over waves
for (int d=0; d<3; d++) TRG[d+1] *= scale[d]; // Scale forces
B->TRG += TRG; // Copy results to bodies
} // End loop over bodies
}
//! Initialize wave vector: enumerate half the k-lattice with |k|^2 <= ksize^2
//! (the conjugate-symmetric half is implied, hence the l/m/n lower bounds)
Waves initWaves() const {
Waves waves; // Initialize wave vector
int kmaxsq = ksize * ksize; // kmax squared
int kmax = ksize; // kmax as integer
for (int l=0; l<=kmax; l++) { // Loop over x component
int mmin = -kmax; // Determine minimum y component
if (l==0) mmin = 0; // Exception for minimum y component
for (int m=mmin; m<=kmax; m++) { // Loop over y component
int nmin = -kmax; // Determine minimum z component
if (l==0 && m==0) nmin=1; // Exception for minimum z component
for (int n=nmin; n<=kmax; n++) { // Loop over z component
real_t ksq = l * l + m * m + n * n; // Wave number squared
if (ksq <= kmaxsq) { // If wave number is below kmax
Wave wave; // Initialize wave structure
wave.K[0] = l; // x component of k
wave.K[1] = m; // y component of k
wave.K[2] = n; // z component of k
wave.REAL = wave.IMAG = 0; // Initialize amplitude
waves.push_back(wave); // Push wave to vector
} // End if for wave number
} // End loop over z component
} // End loop over y component
} // End loop over x component
return waves; // Return wave vector
}
//! Ewald real part P2P kernel: erfc-screened pairwise potential and force
void P2P(C_iter Ci, C_iter Cj, vec3 Xperiodic) const {
for (B_iter Bi=Ci->BODY; Bi!=Ci->BODY+Ci->NBODY; Bi++) { // Loop over target bodies
for (B_iter Bj=Cj->BODY; Bj!=Cj->BODY+Cj->NBODY; Bj++) {// Loop over source bodies
vec3 dX = Bi->X - Bj->X - Xperiodic; // Distance vector from source to target
real_t R2 = norm(dX); // R^2
if (0 < R2 && R2 < cutoff * cutoff) { // Exclude self interaction and cutoff
real_t R2s = R2 * alpha * alpha; // (R * alpha)^2
real_t Rs = std::sqrt(R2s); // R * alpha
real_t invRs = 1 / Rs; // 1 / (R * alpha)
real_t invR2s = invRs * invRs; // 1 / (R * alpha)^2
real_t invR3s = invR2s * invRs; // 1 / (R * alpha)^3
real_t dtmp = Bj->SRC * (M_2_SQRTPI * std::exp(-R2s) * invR2s + erfc(Rs) * invR3s);
dtmp *= alpha * alpha * alpha; // Scale temporary value
Bi->TRG[0] += Bj->SRC * erfc(Rs) * invRs * alpha; // Ewald real potential
Bi->TRG[1] -= dX[0] * dtmp; // x component of Ewald real force
Bi->TRG[2] -= dX[1] * dtmp; // y component of Ewald real force
Bi->TRG[3] -= dX[2] * dtmp; // z component of Ewald real force
} // End if for self interaction
} // End loop over source bodies
} // End loop over target bodies
}
//! Recursive functor for traversing tree to find neighbors within the cutoff
struct Neighbor {
Ewald * ewald; //!< Ewald object
C_iter Ci; //!< Iterator of current target cell
C_iter Cj; //!< Iterator of current source cell
C_iter C0; //!< Iterator of first source cell
Neighbor(Ewald * _ewald, C_iter _Ci, C_iter _Cj, C_iter _C0) :// Constructor
ewald(_ewald), Ci(_Ci), Cj(_Cj), C0(_C0) {} // Initialize variables
void operator() () const { // Overload operator()
vec3 dX = Ci->X - Cj->X; // Distance vector from source to target
wrap(dX, ewald->cycle); // Wrap around periodic domain
vec3 Xperiodic = Ci->X - Cj->X - dX; // Coordinate offset for periodic B.C.
real_t R = std::sqrt(norm(dX)); // Scalar distance
if (R - Ci->R - Cj->R < sqrtf(3) * ewald->cutoff) { // If cells are close
if(Cj->NCHILD == 0) ewald->P2P(Ci,Cj,Xperiodic); // Ewald real part
for (C_iter CC=C0+Cj->ICHILD; CC!=C0+Cj->ICHILD+Cj->NCHILD; CC++) {// Loop over cell's children
Neighbor neighbor(ewald, Ci, CC, C0); // Instantiate recursive functor
neighbor(); // Recursive call
} // End loop over cell's children
} // End if for far cells
} // End overload operator()
};
public:
//! Constructor
Ewald(int _ksize, real_t _alpha, real_t _sigma, real_t _cutoff, vec3 _cycle) :
ksize(_ksize), alpha(_alpha), sigma(_sigma), cutoff(_cutoff), cycle(_cycle) {} // Initialize variables
//! Ewald real part: one task per leaf target cell, each traversing the source tree
void realPart(Cells & cells, Cells & jcells) {
logger::startTimer("Ewald real part"); // Start timer
C_iter Cj = jcells.begin(); // Set begin iterator of source cells
mk_task_group; // Initialize tasks
for (C_iter Ci=cells.begin(); Ci!=cells.end(); Ci++) { // Loop over target cells
if (Ci->NCHILD == 0) { // If target cell is leaf
Neighbor neighbor(this, Ci, Cj, Cj); // Instantiate recursive functor
create_taskc(neighbor); // Create task for recursive call
} // End if for leaf target cell
} // End loop over target cells
wait_tasks; // Synchronize tasks
logger::stopTimer("Ewald real part"); // Stop timer
}
//! Subtract self term of the real-space sum from each body's potential
void selfTerm(Bodies & bodies) {
for (B_iter B=bodies.begin(); B!=bodies.end(); B++) { // Loop over all bodies
B->TRG[0] -= M_2_SQRTPI * B->SRC * alpha; // Self term of Ewald real part
} // End loop over all bodies in cell
}
//! Ewald wave part: DFT of sources, Gaussian filter in k-space, inverse DFT onto targets
void wavePart(Bodies & bodies, Bodies & jbodies) {
logger::startTimer("Ewald wave part"); // Start timer
Waves waves = initWaves(); // Initialize wave vector
dft(waves,jbodies); // Apply DFT to bodies to get waves
vec3 scale;
for (int d=0; d<3; d++) scale[d] = 2 * M_PI / cycle[d]; // Scale conversion
real_t coef = 2 / sigma / cycle[0] / cycle[1] / cycle[2]; // First constant
real_t coef2 = 1 / (4 * alpha * alpha); // Second constant
for (W_iter W=waves.begin(); W!=waves.end(); W++) { // Loop over waves
vec3 K = W->K * scale; // Wave number scaled
real_t K2 = norm(K); // Wave number squared
real_t factor = coef * std::exp(-K2 * coef2) / K2; // Wave factor
W->REAL *= factor; // Apply wave factor to real part
W->IMAG *= factor; // Apply wave factor to imaginary part
} // End loop over waves
idft(waves,bodies); // Inverse DFT
logger::stopTimer("Ewald wave part"); // Stop timer
}
//! Print the Ewald parameters (only when logger::verbose is set)
void print(int stringLength) {
if (logger::verbose) { // If verbose flag is true
std::cout << std::setw(stringLength) << std::fixed << std::left// Set format
<< "ksize" << " : " << ksize << std::endl // Print ksize
<< std::setw(stringLength) // Set format
<< "alpha" << " : " << alpha << std::endl // Print alpha
<< std::setw(stringLength) // Set format
<< "sigma" << " : " << sigma << std::endl // Print sigma
<< std::setw(stringLength) // Set format
<< "cutoff" << " : " << cutoff << std::endl // Print cutoff
<< std::setw(stringLength) // Set format
<< "cycle" << " : " << cycle << std::endl; // Print cycle
} // End if for verbose flag
}
};
}
#endif
|
timing_kernels.c | #include <inttypes.h>
#include <unistd.h>
#include <sys/time.h>
#include <assert.h>
#include <math.h>
#include <papi.h>
#include <omp.h>
#include "prepareArray.h"
#include "timing_kernels.h"
// For do_work macro in the header file
volatile double x,y;
extern int max_size;
char* eventname = NULL;
/*
 * Core pointer-chasing kernel of the cache/memory benchmark.
 *
 * Each OpenMP thread walks its own pointer chain (v[thread_id]) prepared by
 * prepareArray().  Depending on the mode, the run either times the chase
 * (latency_only) or counts a named PAPI hardware event around it.
 *
 * Parameters:
 *   active_buf_len    length of the active buffer region, in uintptr_t words.
 *   line_size         stride between consecutive chain elements, in bytes.
 *   pageCountPerBlock size of a chain "block", expressed in pages.
 *   pattern           access pattern selector, forwarded to prepareArray().
 *   v                 per-thread buffers: v[t] is thread t's chain storage.
 *   rslt              in: seed for the anti-optimization value; out: garbage
 *                     sum that forces the compiler to keep the chase alive.
 *   latency_only      nonzero = time the chase instead of counting events.
 *   mode              CACHE_READ_ONLY selects the read kernel, otherwise the
 *                     read/write kernel is used.
 *   ONT               number of OpenMP threads (sizes the counter array).
 *
 * Returns a run_output_t: out.status < 0 on failure; otherwise
 * out.counter[t] holds the average event count per access (event mode) or
 * out.dt[t] the average time per access (latency mode) for each thread t.
 */
run_output_t probeBufferSize(int active_buf_len, int line_size, float pageCountPerBlock, int pattern, uintptr_t **v, uintptr_t *rslt, int latency_only, int mode, int ONT){
    int _papi_eventset = PAPI_NULL;
    int retval, buffer = 0, status = 0;
    int error_line = -1, error_type = PAPI_OK;
    register uintptr_t *p = NULL;
    register uintptr_t p_prime;
    double time1, time2, dt, factor;
    long count, pageSize, blockSize;
    long long int counter[ONT];  /* one event counter slot per thread (VLA) */
    run_output_t out;
    out.status = 0;
    assert( sizeof(int) >= 4 );
    /* Derive x and y from *rslt so the compiler cannot prove them constant;
       they are folded into "buffer" below to keep the chase from being
       optimized away.  x is always 0 here, so the warning should not fire. */
    x = (double)*rslt;
    x = floor(1.3*x/(1.4*x+1.8));
    y = x*3.97;
    if( x > 0 || y > 0 )
        printf("WARNING: x=%lf y=%lf\n",x,y);
    // Make no fewer accesses than we would for a buffer of size 128KB.
    long countMax;
    unsigned long threshold = 128*1024;
    if( active_buf_len*sizeof(uintptr_t) > threshold )
        countMax = 50*((long)active_buf_len)/line_size;
    else
        countMax = 50*threshold/line_size;
    // Get the size of a page of memory (in uintptr_t words, not bytes).
    pageSize = sysconf(_SC_PAGESIZE)/sizeof(uintptr_t);
    if( pageSize <= 0 ){
        fprintf(stderr,"Cannot determine pagesize, sysconf() returned an error code.\n");
        out.status = -1;
        return out;
    }
    // Compute the size of a block in the pointer chain and create the pointer chain.
    blockSize = (long)(pageCountPerBlock*(float)pageSize);
    /* Each thread builds its own chain; failures accumulate into "status". */
    #pragma omp parallel reduction(+:status) default(shared)
    {
        int idx = omp_get_thread_num();
        status += prepareArray(v[idx], active_buf_len, line_size, blockSize, pattern);
    }
    // Start of threaded benchmark.
    #pragma omp parallel private(p,count,dt,factor,time1,time2,retval) reduction(+:buffer) reduction(+:status) firstprivate(_papi_eventset) default(shared)
    {
        int idx = omp_get_thread_num();
        int thdStatus = 0;
        // Initialize the result to a value indicating an error.
        // If no error occurs, it will be overwritten.
        if ( !latency_only ) {
            out.counter[idx] = -1;
        }
        // We will use "p" even after the epilogue, so let's set
        // it here in case an error occurs.
        p = &v[idx][0];
        count = countMax;
        /* Event-counting mode: set up a per-thread PAPI event set.  Any
           failure records the error site and jumps past the kernel. */
        if ( !latency_only ) {
            retval = PAPI_create_eventset( &_papi_eventset );
            if (retval != PAPI_OK ){
                error_type = retval;
                error_line = __LINE__;
                thdStatus = -1;
                // If we can't measure events, no need to run the kernel.
                goto skip_epilogue;
            }
            retval = PAPI_add_named_event( _papi_eventset, eventname );
            if (retval != PAPI_OK ){
                error_type = retval;
                error_line = __LINE__;
                thdStatus = -1;
                // If we can't measure events, no need to run the kernel.
                goto clean_up;
            }
            // Start the counters.
            retval = PAPI_start(_papi_eventset);
            if ( PAPI_OK != retval ) {
                error_type = retval;
                error_line = __LINE__;
                thdStatus = -1;
                // If we can't measure events, no need to run the kernel.
                goto clean_up;
            }
        }
        // Start the actual test.
        // Micro-kernel for memory reading.  N_128/NW_128 (see the header)
        // perform 128 chained accesses per iteration.
        if( CACHE_READ_ONLY == mode || latency_only )
        {
            if( latency_only ) time1 = getticks();
            while(count > 0){
                N_128;
                count -= 128;
            }
            if( latency_only ) time2 = getticks();
        }
        // Micro-kernel for memory writing.
        else
        {
            while(count > 0){
                NW_128;
                count -= 128;
            }
        }
        if ( !latency_only ) {
            // Stop the counters.
            retval = PAPI_stop(_papi_eventset, &counter[idx]);
            if ( PAPI_OK != retval ) {
                error_type = retval;
                error_line = __LINE__;
                thdStatus = -1;
                goto clean_up;
            }
            // Get the average event count per access in pointer chase.
            out.counter[idx] = (1.0*counter[idx])/(1.0*countMax);
        clean_up:
            /* Tear down the event set even after an error; further errors
               overwrite error_type/error_line with the most recent site. */
            retval = PAPI_cleanup_eventset(_papi_eventset);
            if (retval != PAPI_OK ){
                error_type = retval;
                error_line = __LINE__;
                thdStatus = -1;
            }
            retval = PAPI_destroy_eventset(&_papi_eventset);
            if (retval != PAPI_OK ){
                error_type = retval;
                error_line = __LINE__;
                thdStatus = -1;
            }
        }else{
            // Compute the duration of the pointer chase.
            dt = elapsed(time2, time1);
            // Convert time into nanoseconds.
            // NOTE(review): factor of 1000 implies getticks()/elapsed() work in
            // microseconds — confirm against the timing helpers.
            factor = 1000.0;
            // Number of accesses per pointer chase.
            factor /= (1.0*countMax);
            // Get the average nanoseconds per access.
            out.dt[idx] = dt*factor;
        }
    skip_epilogue:
        /* Fold the final chase pointer (and x+y) into "buffer" so the chase
           has an observable result the optimizer must preserve. */
        buffer += (uintptr_t)p+(uintptr_t)(x+y);
        status += thdStatus;
    }
    // Get the collective status.
    if(status < 0) {
        error_handler(error_type, error_line);
        out.status = -1;
    }
    // Prevent compiler optimization.
    *rslt = buffer;
    return out;
}
/*
 * Map a PAPI error code to a human-readable message and print it to stderr,
 * together with the source line where the failure was recorded and, when
 * known, the event that was being processed.
 *
 *   e    - PAPI return code (non-positive; PAPI_OK == 0, errors are negative).
 *   line - __LINE__ captured at the failure site.
 *
 * The table below mirrors PAPI's error codes: errors[-e] is the message for
 * code e.  Codes outside the table fall back to "Unknown error code".
 */
void error_handler(int e, int line){
    int idx;
    const char *errors[26] = {
        "No error",
        "Invalid argument",
        "Insufficient memory",
        "A System/C library call failed",
        "Not supported by component",
        "Access to the counters was lost or interrupted",
        "Internal error, please send mail to the developers",
        "Event does not exist",
        "Event exists, but cannot be counted due to counter resource limitations",
        "EventSet is currently not running",
        "EventSet is currently counting",
        "No such EventSet Available",
        "Event in argument is not a valid preset",
        "Hardware does not support performance counters",
        "Unknown error code",
        "Permission level does not permit operation",
        "PAPI hasn't been initialized yet",
        "Component Index isn't set",
        "Not supported",
        "Not implemented",
        "Buffer size exceeded",
        "EventSet domain is not supported for the operation",
        "Invalid or missing event attributes",
        "Too many events or attributes",
        "Bad combination of features",
        "Component containing event is disabled"
    };
    idx = -e;
    /* Out-of-range codes map to "Unknown error code" (index 14, matching
       PAPI_EMISC == -14).  The previous fallback of 15 wrongly printed the
       permission-denied message for unrecognized codes. */
    if(idx >= 26 || idx < 0 )
        idx = 14;
    if( NULL != eventname )
        fprintf(stderr,"\nError \"%s\" occurred at line %d when processing event %s.\n", errors[idx], line, eventname);
    else
        fprintf(stderr,"\nError \"%s\" occurred at line %d.\n", errors[idx], line);
}
|
sparselu.c | /**********************************************************************************************/
/* This program is part of the Barcelona OpenMP Tasks Suite */
/* Copyright (C) 2009 Barcelona Supercomputing Center - Centro Nacional de Supercomputacion */
/* Copyright (C) 2009 Universitat Politecnica de Catalunya */
/* */
/* This program is free software; you can redistribute it and/or modify */
/* it under the terms of the GNU General Public License as published by */
/* the Free Software Foundation; either version 2 of the License, or */
/* (at your option) any later version. */
/* */
/* This program is distributed in the hope that it will be useful, */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
/* GNU General Public License for more details. */
/* */
/* You should have received a copy of the GNU General Public License */
/* along with this program; if not, write to the Free Software */
/* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */
/**********************************************************************************************/
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <libgen.h>
#include "bots.h"
#include "sparselu.h"
/***********************************************************************
* checkmat:
**********************************************************************/
/***********************************************************************
 * checkmat: compare two dense bots_arg_size_1 x bots_arg_size_1 blocks.
 * Returns TRUE when every entry of N matches the corresponding entry of
 * M within relative error EPSILON; otherwise reports the first mismatch
 * via bots_message() and returns FALSE.
 **********************************************************************/
int checkmat (float *M, float *N)
{
   int row, col;
   for (row = 0; row < bots_arg_size_1; row++)
   {
      for (col = 0; col < bots_arg_size_1; col++)
      {
         int pos = row*bots_arg_size_1+col;
         float diff = M[pos] - N[pos];
         if ( diff == 0.0 ) continue;        /* exact match: nothing to check */
         if ( diff < 0.0 ) diff = -diff;     /* absolute difference */
         if ( M[pos] == 0 )
         {
            /* Reference entry is zero: any difference is a failure. */
            bots_message("Checking failure: A[%d][%d]=%f B[%d][%d]=%f; \n",
               row,col, M[pos], row,col, N[pos]);
            return FALSE;
         }
         diff = diff / M[pos];               /* relative error w.r.t. M */
         if (diff > EPSILON)
         {
            bots_message("Checking failure: A[%d][%d]=%f B[%d][%d]=%f; Relative Error=%f\n",
               row,col, M[pos], row,col, N[pos], diff);
            return FALSE;
         }
      }
   }
   return TRUE;
}
/***********************************************************************
* genmat:
**********************************************************************/
/***********************************************************************
 * genmat: build the blocked sparse matrix.
 * M is a bots_arg_size x bots_arg_size array of block pointers; each
 * entry is either NULL (structural zero) or a freshly allocated dense
 * bots_arg_size_1 x bots_arg_size_1 block filled with a deterministic
 * pseudo-random sequence (linear congruential, seed 1325).
 **********************************************************************/
void genmat (float *M[])
{
   int ii, jj, i, j;
   int empty;
   int seed = 1325;
   float *block;
   for (ii=0; ii < bots_arg_size; ii++)
   {
      for (jj=0; jj < bots_arg_size; jj++)
      {
         /* Decide whether block (ii,jj) is a structural zero. */
         empty = FALSE;
         if ((ii<jj) && (ii%3 !=0)) empty = TRUE;
         if ((ii>jj) && (jj%3 !=0)) empty = TRUE;
         if (ii%2==1) empty = TRUE;
         if (jj%2==1) empty = TRUE;
         /* The tridiagonal band is always present. */
         if (ii==jj) empty = FALSE;
         if (ii==jj-1) empty = FALSE;
         if (ii-1 == jj) empty = FALSE;
         if (empty)
         {
            M[ii*bots_arg_size+jj] = NULL;
            continue;
         }
         /* Allocate and initialize a dense block. */
         block = (float *) malloc(bots_arg_size_1*bots_arg_size_1*sizeof(float));
         if (block == NULL)
         {
            bots_message("Error: Out of memory\n");
            exit(101);
         }
         M[ii*bots_arg_size+jj] = block;
         for (i = 0; i < bots_arg_size_1; i++)
         {
            for (j = 0; j < bots_arg_size_1; j++)
            {
               seed = (3125 * seed) % 65536;
               *block = (float)((seed - 32768.0) / 16384.0);
               block++;
            }
         }
      }
   }
}
/***********************************************************************
* print_structure:
**********************************************************************/
/***********************************************************************
 * print_structure: dump an ASCII map of the block structure of M —
 * 'x' marks an allocated block, a blank marks a structural zero.
 **********************************************************************/
void print_structure(char *name, float *M[])
{
   int row, col;
   bots_message("Structure for matrix %s @ 0x%p\n",name, M);
   for (row = 0; row < bots_arg_size; row++) {
      for (col = 0; col < bots_arg_size; col++) {
         if (M[row*bots_arg_size+col] != NULL)
            bots_message("x");
         else
            bots_message(" ");
      }
      bots_message("\n");
   }
   bots_message("\n");
}
/***********************************************************************
* allocate_clean_block:
**********************************************************************/
/***********************************************************************
 * allocate_clean_block: allocate a dense bots_arg_size_1 x
 * bots_arg_size_1 block initialized to zero.  Aborts with exit code 101
 * on memory exhaustion, matching genmat()'s behavior.
 * Returns: pointer to the zeroed block (never NULL).
 **********************************************************************/
float * allocate_clean_block()
{
   /* calloc zero-fills and checks the count*size product for overflow,
      replacing the original explicit double loop.  (All-bits-zero is
      0.0f on IEEE-754 platforms, which BOTS targets.) */
   float *p = calloc((size_t)bots_arg_size_1 * bots_arg_size_1, sizeof(float));
   if (p == NULL)
   {
      bots_message("Error: Out of memory\n");
      exit (101);
   }
   return p;
}
/***********************************************************************
* lu0:
**********************************************************************/
/***********************************************************************
 * lu0: in-place unblocked LU factorization (no pivoting) of the dense
 * diagonal block "diag".  L and U overwrite the block; L's unit
 * diagonal is implicit.
 **********************************************************************/
void lu0(float *diag)
{
   int i, j, k;
   float m;
   for (k=0; k<bots_arg_size_1; k++)
   {
      for (i=k+1; i<bots_arg_size_1; i++)
      {
         /* Multiplier for row i, stored in the L part of the block. */
         m = diag[i*bots_arg_size_1+k] / diag[k*bots_arg_size_1+k];
         diag[i*bots_arg_size_1+k] = m;
         /* Eliminate: update the trailing entries of row i. */
         for (j=k+1; j<bots_arg_size_1; j++)
            diag[i*bots_arg_size_1+j] -= m * diag[k*bots_arg_size_1+j];
      }
   }
}
/***********************************************************************
* bdiv:
**********************************************************************/
/***********************************************************************
 * bdiv: in-place triangular solve for an off-diagonal block below the
 * pivot: divides by U's diagonal and eliminates with U's rows, where U
 * is stored in the already-factorized block "diag".
 **********************************************************************/
void bdiv(float *diag, float *row)
{
   int i, j, k;
   float t;
   for (i=0; i<bots_arg_size_1; i++)
   {
      for (k=0; k<bots_arg_size_1; k++)
      {
         t = row[i*bots_arg_size_1+k] / diag[k*bots_arg_size_1+k];
         row[i*bots_arg_size_1+k] = t;
         for (j=k+1; j<bots_arg_size_1; j++)
            row[i*bots_arg_size_1+j] -= t * diag[k*bots_arg_size_1+j];
      }
   }
}
/***********************************************************************
* bmod:
**********************************************************************/
/***********************************************************************
 * bmod: trailing-submatrix update, inner -= row * col (dense matrix
 * multiply-and-subtract over bots_arg_size_1^2 entries).
 **********************************************************************/
void bmod(float *row, float *col, float *inner)
{
   int i, j, k;
   for (i=0; i<bots_arg_size_1; i++)
   {
      /* Hoist the row bases; the k-innermost order (and hence the
         floating-point accumulation order) is unchanged. */
      float *row_i   = &row[i*bots_arg_size_1];
      float *inner_i = &inner[i*bots_arg_size_1];
      for (j=0; j<bots_arg_size_1; j++)
         for (k=0; k<bots_arg_size_1; k++)
            inner_i[j] -= row_i[k] * col[k*bots_arg_size_1+j];
   }
}
/***********************************************************************
* fwd:
**********************************************************************/
/***********************************************************************
 * fwd: in-place forward substitution for a block on the pivot row,
 * eliminating with the implicit unit lower-triangular factor L stored
 * in the factorized block "diag".
 **********************************************************************/
void fwd(float *diag, float *col)
{
   int i, j, k;
   for (j=0; j<bots_arg_size_1; j++)
      for (k=0; k<bots_arg_size_1; k++)
      {
         /* col[k][j] is final once row k is passed (only rows > k are
            modified below), so it can be read once here. */
         float ckj = col[k*bots_arg_size_1+j];
         for (i=k+1; i<bots_arg_size_1; i++)
            col[i*bots_arg_size_1+j] -= diag[i*bots_arg_size_1+k] * ckj;
      }
}
void sparselu_init (float ***pBENCH, char *pass)
{
*pBENCH = (float **) malloc(bots_arg_size*bots_arg_size*sizeof(float *));
genmat(*pBENCH);
print_structure(pass, *pBENCH);
}
/***********************************************************************
 * sparselu_par_call: task-parallel blocked sparse LU factorization.
 * One untied task wraps the whole factorization (spawned from a single
 * region); per-block fwd/bdiv/bmod updates are child tasks.  The two
 * taskwaits enforce the only orderings required: panel updates must
 * finish before the trailing-submatrix update, and iteration kk must
 * finish before kk+1 starts.
 **********************************************************************/
void sparselu_par_call(float **BENCH)
{
   int ii, jj, kk;
   bots_message("Computing SparseLU Factorization (%dx%d matrix with %dx%d blocks) ",
           bots_arg_size,bots_arg_size,bots_arg_size_1,bots_arg_size_1);
#pragma omp parallel
#pragma omp single nowait
#pragma omp task untied
   for (kk=0; kk<bots_arg_size; kk++)
   {
      /* Factorize the diagonal block (sequential within iteration kk). */
      lu0(BENCH[kk*bots_arg_size+kk]);
      /* Row-kk panel: one task per non-empty block right of the pivot. */
      for (jj=kk+1; jj<bots_arg_size; jj++)
         if (BENCH[kk*bots_arg_size+jj] != NULL)
#pragma omp task untied firstprivate(kk, jj) shared(BENCH)
         {
            fwd(BENCH[kk*bots_arg_size+kk], BENCH[kk*bots_arg_size+jj]);
         }
      /* Column-kk panel: one task per non-empty block below the pivot. */
      for (ii=kk+1; ii<bots_arg_size; ii++)
         if (BENCH[ii*bots_arg_size+kk] != NULL)
#pragma omp task untied firstprivate(kk, ii) shared(BENCH)
         {
            bdiv (BENCH[kk*bots_arg_size+kk], BENCH[ii*bots_arg_size+kk]);
         }
      /* Both panels must be complete before the trailing update reads them. */
#pragma omp taskwait
      for (ii=kk+1; ii<bots_arg_size; ii++)
         if (BENCH[ii*bots_arg_size+kk] != NULL)
            for (jj=kk+1; jj<bots_arg_size; jj++)
               if (BENCH[kk*bots_arg_size+jj] != NULL)
#pragma omp task untied firstprivate(kk, jj, ii) shared(BENCH)
               {
                  /* Fill-in: allocate the target block on first touch.
                     Safe here because (ii,jj) is updated by exactly one
                     task per kk iteration. */
                  if (BENCH[ii*bots_arg_size+jj]==NULL) BENCH[ii*bots_arg_size+jj] = allocate_clean_block();
                  bmod(BENCH[ii*bots_arg_size+kk], BENCH[kk*bots_arg_size+jj], BENCH[ii*bots_arg_size+jj]);
               }
      /* Trailing update must finish before the next pivot is factorized. */
#pragma omp taskwait
   }
   bots_message(" completed!\n");
}
/***********************************************************************
 * sparselu_seq_call: sequential reference implementation of the blocked
 * sparse LU factorization (same algorithm as sparselu_par_call, without
 * tasking).  Used to produce the result that sparselu_check compares
 * against.
 **********************************************************************/
void sparselu_seq_call(float **BENCH)
{
   int ii, jj, kk;
   for (kk=0; kk<bots_arg_size; kk++)
   {
      /* Factorize the diagonal block. */
      lu0(BENCH[kk*bots_arg_size+kk]);
      /* Update the non-empty blocks on pivot row kk. */
      for (jj=kk+1; jj<bots_arg_size; jj++)
         if (BENCH[kk*bots_arg_size+jj] != NULL)
            fwd(BENCH[kk*bots_arg_size+kk], BENCH[kk*bots_arg_size+jj]);
      /* Update the non-empty blocks on pivot column kk. */
      for (ii=kk+1; ii<bots_arg_size; ii++)
         if (BENCH[ii*bots_arg_size+kk] != NULL)
            bdiv (BENCH[kk*bots_arg_size+kk], BENCH[ii*bots_arg_size+kk]);
      /* Trailing-submatrix update, allocating fill-in blocks on demand. */
      for (ii=kk+1; ii<bots_arg_size; ii++)
         if (BENCH[ii*bots_arg_size+kk] != NULL)
            for (jj=kk+1; jj<bots_arg_size; jj++)
               if (BENCH[kk*bots_arg_size+jj] != NULL)
               {
                  if (BENCH[ii*bots_arg_size+jj]==NULL)
                     BENCH[ii*bots_arg_size+jj] = allocate_clean_block();
                  bmod(BENCH[ii*bots_arg_size+kk], BENCH[kk*bots_arg_size+jj], BENCH[ii*bots_arg_size+jj]);
               }
   }
}
/***********************************************************************
 * sparselu_fini: final phase — print the resulting block structure
 * under the label "pass".  The blocks themselves are not freed here.
 **********************************************************************/
void sparselu_fini (float **BENCH, char *pass)
{
   print_structure(pass, BENCH);
}
/***********************************************************************
 * sparselu_check: verify BENCH against the sequential result SEQ.
 * The block structures must match (NULL vs. non-NULL) and every common
 * block must pass checkmat.  Stops at the first failure.
 * Returns BOTS_RESULT_SUCCESSFUL or BOTS_RESULT_UNSUCCESSFUL.
 **********************************************************************/
int sparselu_check(float **SEQ, float **BENCH)
{
   int ii, jj;
   int ok = 1;
   for (ii=0; ok && ii<bots_arg_size; ii++)
   {
      for (jj=0; ok && jj<bots_arg_size; jj++)
      {
         float *s = SEQ[ii*bots_arg_size+jj];
         float *b = BENCH[ii*bots_arg_size+jj];
         if ((s == NULL) != (b == NULL))
            ok = FALSE;            /* structural mismatch */
         else if (s != NULL)
            ok = checkmat(s, b);   /* compare block contents */
      }
   }
   if (ok) return BOTS_RESULT_SUCCESSFUL;
   else return BOTS_RESULT_UNSUCCESSFUL;
}
|
GB_unop__isnan_bool_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__isnan_bool_fp32
// op(A') function: GB_unop_tran__isnan_bool_fp32
// C type: bool
// A type: float
// cast: float cij = (aij)
// unaryop: cij = isnan (aij)
#define GB_ATYPE \
float
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = isnan (x) ;
// casting
#define GB_CAST(z, aij) \
float z = (aij) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = (aij) ; \
Cx [pC] = isnan (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISNAN || GxB_NO_BOOL || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply cij = isnan(aij) elementwise over the anz entries of Ax, writing
// bool results into Cx.  Cx and Ax may alias (the read happens before the
// write at each index).  Parallelized statically over nthreads.
// Returns GrB_NO_VALUE when the operator is compile-time disabled
// (GB_DISABLE), GrB_SUCCESS otherwise.
GrB_Info GB_unop_apply__isnan_bool_fp32
(
    bool *Cx,               // Cx and Ax may be aliased
    const float *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        float aij = Ax [p] ;
        float z = (aij) ;       // cast step (identity here: float -> float)
        Cx [p] = isnan (z) ;    // unary op: NaN test
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = isnan(A'): transpose A while applying the operator.  The actual work
// is in the shared template GB_unop_transpose.c, which expands the GB_*
// macros defined above (GB_CAST_OP etc.); this wrapper only supplies the
// type-specialized entry point.  A_slice/naslice describe the parallel
// partition of A.  Returns GrB_NO_VALUE when compile-time disabled.
GrB_Info GB_unop_tran__isnan_bool_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_unop__isfinite_bool_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__isfinite_bool_fp32
// op(A') function: GB_unop_tran__isfinite_bool_fp32
// C type: bool
// A type: float
// cast: float cij = (aij)
// unaryop: cij = isfinite (aij)
#define GB_ATYPE \
float
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = isfinite (x) ;
// casting
#define GB_CAST(z, aij) \
float z = (aij) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = (aij) ; \
Cx [pC] = isfinite (z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISFINITE || GxB_NO_BOOL || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply cij = isfinite(aij) elementwise over the anz entries of Ax, writing
// bool results into Cx.  Two cases: dense/sparse (Ab == NULL, every entry
// present) and bitmap (Ab marks which entries exist).  Cx and Ax may alias.
// Returns GrB_NO_VALUE when compile-time disabled, GrB_SUCCESS otherwise.
GrB_Info GB_unop_apply__isfinite_bool_fp32
(
    bool *Cx,               // Cx and Ax may be aliased
    const float *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: all anz entries are present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        // identity op: a plain parallel memcpy suffices (not taken here,
        // since GB_OP_IS_IDENTITY_WITH_NO_TYPECAST is 0 for this operator)
        GB_memcpy (Cx, Ax, anz * sizeof (float), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            float aij = Ax [p] ;
            float z = (aij) ;         // cast step (identity: float -> float)
            Cx [p] = isfinite (z) ;   // unary op: finiteness test
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;   // skip entries absent from the bitmap
            float aij = Ax [p] ;
            float z = (aij) ;
            Cx [p] = isfinite (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = isfinite(A'): transpose A while applying the operator.  The work is
// in the shared template GB_unop_transpose.c, which expands the GB_* macros
// defined above; this wrapper supplies the type-specialized entry point.
// Workspaces/A_slice/nworkspaces/nthreads describe the parallel partition.
// Returns GrB_NO_VALUE when compile-time disabled.
GrB_Info GB_unop_tran__isfinite_bool_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
scatter.c | #include "../../shared.h"
#include "../hale_data.h"
#include "hale.h"
#include <float.h>
#include <stdio.h>
// Scatter the subcell energy and mass quantities back to the cell centers
void scatter_energy_and_mass(
const int ncells, const double* nodes_x, const double* nodes_y,
const double* nodes_z, double* cell_volume, double* energy, double* density,
double* kinetic_energy, double* velocity_x, double* velocity_y,
double* velocity_z, double* cell_mass, double* subcell_mass,
double* subcell_ie_mass, double* subcell_ke_mass, int* faces_to_nodes,
int* faces_to_nodes_offsets, int* cells_to_faces_offsets,
int* cells_to_faces, int* cells_to_nodes, int* cells_to_nodes_offsets,
double initial_mass, double initial_ie_mass, double initial_ke_mass);
// Scatter the subcell momentum to the node centered velocities
void scatter_momentum(const int nnodes, vec_t* initial_momentum,
int* nodes_to_cells_offsets, int* nodes_to_cells,
int* cells_to_nodes_offsets, int* cells_to_nodes,
double* velocity_x, double* velocity_y,
double* velocity_z, double* nodal_mass,
double* subcell_mass, double* subcell_momentum_x,
double* subcell_momentum_y, double* subcell_momentum_z);
// Perform the scatter step of the ALE remapping algorithm
// Perform the scatter step of the ALE remapping algorithm:
// (1) recompute cell/subcell volumes and centroids on the rezoned mesh,
// (2) scatter subcell momentum back to node-centered velocities,
// (3) scatter subcell energy and mass back to cell centers.
// The initial_* arguments are the pre-remap conserved totals, used only for
// the conservation reports printed by the two scatter routines.
void scatter_phase(UnstructuredMesh* umesh, HaleData* hale_data,
                   vec_t* initial_momentum, double initial_mass,
                   double initial_ie_mass, double initial_ke_mass) {
  // Calculates the cell volume, subcell volume and the subcell centroids
  calc_volumes_centroids(
      umesh->ncells, umesh->nnodes, hale_data->nnodes_by_subcell,
      umesh->cells_to_nodes_offsets, umesh->cells_to_nodes,
      hale_data->subcells_to_faces_offsets, hale_data->subcells_to_faces,
      umesh->faces_to_nodes, umesh->faces_to_nodes_offsets,
      umesh->faces_cclockwise_cell, umesh->nodes_x0, umesh->nodes_y0,
      umesh->nodes_z0, hale_data->subcell_centroids_x,
      hale_data->subcell_centroids_y, hale_data->subcell_centroids_z,
      hale_data->subcell_volume, hale_data->cell_volume,
      hale_data->nodal_volumes, umesh->nodes_to_cells_offsets, umesh->nodes_to_cells);
  // Scatter the subcell momentum to the node centered velocities
  // (also rebuilds nodal_mass from the subcell masses).
  scatter_momentum(umesh->nnodes, initial_momentum, umesh->nodes_to_cells_offsets,
                   umesh->nodes_to_cells, umesh->cells_to_nodes_offsets,
                   umesh->cells_to_nodes, hale_data->velocity_x0,
                   hale_data->velocity_y0, hale_data->velocity_z0,
                   hale_data->nodal_mass, hale_data->subcell_mass,
                   hale_data->subcell_momentum_x, hale_data->subcell_momentum_y,
                   hale_data->subcell_momentum_z);
  // Scatter the subcell energy and mass quantities back to the cell centers
  // (must run after scatter_momentum: it reads the new nodal velocities).
  scatter_energy_and_mass(
      umesh->ncells, umesh->nodes_x0, umesh->nodes_y0, umesh->nodes_z0,
      hale_data->cell_volume, hale_data->energy0, hale_data->density0,
      hale_data->ke_mass, hale_data->velocity_x0, hale_data->velocity_y0,
      hale_data->velocity_z0, hale_data->cell_mass, hale_data->subcell_mass,
      hale_data->subcell_ie_mass, hale_data->subcell_ke_mass,
      umesh->faces_to_nodes, umesh->faces_to_nodes_offsets,
      umesh->cells_to_faces_offsets, umesh->cells_to_faces,
      umesh->cells_to_nodes, umesh->cells_to_nodes_offsets,
      initial_mass, initial_ie_mass, initial_ke_mass);
}
// Scatter the subcell energy and mass quantities back to the cell centers
// Scatter the subcell energy and mass quantities back to the cell centers.
// For each cell: sums its subcells' mass, internal-energy mass and
// kinetic-energy mass, recomputes the cell volume on the rezoned mesh, and
// derives density and specific energy.  The kinetic-energy discrepancy
// (gathered KE mass minus the KE implied by the new nodal velocities) is
// folded into the cell's energy so total energy is conserved.  Prints a
// mass/energy conservation report against the initial_* totals.
void scatter_energy_and_mass(
    const int ncells, const double* nodes_x, const double* nodes_y,
    const double* nodes_z, double* cell_volume, double* energy, double* density,
    double* ke_mass, double* velocity_x, double* velocity_y, double* velocity_z,
    double* cell_mass, double* subcell_mass, double* subcell_ie_mass,
    double* subcell_ke_mass, int* faces_to_nodes, int* faces_to_nodes_offsets,
    int* cells_to_faces_offsets, int* cells_to_faces, int* cells_to_nodes,
    int* cells_to_nodes_offsets, double initial_mass,
    double initial_ie_mass, double initial_ke_mass) {
  // Scatter energy and density, and print the conservation of mass
  double rz_total_mass = 0.0;
  double rz_total_e_mass = 0.0;
#pragma omp parallel for reduction(+ : rz_total_mass, rz_total_e_mass)
  for (int cc = 0; cc < ncells; ++cc) {
    const int cell_to_nodes_off = cells_to_nodes_offsets[(cc)];
    const int nnodes_by_cell = cells_to_nodes_offsets[(cc + 1)] - cell_to_nodes_off;
    const int cell_to_faces_off = cells_to_faces_offsets[(cc)];
    const int nfaces_by_cell =
        cells_to_faces_offsets[(cc + 1)] - cell_to_faces_off;
    double total_mass = 0.0;
    double new_ke_mass = 0.0;
    double total_ie_mass = 0.0;
    double total_ke_mass = 0.0;
    // Gather the subcell contributions for this cell.  There is one subcell
    // per (cell, node) pair, indexed like cells_to_nodes.
    for (int nn = 0; nn < nnodes_by_cell; ++nn) {
      const int node_index = cells_to_nodes[(cell_to_nodes_off + nn)];
      const int subcell_index = cell_to_nodes_off + nn;
      total_mass += subcell_mass[(subcell_index)];
      total_ie_mass += subcell_ie_mass[(subcell_index)];
      total_ke_mass += subcell_ke_mass[(subcell_index)];
      // KE implied by the freshly scattered nodal velocities: m*|v|^2/2.
      new_ke_mass += subcell_mass[(subcell_index)] * 0.5 *
                     (velocity_x[(node_index)] * velocity_x[(node_index)] +
                      velocity_y[(node_index)] * velocity_y[(node_index)] +
                      velocity_z[(node_index)] * velocity_z[(node_index)]);
    }
    // Update the volume of the cell to the new rezoned mesh
    vec_t cell_c = {0.0, 0.0, 0.0};
    calc_centroid(nnodes_by_cell, nodes_x, nodes_y, nodes_z, cells_to_nodes,
                  cell_to_nodes_off, &cell_c);
    calc_volume(cell_to_faces_off, nfaces_by_cell, cells_to_faces,
                faces_to_nodes, faces_to_nodes_offsets, nodes_x, nodes_y,
                nodes_z, &cell_c, &cell_volume[(cc)]);
    // Scatter the energy and density
    cell_mass[(cc)] = total_mass;
    density[(cc)] = cell_mass[(cc)] / cell_volume[(cc)];
    // Fold the KE remap discrepancy into the cell energy so that
    // (internal + kinetic) energy is conserved across the remap.
    const double total_e_mass = total_ie_mass + (total_ke_mass - new_ke_mass);
    energy[(cc)] = total_e_mass / cell_mass[(cc)];
    // Calculate the conservation data
    rz_total_mass += total_mass;
    rz_total_e_mass += total_e_mass;
  }
  printf("Initial Total Mass %.12f\n", initial_mass);
  printf("Rezoned Total Mass %.12f\n", rz_total_mass);
  printf("Difference   %.12f\n\n", rz_total_mass - initial_mass);
  printf("Initial Total Energy %.12f\n",
         (initial_ie_mass + initial_ke_mass));
  // NOTE(review): the label says "Internal Energy" but rz_total_e_mass also
  // carries the kinetic-energy correction term — confirm intended wording.
  printf("Rezoned Total Internal Energy %.12f\n", rz_total_e_mass);
  printf("Difference   %.12f\n\n",
         rz_total_e_mass - (initial_ie_mass + initial_ke_mass));
}
// Scatter the subcell momentum to the node centered velocities
// Scatter the subcell momentum to the node centered velocities.
// For each node: accumulates momentum and mass from the one subcell each
// neighboring cell associates with this node, stores the nodal mass, and
// sets velocity = momentum / mass.  Prints a momentum conservation report
// against *initial_momentum.
void scatter_momentum(const int nnodes, vec_t* initial_momentum,
                      int* nodes_to_cells_offsets, int* nodes_to_cells,
                      int* cells_to_nodes_offsets, int* cells_to_nodes,
                      double* velocity_x, double* velocity_y,
                      double* velocity_z, double* nodal_mass,
                      double* subcell_mass, double* subcell_momentum_x,
                      double* subcell_momentum_y, double* subcell_momentum_z) {
  double total_momentum_x = 0.0;
  double total_momentum_y = 0.0;
  double total_momentum_z = 0.0;
#pragma omp parallel for reduction(+ : total_momentum_x, total_momentum_y,    \
                                   total_momentum_z)
  for (int nn = 0; nn < nnodes; ++nn) {
    const int node_to_cells_off = nodes_to_cells_offsets[(nn)];
    const int ncells_by_node =
        nodes_to_cells_offsets[(nn + 1)] - node_to_cells_off;
    double mass_at_node = 0.0;
    double node_momentum_x = 0.0;
    double node_momentum_y = 0.0;
    double node_momentum_z = 0.0;
    // Accumulate contributions from every cell touching this node.
    for (int cc = 0; cc < ncells_by_node; ++cc) {
      const int cell_index = nodes_to_cells[(node_to_cells_off + cc)];
      const int cell_to_nodes_off = cells_to_nodes_offsets[(cell_index)];
      const int nnodes_by_cell =
          cells_to_nodes_offsets[(cell_index + 1)] - cell_to_nodes_off;
      // Determine the position of the node in the cell
      // (linear scan; subcells are indexed by (cell, local node) pairs).
      int nn2;
      for (nn2 = 0; nn2 < nnodes_by_cell; ++nn2) {
        if (cells_to_nodes[(cell_to_nodes_off + nn2)] == nn) {
          break;
        }
      }
      const int subcell_index = cell_to_nodes_off + nn2;
      node_momentum_x += subcell_momentum_x[(subcell_index)];
      node_momentum_y += subcell_momentum_y[(subcell_index)];
      node_momentum_z += subcell_momentum_z[(subcell_index)];
      mass_at_node += subcell_mass[(subcell_index)];
    }
    nodal_mass[(nn)] = mass_at_node;
    total_momentum_x += node_momentum_x;
    total_momentum_y += node_momentum_y;
    total_momentum_z += node_momentum_z;
    // NOTE(review): divides by nodal_mass with no zero guard — assumes every
    // node carries positive mass after the remap; confirm upstream invariant.
    velocity_x[(nn)] = node_momentum_x / nodal_mass[(nn)];
    velocity_y[(nn)] = node_momentum_y / nodal_mass[(nn)];
    velocity_z[(nn)] = node_momentum_z / nodal_mass[(nn)];
  }
  printf("Initial total momentum %.12f %.12f %.12f\n", initial_momentum->x,
         initial_momentum->y, initial_momentum->z);
  printf("Rezoned total momentum %.12f %.12f %.12f\n", total_momentum_x,
         total_momentum_y, total_momentum_z);
  printf("Difference         %.12f %.12f %.12f\n\n",
         initial_momentum->x - total_momentum_x,
         initial_momentum->y - total_momentum_y,
         initial_momentum->z - total_momentum_z);
}
|
parser.c | /* C++ Parser.
Copyright (C) 2000, 2001, 2002, 2003, 2004,
2005, 2007, 2008, 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
Written by Mark Mitchell <mark@codesourcery.com>.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
GCC is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "timevar.h"
#include "cpplib.h"
#include "tree.h"
#include "cp-tree.h"
#include "intl.h"
#include "c-family/c-pragma.h"
#include "decl.h"
#include "flags.h"
#include "diagnostic-core.h"
#include "output.h"
#include "target.h"
#include "cgraph.h"
#include "c-family/c-common.h"
#include "c-family/c-objc.h"
#include "plugin.h"
#include "tree-pretty-print.h"
#include "parser.h"
/* The lexer. */
/* The cp_lexer_* routines mediate between the lexer proper (in libcpp
and c-lex.c) and the C++ parser. */
static cp_token eof_token =
{
CPP_EOF, RID_MAX, 0, PRAGMA_NONE, false, false, false, 0, { NULL }
};
/* The various kinds of non integral constant we encounter. */
/* The various kinds of non integral constant we encounter.  Used by the
   parser to diagnose *why* an expression is not an integral constant
   expression; the %<...%> markers are quoting directives for the
   diagnostic machinery.  */
typedef enum non_integral_constant {
  NIC_NONE,
  /* floating-point literal */
  NIC_FLOAT,
  /* %<this%> */
  NIC_THIS,
  /* %<__FUNCTION__%> */
  NIC_FUNC_NAME,
  /* %<__PRETTY_FUNCTION__%> */
  NIC_PRETTY_FUNC,
  /* %<__func__%> */
  NIC_C99_FUNC,
  /* "%<va_arg%> */
  NIC_VA_ARG,
  /* a cast */
  NIC_CAST,
  /* %<typeid%> operator */
  NIC_TYPEID,
  /* non-constant compound literals */
  NIC_NCC,
  /* a function call */
  NIC_FUNC_CALL,
  /* an increment */
  NIC_INC,
  /* an decrement */
  NIC_DEC,
  /* an array reference */
  NIC_ARRAY_REF,
  /* %<->%> */
  NIC_ARROW,
  /* %<.%> */
  NIC_POINT,
  /* the address of a label */
  NIC_ADDR_LABEL,
  /* %<*%> */
  NIC_STAR,
  /* %<&%> */
  NIC_ADDR,
  /* %<++%> */
  NIC_PREINCREMENT,
  /* %<--%> */
  NIC_PREDECREMENT,
  /* %<new%> */
  NIC_NEW,
  /* %<delete%> */
  NIC_DEL,
  /* calls to overloaded operators */
  NIC_OVERLOADED,
  /* an assignment */
  NIC_ASSIGNMENT,
  /* a comma operator */
  NIC_COMMA,
  /* a call to a constructor */
  NIC_CONSTRUCTOR,
  /* a transaction expression */
  NIC_TRANSACTION
} non_integral_constant;
/* The various kinds of errors about name-lookup failing. */
/* The various kinds of errors about name-lookup failing.  Selects the
   wording of the "X is not a ..." diagnostic; the CXX98 variants track
   whether enumerations are acceptable scopes (C++11 scoped enums).  */
typedef enum name_lookup_error {
  /* NULL */
  NLE_NULL,
  /* is not a type */
  NLE_TYPE,
  /* is not a class or namespace */
  NLE_CXX98,
  /* is not a class, namespace, or enumeration */
  NLE_NOT_CXX98
} name_lookup_error;
/* The various kinds of required token */
/* The various kinds of required token.  Passed to the "expected X before Y"
   error machinery to name the token the parser required but did not find.  */
typedef enum required_token {
  RT_NONE,
  RT_SEMICOLON,  /* ';' */
  RT_OPEN_PAREN, /* '(' */
  RT_CLOSE_BRACE, /* '}' */
  RT_OPEN_BRACE, /* '{' */
  RT_CLOSE_SQUARE, /* ']' */
  RT_OPEN_SQUARE, /* '[' */
  RT_COMMA, /* ',' */
  RT_SCOPE, /* '::' */
  RT_LESS, /* '<' */
  RT_GREATER, /* '>' */
  RT_EQ, /* '=' */
  RT_ELLIPSIS, /* '...' */
  RT_MULT, /* '*' */
  RT_COMPL, /* '~' */
  RT_COLON, /* ':' */
  RT_COLON_SCOPE, /* ':' or '::' */
  RT_CLOSE_PAREN, /* ')' */
  RT_COMMA_CLOSE_PAREN, /* ',' or ')' */
  RT_PRAGMA_EOL, /* end of line */
  RT_NAME, /* identifier */
  /* The type is CPP_KEYWORD */
  RT_NEW, /* new */
  RT_DELETE, /* delete */
  RT_RETURN, /* return */
  RT_WHILE, /* while */
  RT_EXTERN, /* extern */
  RT_STATIC_ASSERT, /* static_assert */
  RT_DECLTYPE, /* decltype */
  RT_OPERATOR, /* operator */
  RT_CLASS, /* class */
  RT_TEMPLATE, /* template */
  RT_NAMESPACE, /* namespace */
  RT_USING, /* using */
  RT_ASM, /* asm */
  RT_TRY, /* try */
  RT_CATCH, /* catch */
  RT_THROW, /* throw */
  RT_LABEL, /* __label__ */
  RT_AT_TRY, /* @try */
  RT_AT_SYNCHRONIZED, /* @synchronized */
  RT_AT_THROW, /* @throw */
  RT_SELECT, /* selection-statement */
  /* NOTE(review): historical misspelling of "RT_ITERATION"; renaming would
     break uses elsewhere in this file, so it is kept as-is.  */
  RT_INTERATION, /* iteration-statement */
  RT_JUMP, /* jump-statement */
  RT_CLASS_KEY, /* class-key */
  RT_CLASS_TYPENAME_TEMPLATE, /* class, typename, or template */
  RT_TRANSACTION_ATOMIC, /* __transaction_atomic */
  RT_TRANSACTION_RELAXED, /* __transaction_relaxed */
  RT_TRANSACTION_CANCEL /* __transaction_cancel */
} required_token;
/* Prototypes.  Forward declarations for the lexer helpers defined
   below in this file.  */
static cp_lexer *cp_lexer_new_main
(void);
static cp_lexer *cp_lexer_new_from_tokens
(cp_token_cache *tokens);
static void cp_lexer_destroy
(cp_lexer *);
static int cp_lexer_saving_tokens
(const cp_lexer *);
static cp_token *cp_lexer_token_at
(cp_lexer *, cp_token_position);
static void cp_lexer_get_preprocessor_token
(cp_lexer *, cp_token *);
static inline cp_token *cp_lexer_peek_token
(cp_lexer *);
static cp_token *cp_lexer_peek_nth_token
(cp_lexer *, size_t);
static inline bool cp_lexer_next_token_is
(cp_lexer *, enum cpp_ttype);
static bool cp_lexer_next_token_is_not
(cp_lexer *, enum cpp_ttype);
static bool cp_lexer_next_token_is_keyword
(cp_lexer *, enum rid);
static cp_token *cp_lexer_consume_token
(cp_lexer *);
static void cp_lexer_purge_token
(cp_lexer *);
static void cp_lexer_purge_tokens_after
(cp_lexer *, cp_token_position);
static void cp_lexer_save_tokens
(cp_lexer *);
static void cp_lexer_commit_tokens
(cp_lexer *);
static void cp_lexer_rollback_tokens
(cp_lexer *);
static void cp_lexer_print_token
(FILE *, cp_token *);
static inline bool cp_lexer_debugging_p
(cp_lexer *);
/* The debugging toggles are only called by hand (from a debugger), so
   they are marked unused to silence the compiler.  */
static void cp_lexer_start_debugging
(cp_lexer *) ATTRIBUTE_UNUSED;
static void cp_lexer_stop_debugging
(cp_lexer *) ATTRIBUTE_UNUSED;
static cp_token_cache *cp_token_cache_new
(cp_token *, cp_token *);
static void cp_parser_initial_pragma
(cp_token *);
static tree cp_literal_operator_id
(const char *);
/* Manifest constants.  */
/* Initial capacity of the main token buffer: as many tokens as fit in
   256 KB.  The buffer grows on demand (see cp_lexer_new_main).  */
#define CP_LEXER_BUFFER_SIZE ((256 * 1024) / sizeof (cp_token))
/* Initial depth of the saved-token-position stack used by
   cp_lexer_save_tokens/cp_lexer_rollback_tokens.  */
#define CP_SAVED_TOKEN_STACK 5
/* Variables.  */
/* The stream to which debugging output should be written.  */
static FILE *cp_lexer_debug_stream;
/* Nonzero if we are parsing an unevaluated operand: an operand to
sizeof, typeof, or alignof.  */
int cp_unevaluated_operand;
/* Dump up to NUM tokens in BUFFER to FILE starting with token
START_TOKEN. If START_TOKEN is NULL, the dump starts with the
first token in BUFFER. If NUM is 0, dump all the tokens. If
CURR_TOKEN is set and it is one of the tokens in BUFFER, it will be
highlighted by surrounding it in [[ ]]. */
static void
cp_lexer_dump_tokens (FILE *file, VEC(cp_token,gc) *buffer,
cp_token *start_token, unsigned num,
cp_token *curr_token)
{
unsigned i, nprinted;
cp_token *token;
bool do_print;
fprintf (file, "%u tokens\n", VEC_length (cp_token, buffer));
if (buffer == NULL)
return;
if (num == 0)
num = VEC_length (cp_token, buffer);
if (start_token == NULL)
start_token = VEC_address (cp_token, buffer);
/* If the dump window does not begin at the buffer start, print the
   very first token followed by an ellipsis as context.  */
if (start_token > VEC_address (cp_token, buffer))
{
cp_lexer_print_token (file, VEC_index (cp_token, buffer, 0));
fprintf (file, " ... ");
}
do_print = false;
nprinted = 0;
/* Walk the whole buffer, but only start printing once START_TOKEN is
   reached, and stop after NUM tokens have been printed.  */
for (i = 0; VEC_iterate (cp_token, buffer, i, token) && nprinted < num; i++)
{
if (token == start_token)
do_print = true;
if (!do_print)
continue;
nprinted++;
if (token == curr_token)
fprintf (file, "[[");
cp_lexer_print_token (file, token);
if (token == curr_token)
fprintf (file, "]]");
/* Break lines at statement/brace boundaries for readability;
   otherwise separate tokens with a space.  */
switch (token->type)
{
case CPP_SEMICOLON:
case CPP_OPEN_BRACE:
case CPP_CLOSE_BRACE:
case CPP_EOF:
fputc ('\n', file);
break;
default:
fputc (' ', file);
}
}
/* If the window ended before the buffer did, print an ellipsis and
   the last token as trailing context.  */
if (i == num && i < VEC_length (cp_token, buffer))
{
fprintf (file, " ... ");
cp_lexer_print_token (file, VEC_index (cp_token, buffer,
VEC_length (cp_token, buffer) - 1));
}
fprintf (file, "\n");
}
/* Dump all tokens in BUFFER to stderr.  Convenience wrapper around
   cp_lexer_dump_tokens, intended to be called by hand from a
   debugger.  */
void
cp_lexer_debug_tokens (VEC(cp_token,gc) *buffer)
{
cp_lexer_dump_tokens (stderr, buffer, NULL, 0, NULL);
}
/* Dump the cp_parser tree field T to FILE if T is non-NULL.  DESC is
   the description printed before T.  Nothing is printed when T is
   NULL.  */
static void
cp_debug_print_tree_if_set (FILE *file, const char *desc, tree t)
{
  if (!t)
    return;
  fprintf (file, "%s: ", desc);
  print_node_brief (file, "", t, 0);
}
/* Dump parser context C to FILE.  */
static void
cp_debug_print_context (FILE *file, cp_parser_context *c)
{
/* Indexed by c->status; the order must match the context-status
   enumeration (OK, ERROR, COMMITTED).  */
const char *status_s[] = { "OK", "ERROR", "COMMITTED" };
fprintf (file, "{ status = %s, scope = ", status_s[c->status]);
/* NOTE(review): this prints c->object_type under the label "scope" --
   confirm that is the intended field.  */
print_node_brief (file, "", c->object_type, 0);
fprintf (file, "}\n");
}
/* Print the stack of parsing contexts to FILE starting with FIRST.
   Each entry is numbered from 0 at FIRST and printed via
   cp_debug_print_context.  */
static void
cp_debug_print_context_stack (FILE *file, cp_parser_context *first)
{
  cp_parser_context *ctx = first;
  unsigned depth = 0;

  fprintf (file, "Parsing context stack:\n");
  while (ctx != NULL)
    {
      fprintf (file, "\t#%u: ", depth);
      cp_debug_print_context (file, ctx);
      ctx = ctx->next;
      depth++;
    }
}
/* Print the value of FLAG to FILE.  DESC is a string describing the
   flag.  Flags that are off are omitted from the dump entirely.  */
static void
cp_debug_print_flag (FILE *file, const char *desc, bool flag)
{
  if (!flag)
    return;
  fprintf (file, "%s: true\n", desc);
}
/* Print an unparsed function entry UF to FILE.  Dumps the three
   deferred-parsing queues: functions with default arguments,
   function definitions awaiting post-processing, and non-static data
   member initializers awaiting post-processing.  */
static void
cp_debug_print_unparsed_function (FILE *file, cp_unparsed_functions_entry *uf)
{
unsigned i;
cp_default_arg_entry *default_arg_fn;
tree fn;
fprintf (file, "\tFunctions with default args:\n");
for (i = 0;
VEC_iterate (cp_default_arg_entry, uf->funs_with_default_args, i,
default_arg_fn);
i++)
{
fprintf (file, "\t\tClass type: ");
print_node_brief (file, "", default_arg_fn->class_type, 0);
fprintf (file, "\t\tDeclaration: ");
print_node_brief (file, "", default_arg_fn->decl, 0);
fprintf (file, "\n");
}
fprintf (file, "\n\tFunctions with definitions that require "
"post-processing\n\t\t");
for (i = 0; VEC_iterate (tree, uf->funs_with_definitions, i, fn); i++)
{
print_node_brief (file, "", fn, 0);
fprintf (file, " ");
}
fprintf (file, "\n");
fprintf (file, "\n\tNon-static data members with initializers that require "
"post-processing\n\t\t");
for (i = 0; VEC_iterate (tree, uf->nsdmis, i, fn); i++)
{
print_node_brief (file, "", fn, 0);
fprintf (file, " ");
}
fprintf (file, "\n");
}
/* Print the stack of unparsed member functions S to FILE.  Each entry
   in S is dumped via cp_debug_print_unparsed_function, numbered from
   0.  */
static void
cp_debug_print_unparsed_queues (FILE *file,
VEC(cp_unparsed_functions_entry, gc) *s)
{
unsigned i;
cp_unparsed_functions_entry *uf;
fprintf (file, "Unparsed functions\n");
for (i = 0; VEC_iterate (cp_unparsed_functions_entry, s, i, uf); i++)
{
fprintf (file, "#%u:\n", i);
cp_debug_print_unparsed_function (file, uf);
}
}
/* Dump the tokens in a window of size WINDOW_SIZE around the
   next_token for the given PARSER.  If FILE is NULL, the output is
   printed on stderr.  The window is centered on next_token when
   possible, otherwise anchored at the start of the buffer.  */
static void
cp_debug_parser_tokens (FILE *file, cp_parser *parser, int window_size)
{
  cp_token *next_token, *first_token, *start_token;
  int half = window_size / 2;

  if (!file)
    file = stderr;

  next_token = parser->lexer->next_token;
  first_token = VEC_address (cp_token, parser->lexer->buffer);
  if (next_token > first_token + half)
    start_token = next_token - half;
  else
    start_token = first_token;
  cp_lexer_dump_tokens (file, parser->lexer->buffer, start_token,
                        window_size, next_token);
}
/* Dump debugging information for the given PARSER.  If FILE is NULL,
   the output is printed on stderr.  Dumps the token buffer size, the
   lookup scopes, the parsing-context stack, all boolean parser flags
   that are currently set, the deferred-parsing queues, a window of
   tokens around the parse position, and the next token's location.  */
void
cp_debug_parser (FILE *file, cp_parser *parser)
{
  const size_t window_size = 20;
  cp_token *token;
  expanded_location eloc;

  if (file == NULL)
    file = stderr;

  fprintf (file, "Parser state\n\n");
  fprintf (file, "Number of tokens: %u\n",
           VEC_length (cp_token, parser->lexer->buffer));
  cp_debug_print_tree_if_set (file, "Lookup scope", parser->scope);
  cp_debug_print_tree_if_set (file, "Object scope",
                              parser->object_scope);
  cp_debug_print_tree_if_set (file, "Qualifying scope",
                              parser->qualifying_scope);
  cp_debug_print_context_stack (file, parser->context);

  /* Boolean parser flags; cp_debug_print_flag only prints the ones
     that are set.  */
  cp_debug_print_flag (file, "Allow GNU extensions",
                       parser->allow_gnu_extensions_p);
  cp_debug_print_flag (file, "'>' token is greater-than",
                       parser->greater_than_is_operator_p);
  cp_debug_print_flag (file, "Default args allowed in current "
                       "parameter list", parser->default_arg_ok_p);
  cp_debug_print_flag (file, "Parsing integral constant-expression",
                       parser->integral_constant_expression_p);
  cp_debug_print_flag (file, "Allow non-constant expression in current "
                       "constant-expression",
                       parser->allow_non_integral_constant_expression_p);
  cp_debug_print_flag (file, "Seen non-constant expression",
                       parser->non_integral_constant_expression_p);
  cp_debug_print_flag (file, "Local names and 'this' forbidden in "
                       "current context",
                       parser->local_variables_forbidden_p);
  cp_debug_print_flag (file, "In unbraced linkage specification",
                       parser->in_unbraced_linkage_specification_p);
  cp_debug_print_flag (file, "Parsing a declarator",
                       parser->in_declarator_p);
  cp_debug_print_flag (file, "In template argument list",
                       parser->in_template_argument_list_p);
  cp_debug_print_flag (file, "Parsing an iteration statement",
                       parser->in_statement & IN_ITERATION_STMT);
  cp_debug_print_flag (file, "Parsing a switch statement",
                       parser->in_statement & IN_SWITCH_STMT);
  cp_debug_print_flag (file, "Parsing a structured OpenMP block",
                       parser->in_statement & IN_OMP_BLOCK);
  /* Fixed typo in the message below ("a an" -> "an").  */
  cp_debug_print_flag (file, "Parsing an OpenMP loop",
                       parser->in_statement & IN_OMP_FOR);
  cp_debug_print_flag (file, "Parsing an if statement",
                       parser->in_statement & IN_IF_STMT);
  cp_debug_print_flag (file, "Parsing a type-id in an expression "
                       "context", parser->in_type_id_in_expr_p);
  cp_debug_print_flag (file, "Declarations are implicitly extern \"C\"",
                       parser->implicit_extern_c);
  cp_debug_print_flag (file, "String expressions should be translated "
                       "to execution character set",
                       parser->translate_strings_p);
  cp_debug_print_flag (file, "Parsing function body outside of a "
                       "local class", parser->in_function_body);
  cp_debug_print_flag (file, "Auto correct a colon to a scope operator",
                       parser->colon_corrects_to_scope_p);
  if (parser->type_definition_forbidden_message)
    fprintf (file, "Error message for forbidden type definitions: %s\n",
             parser->type_definition_forbidden_message);
  cp_debug_print_unparsed_queues (file, parser->unparsed_queues);
  fprintf (file, "Number of class definitions in progress: %u\n",
           parser->num_classes_being_defined);
  fprintf (file, "Number of template parameter lists for the current "
           "declaration: %u\n", parser->num_template_parameter_lists);
  cp_debug_parser_tokens (file, parser, window_size);
  token = parser->lexer->next_token;
  fprintf (file, "Next token to parse:\n");
  fprintf (file, "\tToken: ");
  cp_lexer_print_token (file, token);
  eloc = expand_location (token->location);
  fprintf (file, "\n\tFile: %s\n", eloc.file);
  fprintf (file, "\tLine: %d\n", eloc.line);
  fprintf (file, "\tColumn: %d\n", eloc.column);
}
/* Allocate memory for a new lexer object and return it.  The lexer
   itself and its token buffer live in GC memory; the saved-token
   stack lives on the heap (see cp_lexer_destroy).  */
static cp_lexer *
cp_lexer_alloc (void)
{
cp_lexer *lexer;
/* Once a lexer is allocated, a PCH file can no longer be loaded.  */
c_common_no_more_pch ();
/* Allocate the memory. */
lexer = ggc_alloc_cleared_cp_lexer ();
/* Initially we are not debugging. */
lexer->debugging_p = false;
lexer->saved_tokens = VEC_alloc (cp_token_position, heap,
CP_SAVED_TOKEN_STACK);
/* Create the buffer. */
lexer->buffer = VEC_alloc (cp_token, gc, CP_LEXER_BUFFER_SIZE);
return lexer;
}
/* Create a new main C++ lexer, the lexer that gets tokens from the
   preprocessor.  Reads the entire token stream (through CPP_EOF) into
   the buffer up front.  */
static cp_lexer *
cp_lexer_new_main (void)
{
cp_lexer *lexer;
cp_token token;
/* It's possible that parsing the first pragma will load a PCH file,
which is a GC collection point. So we have to do that before
allocating any memory. */
cp_parser_initial_pragma (&token);
lexer = cp_lexer_alloc ();
/* Put the first token in the buffer. */
VEC_quick_push (cp_token, lexer->buffer, &token);
/* Get the remaining tokens from the preprocessor. */
while (token.type != CPP_EOF)
{
cp_lexer_get_preprocessor_token (lexer, &token);
VEC_safe_push (cp_token, gc, lexer->buffer, &token);
}
/* last_token points at the final (EOF) token in the buffer;
   next_token at the first, or at the shared eof_token sentinel if
   the buffer is somehow empty.  */
lexer->last_token = VEC_address (cp_token, lexer->buffer)
+ VEC_length (cp_token, lexer->buffer)
- 1;
lexer->next_token = VEC_length (cp_token, lexer->buffer)
? VEC_address (cp_token, lexer->buffer)
: &eof_token;
/* Subsequent preprocessor diagnostics should use compiler
diagnostic functions to get the compiler source location. */
done_lexing = true;
gcc_assert (!lexer->next_token->purged_p);
return lexer;
}
/* Create a new lexer whose token stream is primed with the tokens in
CACHE. When these tokens are exhausted, no new tokens will be read. */
static cp_lexer *
cp_lexer_new_from_tokens (cp_token_cache *cache)
{
cp_token *first = cache->first;
cp_token *last = cache->last;
cp_lexer *lexer = ggc_alloc_cleared_cp_lexer ();
/* We do not own the buffer. */
lexer->buffer = NULL;
/* An empty cache starts directly at the EOF sentinel.  */
lexer->next_token = first == last ? &eof_token : first;
lexer->last_token = last;
lexer->saved_tokens = VEC_alloc (cp_token_position, heap,
CP_SAVED_TOKEN_STACK);
/* Initially we are not debugging. */
lexer->debugging_p = false;
gcc_assert (!lexer->next_token->purged_p);
return lexer;
}
/* Frees all resources associated with LEXER: the GC token buffer (a
   no-op for token-cache lexers, whose buffer is NULL), the heap
   saved-token stack, and the lexer object itself.  */
static void
cp_lexer_destroy (cp_lexer *lexer)
{
VEC_free (cp_token, gc, lexer->buffer);
VEC_free (cp_token_position, heap, lexer->saved_tokens);
ggc_free (lexer);
}
/* Returns nonzero if debugging information should be output for
   LEXER.  */
static inline bool
cp_lexer_debugging_p (cp_lexer *lexer)
{
return lexer->debugging_p;
}
/* Return the position of the current token, or of the previous one if
   PREVIOUS_P.  Relies on bool converting to 0/1 in the pointer
   subtraction; asking for the previous token at EOF is invalid.  */
static inline cp_token_position
cp_lexer_token_position (cp_lexer *lexer, bool previous_p)
{
gcc_assert (!previous_p || lexer->next_token != &eof_token);
return lexer->next_token - previous_p;
}
/* Return the token at position POS.  A cp_token_position is currently
   just a token pointer, so this is the identity; LEXER is kept in the
   signature for abstraction.  */
static inline cp_token *
cp_lexer_token_at (cp_lexer *lexer ATTRIBUTE_UNUSED, cp_token_position pos)
{
return pos;
}
/* Reposition LEXER so that the token at POS is the next one to be
   consumed.  */
static inline void
cp_lexer_set_token_position (cp_lexer *lexer, cp_token_position pos)
{
lexer->next_token = cp_lexer_token_at (lexer, pos);
}
/* Return the position of the most recently consumed token.  At EOF,
   the next_token pointer is the shared eof_token sentinel, so the
   previous token must be located relative to last_token instead.  */
static inline cp_token_position
cp_lexer_previous_token_position (cp_lexer *lexer)
{
  return (lexer->next_token == &eof_token
          ? lexer->last_token - 1
          : cp_lexer_token_position (lexer, true));
}
/* Return a pointer to the most recently consumed token.  */
static inline cp_token *
cp_lexer_previous_token (cp_lexer *lexer)
{
  return cp_lexer_token_at (lexer,
                            cp_lexer_previous_token_position (lexer));
}
/* nonzero if we are presently saving tokens, i.e. at least one
   position has been pushed by cp_lexer_save_tokens and not yet
   committed or rolled back.  */
static inline int
cp_lexer_saving_tokens (const cp_lexer* lexer)
{
return VEC_length (cp_token_position, lexer->saved_tokens) != 0;
}
/* Store the next token from the preprocessor in *TOKEN. Return true
if we reach EOF. If LEXER is NULL, assume we are handling an
initial #pragma pch_preprocess, and thus want the lexer to return
processed strings.
NOTE(review): despite the comment, the function's return type is
void -- the comment's "Return true" appears stale; confirm.  */
static void
cp_lexer_get_preprocessor_token (cp_lexer *lexer, cp_token *token)
{
/* Running count of nested implicit extern "C" regions, accumulated
   across calls via pending_lang_change.  */
static int is_extern_c = 0;
/* Get a new token from the preprocessor. */
token->type
= c_lex_with_flags (&token->u.value, &token->location, &token->flags,
lexer == NULL ? 0 : C_LEX_STRING_NO_JOIN);
token->keyword = RID_MAX;
token->pragma_kind = PRAGMA_NONE;
token->purged_p = false;
/* On some systems, some header files are surrounded by an
implicit extern "C" block. Set a flag in the token if it
comes from such a header. */
is_extern_c += pending_lang_change;
pending_lang_change = 0;
token->implicit_extern_c = is_extern_c > 0;
/* Check to see if this token is a keyword. */
if (token->type == CPP_NAME)
{
if (C_IS_RESERVED_WORD (token->u.value))
{
/* Mark this token as a keyword. */
token->type = CPP_KEYWORD;
/* Record which keyword. */
token->keyword = C_RID_CODE (token->u.value);
}
else
{
/* Identifiers that are C++11 keywords get a one-time
   compatibility warning and are then demoted to plain
   identifiers.  */
if (warn_cxx0x_compat
&& C_RID_CODE (token->u.value) >= RID_FIRST_CXX0X
&& C_RID_CODE (token->u.value) <= RID_LAST_CXX0X)
{
/* Warn about the C++0x keyword (but still treat it as
an identifier). */
warning (OPT_Wc__0x_compat,
"identifier %qE is a keyword in C++11",
token->u.value);
/* Clear out the C_RID_CODE so we don't warn about this
particular identifier-turned-keyword again. */
C_SET_RID_CODE (token->u.value, RID_MAX);
}
token->ambiguous_p = false;
token->keyword = RID_MAX;
}
}
else if (token->type == CPP_AT_NAME)
{
/* This only happens in Objective-C++; it must be a keyword. */
token->type = CPP_KEYWORD;
switch (C_RID_CODE (token->u.value))
{
/* Replace 'class' with '@class', 'private' with '@private',
etc. This prevents confusion with the C++ keyword
'class', and makes the tokens consistent with other
Objective-C 'AT' keywords. For example '@class' is
reported as RID_AT_CLASS which is consistent with
'@synchronized', which is reported as
RID_AT_SYNCHRONIZED.
*/
case RID_CLASS: token->keyword = RID_AT_CLASS; break;
case RID_PRIVATE: token->keyword = RID_AT_PRIVATE; break;
case RID_PROTECTED: token->keyword = RID_AT_PROTECTED; break;
case RID_PUBLIC: token->keyword = RID_AT_PUBLIC; break;
case RID_THROW: token->keyword = RID_AT_THROW; break;
case RID_TRY: token->keyword = RID_AT_TRY; break;
case RID_CATCH: token->keyword = RID_AT_CATCH; break;
default: token->keyword = C_RID_CODE (token->u.value);
}
}
else if (token->type == CPP_PRAGMA)
{
/* We smuggled the cpp_token->u.pragma value in an INTEGER_CST. */
token->pragma_kind = ((enum pragma_kind)
TREE_INT_CST_LOW (token->u.value));
token->u.value = NULL_TREE;
}
}
/* Update the global input_location from TOKEN.  The location of the
   EOF token is deliberately not propagated.  */
static inline void
cp_lexer_set_source_position_from_token (cp_token *token)
{
  if (token->type == CPP_EOF)
    return;
  input_location = token->location;
}
/* Return a pointer to the next token in the token stream, but do not
consume it.  When debugging is enabled, the peeked token is also
printed to the debug stream.  */
static inline cp_token *
cp_lexer_peek_token (cp_lexer *lexer)
{
if (cp_lexer_debugging_p (lexer))
{
fputs ("cp_lexer: peeking at token: ", cp_lexer_debug_stream);
cp_lexer_print_token (cp_lexer_debug_stream, lexer->next_token);
putc ('\n', cp_lexer_debug_stream);
}
return lexer->next_token;
}
/* Return true if the next token has the indicated TYPE.  */
static inline bool
cp_lexer_next_token_is (cp_lexer* lexer, enum cpp_ttype type)
{
return cp_lexer_peek_token (lexer)->type == type;
}
/* Return true if the next token does not have the indicated TYPE.  */
static inline bool
cp_lexer_next_token_is_not (cp_lexer* lexer, enum cpp_ttype type)
{
  return cp_lexer_peek_token (lexer)->type != type;
}
/* Return true if the next token is the indicated KEYWORD.  */
static inline bool
cp_lexer_next_token_is_keyword (cp_lexer* lexer, enum rid keyword)
{
return cp_lexer_peek_token (lexer)->keyword == keyword;
}
/* Return true if the next token is not the indicated KEYWORD.  */
static inline bool
cp_lexer_next_token_is_not_keyword (cp_lexer* lexer, enum rid keyword)
{
return cp_lexer_peek_token (lexer)->keyword != keyword;
}
/* Return true if the next token is a keyword that can begin a
   decl-specifier: a storage class, an elaborated-type keyword, a
   simple type specifier, or one of the GNU/C++0x extensions listed
   below.  */
static bool
cp_lexer_next_token_is_decl_specifier_keyword (cp_lexer *lexer)
{
cp_token *token;
token = cp_lexer_peek_token (lexer);
switch (token->keyword)
{
/* auto specifier: storage-class-specifier in C++,
simple-type-specifier in C++0x. */
case RID_AUTO:
/* Storage classes. */
case RID_REGISTER:
case RID_STATIC:
case RID_EXTERN:
case RID_MUTABLE:
case RID_THREAD:
/* Elaborated type specifiers. */
case RID_ENUM:
case RID_CLASS:
case RID_STRUCT:
case RID_UNION:
case RID_TYPENAME:
/* Simple type specifiers. */
case RID_CHAR:
case RID_CHAR16:
case RID_CHAR32:
case RID_WCHAR:
case RID_BOOL:
case RID_SHORT:
case RID_INT:
case RID_LONG:
case RID_INT128:
case RID_SIGNED:
case RID_UNSIGNED:
case RID_FLOAT:
case RID_DOUBLE:
case RID_VOID:
/* GNU extensions. */
case RID_ATTRIBUTE:
case RID_TYPEOF:
/* C++0x extensions. */
case RID_DECLTYPE:
case RID_UNDERLYING_TYPE:
return true;
default:
return false;
}
}
/* Returns TRUE iff the token T begins a decltype type, either as the
   RID_DECLTYPE keyword or as an already-folded CPP_DECLTYPE token.  */
static bool
token_is_decltype (cp_token *t)
{
  if (t->keyword == RID_DECLTYPE)
    return true;
  return t->type == CPP_DECLTYPE;
}
/* Returns TRUE iff the next token begins a decltype type.  */
static bool
cp_lexer_next_token_is_decltype (cp_lexer *lexer)
{
  return token_is_decltype (cp_lexer_peek_token (lexer));
}
/* Return a pointer to the Nth token in the token stream. If N is 1,
then this is precisely equivalent to cp_lexer_peek_token (except
that it is not inline). One would like to disallow that case, but
there is one case (cp_parser_nth_token_starts_template_id) where
the caller passes a variable for N and it might be 1.
Purged tokens are skipped; running off the buffer yields the shared
eof_token sentinel.  */
static cp_token *
cp_lexer_peek_nth_token (cp_lexer* lexer, size_t n)
{
cp_token *token;
/* N is 1-based, not zero-based. */
gcc_assert (n > 0);
if (cp_lexer_debugging_p (lexer))
fprintf (cp_lexer_debug_stream,
"cp_lexer: peeking ahead %ld at token: ", (long)n);
--n;
token = lexer->next_token;
gcc_assert (!n || token != &eof_token);
/* Advance over n more non-purged tokens.  */
while (n != 0)
{
++token;
if (token == lexer->last_token)
{
token = &eof_token;
break;
}
if (!token->purged_p)
--n;
}
if (cp_lexer_debugging_p (lexer))
{
cp_lexer_print_token (cp_lexer_debug_stream, token);
putc ('\n', cp_lexer_debug_stream);
}
return token;
}
/* Return the next token, and advance the lexer's next_token pointer
to point to the next non-purged token.  Also updates the global
input_location from the consumed token.  */
static cp_token *
cp_lexer_consume_token (cp_lexer* lexer)
{
cp_token *token = lexer->next_token;
gcc_assert (token != &eof_token);
/* While handling a pragma, the end-of-line token must be consumed
   through the dedicated pragma machinery, not here.  */
gcc_assert (!lexer->in_pragma || token->type != CPP_PRAGMA_EOL);
do
{
lexer->next_token++;
if (lexer->next_token == lexer->last_token)
{
lexer->next_token = &eof_token;
break;
}
}
while (lexer->next_token->purged_p);
cp_lexer_set_source_position_from_token (token);
/* Provide debugging output. */
if (cp_lexer_debugging_p (lexer))
{
fputs ("cp_lexer: consuming token: ", cp_lexer_debug_stream);
cp_lexer_print_token (cp_lexer_debug_stream, token);
putc ('\n', cp_lexer_debug_stream);
}
return token;
}
/* Permanently remove the next token from the token stream, and
advance the next_token pointer to refer to the next non-purged
token.  */
static void
cp_lexer_purge_token (cp_lexer *lexer)
{
cp_token *tok = lexer->next_token;
gcc_assert (tok != &eof_token);
tok->purged_p = true;
tok->location = UNKNOWN_LOCATION;
/* Clearing the value drops the token's reference to its tree node;
   presumably this lets the GC reclaim it -- confirm.  */
tok->u.value = NULL_TREE;
tok->keyword = RID_MAX;
do
{
tok++;
if (tok == lexer->last_token)
{
tok = &eof_token;
break;
}
}
while (tok->purged_p);
lexer->next_token = tok;
}
/* Permanently remove all tokens after TOK, up to, but not
including, the token that will be returned next by
cp_lexer_peek_token.  TOK itself is not purged.  */
static void
cp_lexer_purge_tokens_after (cp_lexer *lexer, cp_token *tok)
{
cp_token *peek = lexer->next_token;
/* At EOF, purge through the end of the real buffer.  */
if (peek == &eof_token)
peek = lexer->last_token;
gcc_assert (tok < peek);
for ( tok += 1; tok != peek; tok += 1)
{
tok->purged_p = true;
tok->location = UNKNOWN_LOCATION;
tok->u.value = NULL_TREE;
tok->keyword = RID_MAX;
}
}
/* Begin saving tokens. All tokens consumed after this point will be
preserved.  Pushes the current position onto the saved-token stack;
pair with cp_lexer_commit_tokens or cp_lexer_rollback_tokens.  */
static void
cp_lexer_save_tokens (cp_lexer* lexer)
{
/* Provide debugging output. */
if (cp_lexer_debugging_p (lexer))
fprintf (cp_lexer_debug_stream, "cp_lexer: saving tokens\n");
VEC_safe_push (cp_token_position, heap,
lexer->saved_tokens, lexer->next_token);
}
/* Commit to the portion of the token stream most recently saved:
   discard the saved position without moving next_token.  */
static void
cp_lexer_commit_tokens (cp_lexer* lexer)
{
/* Provide debugging output. */
if (cp_lexer_debugging_p (lexer))
fprintf (cp_lexer_debug_stream, "cp_lexer: committing tokens\n");
VEC_pop (cp_token_position, lexer->saved_tokens);
}
/* Return all tokens saved since the last call to cp_lexer_save_tokens
to the token stream. Stop saving tokens: next_token is reset to the
popped position.  */
static void
cp_lexer_rollback_tokens (cp_lexer* lexer)
{
/* Provide debugging output. */
if (cp_lexer_debugging_p (lexer))
fprintf (cp_lexer_debug_stream, "cp_lexer: restoring tokens\n");
lexer->next_token = VEC_pop (cp_token_position, lexer->saved_tokens);
}
/* Print a representation of the TOKEN on the STREAM.  */
static void
cp_lexer_print_token (FILE * stream, cp_token *token)
{
/* We don't use cpp_type2name here because the parser defines
a few tokens of its own.  The table is generated from cpplib's
TTYPE_TABLE via the OP/TK x-macros, then extended with the
parser-specific token names; its order must match the token-type
enumeration.  */
static const char *const token_names[] = {
/* cpplib-defined token types */
#define OP(e, s) #e,
#define TK(e, s) #e,
TTYPE_TABLE
#undef OP
#undef TK
/* C++ parser token types - see "Manifest constants", above. */
"KEYWORD",
"TEMPLATE_ID",
"NESTED_NAME_SPECIFIER",
};
/* For some tokens, print the associated data. */
switch (token->type)
{
case CPP_KEYWORD:
/* Some keywords have a value that is not an IDENTIFIER_NODE.
For example, `struct' is mapped to an INTEGER_CST. */
if (TREE_CODE (token->u.value) != IDENTIFIER_NODE)
break;
/* else fall through */
case CPP_NAME:
fputs (IDENTIFIER_POINTER (token->u.value), stream);
break;
case CPP_STRING:
case CPP_STRING16:
case CPP_STRING32:
case CPP_WSTRING:
case CPP_UTF8STRING:
fprintf (stream, " \"%s\"", TREE_STRING_POINTER (token->u.value));
break;
case CPP_NUMBER:
print_generic_expr (stream, token->u.value, 0);
break;
default:
/* If we have a name for the token, print it out. Otherwise, we
simply give the numeric code. */
if (token->type < ARRAY_SIZE(token_names))
fputs (token_names[token->type], stream);
else
fprintf (stream, "[%d]", token->type);
break;
}
}
/* Start emitting debugging information for LEXER.  Debug output goes
   to stderr.  Intended to be called by hand from a debugger.  */
static void
cp_lexer_start_debugging (cp_lexer* lexer)
{
  cp_lexer_debug_stream = stderr;
  lexer->debugging_p = true;
}
/* Stop emitting debugging information for LEXER and detach the debug
   stream.  Intended to be called by hand from a debugger.  */
static void
cp_lexer_stop_debugging (cp_lexer* lexer)
{
  cp_lexer_debug_stream = NULL;
  lexer->debugging_p = false;
}
/* Create a new cp_token_cache, representing a range of tokens
   [FIRST, LAST).  The cache does not copy the tokens; it just records
   the boundary pointers.  */
static cp_token_cache *
cp_token_cache_new (cp_token *first, cp_token *last)
{
cp_token_cache *cache = ggc_alloc_cp_token_cache ();
cache->first = first;
cache->last = last;
return cache;
}
/* Decl-specifiers.  */
/* Set *DECL_SPECS to represent an empty decl-specifier-seq.  */
static void
clear_decl_specs (cp_decl_specifier_seq *decl_specs)
{
  memset (decl_specs, 0, sizeof (*decl_specs));
}
/* Declarators.  */
/* Nothing other than the parser should be creating declarators;
declarators are a semi-syntactic representation of C++ entities.
Other parts of the front end that need to create entities (like
VAR_DECLs or FUNCTION_DECLs) should do that directly.  */
/* Forward declarations for the declarator constructors defined
   below.  */
static cp_declarator *make_call_declarator
(cp_declarator *, tree, cp_cv_quals, cp_virt_specifiers, tree, tree);
static cp_declarator *make_array_declarator
(cp_declarator *, tree);
static cp_declarator *make_pointer_declarator
(cp_cv_quals, cp_declarator *);
static cp_declarator *make_reference_declarator
(cp_cv_quals, cp_declarator *, bool);
static cp_parameter_declarator *make_parameter_declarator
(cp_decl_specifier_seq *, cp_declarator *, tree);
static cp_declarator *make_ptrmem_declarator
(cp_cv_quals, tree, cp_declarator *);
/* An erroneous declarator.  */
static cp_declarator *cp_error_declarator;
/* The obstack on which declarators and related data structures are
allocated.  */
static struct obstack declarator_obstack;
/* Alloc BYTES from the declarator memory pool (declarator_obstack).
   The memory is released in bulk when the obstack is freed, not per
   declarator.  */
static inline void *
alloc_declarator (size_t bytes)
{
return obstack_alloc (&declarator_obstack, bytes);
}
/* Allocate a declarator of the indicated KIND. Clear fields that are
common to all declarators.  The kind-specific union fields are left
for the caller to fill in.  */
static cp_declarator *
make_declarator (cp_declarator_kind kind)
{
cp_declarator *declarator;
declarator = (cp_declarator *) alloc_declarator (sizeof (cp_declarator));
declarator->kind = kind;
declarator->attributes = NULL_TREE;
declarator->declarator = NULL;
declarator->parameter_pack_p = false;
declarator->id_loc = UNKNOWN_LOCATION;
return declarator;
}
/* Make a declarator for a generalized identifier. If
QUALIFYING_SCOPE is non-NULL, the identifier is
QUALIFYING_SCOPE::UNQUALIFIED_NAME; otherwise, it is just
UNQUALIFIED_NAME. SFK indicates the kind of special function this
is, if any.  */
static cp_declarator *
make_id_declarator (tree qualifying_scope, tree unqualified_name,
special_function_kind sfk)
{
cp_declarator *declarator;
/* It is valid to write:
class C { void f(); };
typedef C D;
void D::f();
The standard is not clear about whether `typedef const C D' is
legal; as of 2002-09-15 the committee is considering that
question. EDG 3.0 allows that syntax. Therefore, we do as
well.  The main variant strips such cv-qualification.  */
if (qualifying_scope && TYPE_P (qualifying_scope))
qualifying_scope = TYPE_MAIN_VARIANT (qualifying_scope);
/* Only plain identifiers, destructor names, and template-ids are
   valid unqualified names here.  */
gcc_assert (TREE_CODE (unqualified_name) == IDENTIFIER_NODE
|| TREE_CODE (unqualified_name) == BIT_NOT_EXPR
|| TREE_CODE (unqualified_name) == TEMPLATE_ID_EXPR);
declarator = make_declarator (cdk_id);
declarator->u.id.qualifying_scope = qualifying_scope;
declarator->u.id.unqualified_name = unqualified_name;
declarator->u.id.sfk = sfk;
return declarator;
}
/* Make a declarator for a pointer to TARGET.  CV_QUALIFIERS is a list
   of modifiers such as const or volatile to apply to the pointer
   type, represented as identifiers.  The parameter-pack flag and the
   id location migrate from TARGET to the new outermost declarator.  */
cp_declarator *
make_pointer_declarator (cp_cv_quals cv_qualifiers, cp_declarator *target)
{
  cp_declarator *d = make_declarator (cdk_pointer);

  d->declarator = target;
  d->u.pointer.qualifiers = cv_qualifiers;
  d->u.pointer.class_type = NULL_TREE;
  d->parameter_pack_p = false;
  if (target != NULL)
    {
      d->id_loc = target->id_loc;
      /* The ellipsis belongs to the outermost declarator.  */
      d->parameter_pack_p = target->parameter_pack_p;
      target->parameter_pack_p = false;
    }
  return d;
}
/* Like make_pointer_declarator -- but for references.  RVALUE_REF is
   true for an rvalue reference (&&), false for an lvalue reference
   (&).  */
cp_declarator *
make_reference_declarator (cp_cv_quals cv_qualifiers, cp_declarator *target,
bool rvalue_ref)
{
  cp_declarator *d = make_declarator (cdk_reference);

  d->declarator = target;
  d->u.reference.qualifiers = cv_qualifiers;
  d->u.reference.rvalue_ref = rvalue_ref;
  d->parameter_pack_p = false;
  if (target != NULL)
    {
      d->id_loc = target->id_loc;
      /* The ellipsis belongs to the outermost declarator.  */
      d->parameter_pack_p = target->parameter_pack_p;
      target->parameter_pack_p = false;
    }
  return d;
}
/* Like make_pointer_declarator -- but for a pointer to a non-static
member of CLASS_TYPE.
NOTE(review): unlike make_pointer_declarator, this does not copy
POINTEE's id_loc to the new declarator -- confirm whether that is
intentional.  */
cp_declarator *
make_ptrmem_declarator (cp_cv_quals cv_qualifiers, tree class_type,
cp_declarator *pointee)
{
cp_declarator *declarator;
declarator = make_declarator (cdk_ptrmem);
declarator->declarator = pointee;
declarator->u.pointer.qualifiers = cv_qualifiers;
declarator->u.pointer.class_type = class_type;
if (pointee)
{
/* The ellipsis belongs to the outermost declarator.  */
declarator->parameter_pack_p = pointee->parameter_pack_p;
pointee->parameter_pack_p = false;
}
else
declarator->parameter_pack_p = false;
return declarator;
}
/* Make a declarator for the function given by TARGET, with the
indicated PARMS. The CV_QUALIFIERS apply to the function, as in a
"const"-qualified member function. The EXCEPTION_SPECIFICATION
indicates what exceptions can be thrown.  VIRT_SPECIFIERS carries
override/final, and LATE_RETURN_TYPE the trailing return type, if
any.  */
cp_declarator *
make_call_declarator (cp_declarator *target,
tree parms,
cp_cv_quals cv_qualifiers,
cp_virt_specifiers virt_specifiers,
tree exception_specification,
tree late_return_type)
{
cp_declarator *declarator;
declarator = make_declarator (cdk_function);
declarator->declarator = target;
declarator->u.function.parameters = parms;
declarator->u.function.qualifiers = cv_qualifiers;
declarator->u.function.virt_specifiers = virt_specifiers;
declarator->u.function.exception_specification = exception_specification;
declarator->u.function.late_return_type = late_return_type;
if (target)
{
declarator->id_loc = target->id_loc;
/* The ellipsis belongs to the outermost declarator.  */
declarator->parameter_pack_p = target->parameter_pack_p;
target->parameter_pack_p = false;
}
else
declarator->parameter_pack_p = false;
return declarator;
}
/* Make a declarator for an array of BOUNDS elements, each of which is
   defined by ELEMENT.  */
cp_declarator *
make_array_declarator (cp_declarator *element, tree bounds)
{
  cp_declarator *d = make_declarator (cdk_array);

  d->declarator = element;
  d->u.array.bounds = bounds;
  d->parameter_pack_p = false;
  if (element != NULL)
    {
      d->id_loc = element->id_loc;
      /* The ellipsis belongs to the outermost declarator.  */
      d->parameter_pack_p = element->parameter_pack_p;
      element->parameter_pack_p = false;
    }
  return d;
}
/* Determine whether the declarator we've seen so far can be a
   parameter pack, when followed by an ellipsis.  Walk inward through
   the declarator chain: hitting a declarator name (cdk_id) or an
   array (cdk_array) means the ellipsis cannot follow here, so the
   declarator cannot be a pack; an erroneous declarator is treated
   permissively.  */
static bool
declarator_can_be_parameter_pack (cp_declarator *declarator)
{
  for (; declarator; declarator = declarator->declarator)
    switch ((int)declarator->kind)
      {
      case cdk_id:
      case cdk_array:
        /* The pack ellipsis would have to appear before this point.  */
        return false;
      case cdk_error:
        return true;
      default:
        break;
      }
  /* Ran out of declarators without finding a blocker.  */
  return true;
}
/* A shared parameter declarator representing an empty parameter list;
   presumably initialized during parser setup elsewhere -- confirm.  */
cp_parameter_declarator *no_parameters;
/* Create a parameter declarator with the indicated DECL_SPECIFIERS,
DECLARATOR and DEFAULT_ARGUMENT.  A NULL DECL_SPECIFIERS yields an
empty decl-specifier-seq.  The new node is not linked into any list
(next is NULL) and is not an ellipsis parameter.  */
cp_parameter_declarator *
make_parameter_declarator (cp_decl_specifier_seq *decl_specifiers,
cp_declarator *declarator,
tree default_argument)
{
cp_parameter_declarator *parameter;
parameter = ((cp_parameter_declarator *)
alloc_declarator (sizeof (cp_parameter_declarator)));
parameter->next = NULL;
if (decl_specifiers)
parameter->decl_specifiers = *decl_specifiers;
else
clear_decl_specs (&parameter->decl_specifiers);
parameter->declarator = declarator;
parameter->default_argument = default_argument;
parameter->ellipsis_p = false;
return parameter;
}
/* Returns true iff DECLARATOR is a declaration for a function: a
   cdk_function declarator wrapped directly around the declarator-id.
   Stops (returning false) once the id or an error is reached without
   such a match.  */
static bool
function_declarator_p (const cp_declarator *declarator)
{
  for (; declarator; declarator = declarator->declarator)
    {
      if (declarator->kind == cdk_function
          && declarator->declarator->kind == cdk_id)
        return true;
      if (declarator->kind == cdk_id
          || declarator->kind == cdk_error)
        return false;
    }
  return false;
}
/* The parser. */
/* Overview
--------
A cp_parser parses the token stream as specified by the C++
grammar. Its job is purely parsing, not semantic analysis. For
example, the parser breaks the token stream into declarators,
expressions, statements, and other similar syntactic constructs.
It does not check that the types of the expressions on either side
of an assignment-statement are compatible, or that a function is
not declared with a parameter of type `void'.
The parser invokes routines elsewhere in the compiler to perform
semantic analysis and to build up the abstract syntax tree for the
code processed.
The parser (and the template instantiation code, which is, in a
way, a close relative of parsing) are the only parts of the
compiler that should be calling push_scope and pop_scope, or
related functions. The parser (and template instantiation code)
keeps track of what scope is presently active; everything else
should simply honor that. (The code that generates static
initializers may also need to set the scope, in order to check
access control correctly when emitting the initializers.)
Methodology
-----------
The parser is of the standard recursive-descent variety. Upcoming
tokens in the token stream are examined in order to determine which
production to use when parsing a non-terminal. Some C++ constructs
require arbitrary look ahead to disambiguate. For example, it is
impossible, in the general case, to tell whether a statement is an
expression or declaration without scanning the entire statement.
Therefore, the parser is capable of "parsing tentatively." When the
parser is not sure what construct comes next, it enters this mode.
Then, while we attempt to parse the construct, the parser queues up
error messages, rather than issuing them immediately, and saves the
tokens it consumes. If the construct is parsed successfully, the
parser "commits", i.e., it issues any queued error messages and
the tokens that were being preserved are permanently discarded.
If, however, the construct is not parsed successfully, the parser
rolls back its state completely so that it can resume parsing using
a different alternative.
Future Improvements
-------------------
The performance of the parser could probably be improved substantially.
We could often eliminate the need to parse tentatively by looking ahead
a little bit. In some places, this approach might not entirely eliminate
the need to parse tentatively, but it might still speed up the average
case. */
/* Flags that are passed to some parsing functions.  These values can
   be bitwise-ored together; each flag occupies a distinct bit.  */
enum
{
  /* No flags.  */
  CP_PARSER_FLAGS_NONE = 0x0,
  /* The construct is optional.  If it is not present, then no error
     should be issued.  */
  CP_PARSER_FLAGS_OPTIONAL = 0x1,
  /* When parsing a type-specifier, treat user-defined type-names
     as non-type identifiers.  */
  CP_PARSER_FLAGS_NO_USER_DEFINED_TYPES = 0x2,
  /* When parsing a type-specifier, do not try to parse a class-specifier
     or enum-specifier.  */
  CP_PARSER_FLAGS_NO_TYPE_DEFINITIONS = 0x4,
  /* When parsing a decl-specifier-seq, only allow type-specifier or
     constexpr.  */
  CP_PARSER_FLAGS_ONLY_TYPE_OR_CONSTEXPR = 0x8
};
/* This type is used for parameters and variables which hold
   combinations of the above flags.  (A plain int rather than the
   anonymous enum, since a combination of flags is not itself an
   enumerator.)  */
typedef int cp_parser_flags;
/* The different kinds of declarators we want to parse.  Passed to
   cp_parser_declarator and friends to constrain what is accepted.  */
typedef enum cp_parser_declarator_kind
{
  /* We want an abstract declarator.  */
  CP_PARSER_DECLARATOR_ABSTRACT,
  /* We want a named declarator.  */
  CP_PARSER_DECLARATOR_NAMED,
  /* We don't mind, but the name must be an unqualified-id.  */
  CP_PARSER_DECLARATOR_EITHER
} cp_parser_declarator_kind;
/* The precedence values used to parse binary expressions.  The minimum value
   of PREC must be 1, because zero is reserved to quickly discriminate
   binary operators from other tokens.  Values are ordered from lowest
   precedence (logical-or) to highest (pointer-to-member).  */
enum cp_parser_prec
{
  PREC_NOT_OPERATOR,
  PREC_LOGICAL_OR_EXPRESSION,
  PREC_LOGICAL_AND_EXPRESSION,
  PREC_INCLUSIVE_OR_EXPRESSION,
  PREC_EXCLUSIVE_OR_EXPRESSION,
  PREC_AND_EXPRESSION,
  PREC_EQUALITY_EXPRESSION,
  PREC_RELATIONAL_EXPRESSION,
  PREC_SHIFT_EXPRESSION,
  PREC_ADDITIVE_EXPRESSION,
  PREC_MULTIPLICATIVE_EXPRESSION,
  PREC_PM_EXPRESSION,
  /* Number of distinct operator precedence levels; sizes the
     expression stack below.  */
  NUM_PREC_VALUES = PREC_PM_EXPRESSION
};
/* A mapping from a token type to a corresponding tree node type, with a
   precedence value.  Entries of this type populate the binops[] table
   used by cp_parser_binary_expression.  */
typedef struct cp_parser_binary_operations_map_node
{
  /* The token type.  */
  enum cpp_ttype token_type;
  /* The corresponding tree code.  */
  enum tree_code tree_type;
  /* The precedence of this operator.  */
  enum cp_parser_prec prec;
} cp_parser_binary_operations_map_node;
/* One frame of the operator-precedence parse in
   cp_parser_binary_expression: a pending left operand together with
   the operator that will combine it with the expression being
   parsed.  */
typedef struct cp_parser_expression_stack_entry
{
  /* Left hand side of the binary operation we are currently
     parsing.  */
  tree lhs;
  /* Original tree code for left hand side, if it was a binary
     expression itself (used for -Wparentheses).  */
  enum tree_code lhs_type;
  /* Tree code for the binary operation we are parsing.  */
  enum tree_code tree_type;
  /* Precedence of the binary operation we are parsing.  */
  enum cp_parser_prec prec;
} cp_parser_expression_stack_entry;
/* The stack for storing partial expressions.  We only need NUM_PREC_VALUES
   entries because precedence levels on the stack are monotonically
   increasing.  */
typedef struct cp_parser_expression_stack_entry
  cp_parser_expression_stack[NUM_PREC_VALUES];
/* Prototypes.  */
/* Constructors and destructors.  */
static cp_parser_context *cp_parser_context_new
  (cp_parser_context *);
/* Class variables.  */
/* A chain of previously-allocated, currently-unused parser contexts,
   linked through their NEXT fields and recycled by
   cp_parser_context_new.  GTY((deletable)) lets the collector
   reclaim the whole list.  */
static GTY((deletable)) cp_parser_context* cp_parser_context_free_list;
/* The operator-precedence table used by cp_parser_binary_expression.
   Transformed into an associative array (binops_by_token) by
   cp_parser_new.  Each entry maps a preprocessor token to the tree
   code for the corresponding binary operation and that operator's
   precedence.  */
static const cp_parser_binary_operations_map_node binops[] = {
  { CPP_DEREF_STAR, MEMBER_REF, PREC_PM_EXPRESSION },
  { CPP_DOT_STAR, DOTSTAR_EXPR, PREC_PM_EXPRESSION },
  { CPP_MULT, MULT_EXPR, PREC_MULTIPLICATIVE_EXPRESSION },
  { CPP_DIV, TRUNC_DIV_EXPR, PREC_MULTIPLICATIVE_EXPRESSION },
  { CPP_MOD, TRUNC_MOD_EXPR, PREC_MULTIPLICATIVE_EXPRESSION },
  { CPP_PLUS, PLUS_EXPR, PREC_ADDITIVE_EXPRESSION },
  { CPP_MINUS, MINUS_EXPR, PREC_ADDITIVE_EXPRESSION },
  { CPP_LSHIFT, LSHIFT_EXPR, PREC_SHIFT_EXPRESSION },
  { CPP_RSHIFT, RSHIFT_EXPR, PREC_SHIFT_EXPRESSION },
  { CPP_LESS, LT_EXPR, PREC_RELATIONAL_EXPRESSION },
  { CPP_GREATER, GT_EXPR, PREC_RELATIONAL_EXPRESSION },
  { CPP_LESS_EQ, LE_EXPR, PREC_RELATIONAL_EXPRESSION },
  { CPP_GREATER_EQ, GE_EXPR, PREC_RELATIONAL_EXPRESSION },
  { CPP_EQ_EQ, EQ_EXPR, PREC_EQUALITY_EXPRESSION },
  { CPP_NOT_EQ, NE_EXPR, PREC_EQUALITY_EXPRESSION },
  { CPP_AND, BIT_AND_EXPR, PREC_AND_EXPRESSION },
  { CPP_XOR, BIT_XOR_EXPR, PREC_EXCLUSIVE_OR_EXPRESSION },
  { CPP_OR, BIT_IOR_EXPR, PREC_INCLUSIVE_OR_EXPRESSION },
  { CPP_AND_AND, TRUTH_ANDIF_EXPR, PREC_LOGICAL_AND_EXPRESSION },
  { CPP_OR_OR, TRUTH_ORIF_EXPR, PREC_LOGICAL_OR_EXPRESSION }
};
/* The same as binops, but initialized by cp_parser_new so that
   binops_by_token[N].token_type == N.  Used in cp_parser_binary_expression
   for speed.  */
static cp_parser_binary_operations_map_node binops_by_token[N_CP_TTYPES];
/* Constructors and destructors.  */
/* Construct a new context.  The context below this one on the stack
   is given by NEXT.  */
static cp_parser_context *
cp_parser_context_new (cp_parser_context* next)
{
  cp_parser_context *ctx;

  /* Reuse an entry from the free list when one is available;
     otherwise allocate fresh zeroed storage.  */
  if (cp_parser_context_free_list == NULL)
    ctx = ggc_alloc_cleared_cp_parser_context ();
  else
    {
      /* Pull the first entry from the free list and re-zero it.  */
      ctx = cp_parser_context_free_list;
      cp_parser_context_free_list = ctx->next;
      memset (ctx, 0, sizeof (*ctx));
    }

  /* No errors have occurred yet in this context.  */
  ctx->status = CP_PARSER_STATUS_KIND_NO_ERROR;

  /* If this is not the bottommost context, copy information that we
     need from the previous context.  */
  if (next != NULL)
    {
      /* If, in the NEXT context, we are parsing an `x->' or `x.'
	 expression, then we are parsing one in this context, too.  */
      ctx->object_type = next->object_type;
      /* Thread the stack.  */
      ctx->next = next;
    }
  return ctx;
}
/* Managing the unparsed function queues.  */
/* Accessors for the members of the topmost entry on the parser's
   stack of unparsed-function queues.  Each macro expands to an
   lvalue, so it can appear on either side of an assignment.  */
#define unparsed_funs_with_default_args \
  VEC_last (cp_unparsed_functions_entry, parser->unparsed_queues)->funs_with_default_args
#define unparsed_funs_with_definitions \
  VEC_last (cp_unparsed_functions_entry, parser->unparsed_queues)->funs_with_definitions
#define unparsed_nsdmis \
  VEC_last (cp_unparsed_functions_entry, parser->unparsed_queues)->nsdmis
/* Push a new, empty entry onto PARSER's stack of unparsed-function
   queues.  The pushed slot is NULL; the accessor macros then
   initialize its fields in place.  */
static void
push_unparsed_function_queues (cp_parser *parser)
{
  VEC_safe_push (cp_unparsed_functions_entry, gc,
		 parser->unparsed_queues, NULL);
  unparsed_funs_with_default_args = NULL;
  unparsed_funs_with_definitions = make_tree_vector ();
  unparsed_nsdmis = NULL;
}
/* Pop the topmost entry from PARSER's stack of unparsed-function
   queues, releasing the definitions vector allocated by the matching
   push_unparsed_function_queues call.  */
static void
pop_unparsed_function_queues (cp_parser *parser)
{
  release_tree_vector (unparsed_funs_with_definitions);
  VEC_pop (cp_unparsed_functions_entry, parser->unparsed_queues);
}
/* Prototypes. */
/* Constructors and destructors. */
static cp_parser *cp_parser_new
(void);
/* Routines to parse various constructs.
Those that return `tree' will return the error_mark_node (rather
than NULL_TREE) if a parse error occurs, unless otherwise noted.
Sometimes, they will return an ordinary node if error-recovery was
attempted, even though a parse error occurred. So, to check
whether or not a parse error occurred, you should always use
cp_parser_error_occurred. If the construct is optional (indicated
either by an `_opt' in the name of the function that does the
parsing or via a FLAGS parameter), then NULL_TREE is returned if
the construct is not present. */
/* Lexical conventions [gram.lex] */
static tree cp_parser_identifier
(cp_parser *);
static tree cp_parser_string_literal
(cp_parser *, bool, bool);
static tree cp_parser_userdef_char_literal
(cp_parser *);
static tree cp_parser_userdef_string_literal
(cp_token *);
static tree cp_parser_userdef_numeric_literal
(cp_parser *);
/* Basic concepts [gram.basic] */
static bool cp_parser_translation_unit
(cp_parser *);
/* Expressions [gram.expr] */
static tree cp_parser_primary_expression
(cp_parser *, bool, bool, bool, cp_id_kind *);
static tree cp_parser_id_expression
(cp_parser *, bool, bool, bool *, bool, bool);
static tree cp_parser_unqualified_id
(cp_parser *, bool, bool, bool, bool);
static tree cp_parser_nested_name_specifier_opt
(cp_parser *, bool, bool, bool, bool);
static tree cp_parser_nested_name_specifier
(cp_parser *, bool, bool, bool, bool);
static tree cp_parser_qualifying_entity
(cp_parser *, bool, bool, bool, bool, bool);
static tree cp_parser_postfix_expression
(cp_parser *, bool, bool, bool, cp_id_kind *);
static tree cp_parser_postfix_open_square_expression
(cp_parser *, tree, bool);
static tree cp_parser_postfix_dot_deref_expression
(cp_parser *, enum cpp_ttype, tree, bool, cp_id_kind *, location_t);
static VEC(tree,gc) *cp_parser_parenthesized_expression_list
(cp_parser *, int, bool, bool, bool *);
/* Values for the second parameter of cp_parser_parenthesized_expression_list. */
enum { non_attr = 0, normal_attr = 1, id_attr = 2 };
static void cp_parser_pseudo_destructor_name
(cp_parser *, tree *, tree *);
static tree cp_parser_unary_expression
(cp_parser *, bool, bool, cp_id_kind *);
static enum tree_code cp_parser_unary_operator
(cp_token *);
static tree cp_parser_new_expression
(cp_parser *);
static VEC(tree,gc) *cp_parser_new_placement
(cp_parser *);
static tree cp_parser_new_type_id
(cp_parser *, tree *);
static cp_declarator *cp_parser_new_declarator_opt
(cp_parser *);
static cp_declarator *cp_parser_direct_new_declarator
(cp_parser *);
static VEC(tree,gc) *cp_parser_new_initializer
(cp_parser *);
static tree cp_parser_delete_expression
(cp_parser *);
static tree cp_parser_cast_expression
(cp_parser *, bool, bool, cp_id_kind *);
static tree cp_parser_binary_expression
(cp_parser *, bool, bool, enum cp_parser_prec, cp_id_kind *);
static tree cp_parser_question_colon_clause
(cp_parser *, tree);
static tree cp_parser_assignment_expression
(cp_parser *, bool, cp_id_kind *);
static enum tree_code cp_parser_assignment_operator_opt
(cp_parser *);
static tree cp_parser_expression
(cp_parser *, bool, cp_id_kind *);
static tree cp_parser_constant_expression
(cp_parser *, bool, bool *);
static tree cp_parser_builtin_offsetof
(cp_parser *);
static tree cp_parser_lambda_expression
(cp_parser *);
static void cp_parser_lambda_introducer
(cp_parser *, tree);
static bool cp_parser_lambda_declarator_opt
(cp_parser *, tree);
static void cp_parser_lambda_body
(cp_parser *, tree);
/* Statements [gram.stmt.stmt] */
static void cp_parser_statement
(cp_parser *, tree, bool, bool *);
static void cp_parser_label_for_labeled_statement
(cp_parser *);
static tree cp_parser_expression_statement
(cp_parser *, tree);
static tree cp_parser_compound_statement
(cp_parser *, tree, bool, bool);
static void cp_parser_statement_seq_opt
(cp_parser *, tree);
static tree cp_parser_selection_statement
(cp_parser *, bool *);
static tree cp_parser_condition
(cp_parser *);
static tree cp_parser_iteration_statement
(cp_parser *);
static bool cp_parser_for_init_statement
(cp_parser *, tree *decl);
static tree cp_parser_for
(cp_parser *);
static tree cp_parser_c_for
(cp_parser *, tree, tree);
static tree cp_parser_range_for
(cp_parser *, tree, tree, tree);
static void do_range_for_auto_deduction
(tree, tree);
static tree cp_parser_perform_range_for_lookup
(tree, tree *, tree *);
static tree cp_parser_range_for_member_function
(tree, tree);
static tree cp_parser_jump_statement
(cp_parser *);
static void cp_parser_declaration_statement
(cp_parser *);
static tree cp_parser_implicitly_scoped_statement
(cp_parser *, bool *);
static void cp_parser_already_scoped_statement
(cp_parser *);
/* Declarations [gram.dcl.dcl] */
static void cp_parser_declaration_seq_opt
(cp_parser *);
static void cp_parser_declaration
(cp_parser *);
static void cp_parser_block_declaration
(cp_parser *, bool);
static void cp_parser_simple_declaration
(cp_parser *, bool, tree *);
static void cp_parser_decl_specifier_seq
(cp_parser *, cp_parser_flags, cp_decl_specifier_seq *, int *);
static tree cp_parser_storage_class_specifier_opt
(cp_parser *);
static tree cp_parser_function_specifier_opt
(cp_parser *, cp_decl_specifier_seq *);
static tree cp_parser_type_specifier
(cp_parser *, cp_parser_flags, cp_decl_specifier_seq *, bool,
int *, bool *);
static tree cp_parser_simple_type_specifier
(cp_parser *, cp_decl_specifier_seq *, cp_parser_flags);
static tree cp_parser_type_name
(cp_parser *);
static tree cp_parser_nonclass_name
(cp_parser* parser);
static tree cp_parser_elaborated_type_specifier
(cp_parser *, bool, bool);
static tree cp_parser_enum_specifier
(cp_parser *);
static void cp_parser_enumerator_list
(cp_parser *, tree);
static void cp_parser_enumerator_definition
(cp_parser *, tree);
static tree cp_parser_namespace_name
(cp_parser *);
static void cp_parser_namespace_definition
(cp_parser *);
static void cp_parser_namespace_body
(cp_parser *);
static tree cp_parser_qualified_namespace_specifier
(cp_parser *);
static void cp_parser_namespace_alias_definition
(cp_parser *);
static bool cp_parser_using_declaration
(cp_parser *, bool);
static void cp_parser_using_directive
(cp_parser *);
static tree cp_parser_alias_declaration
(cp_parser *);
static void cp_parser_asm_definition
(cp_parser *);
static void cp_parser_linkage_specification
(cp_parser *);
static void cp_parser_static_assert
(cp_parser *, bool);
static tree cp_parser_decltype
(cp_parser *);
/* Declarators [gram.dcl.decl] */
static tree cp_parser_init_declarator
(cp_parser *, cp_decl_specifier_seq *, VEC (deferred_access_check,gc)*, bool, bool, int, bool *, tree *);
static cp_declarator *cp_parser_declarator
(cp_parser *, cp_parser_declarator_kind, int *, bool *, bool);
static cp_declarator *cp_parser_direct_declarator
(cp_parser *, cp_parser_declarator_kind, int *, bool);
static enum tree_code cp_parser_ptr_operator
(cp_parser *, tree *, cp_cv_quals *);
static cp_cv_quals cp_parser_cv_qualifier_seq_opt
(cp_parser *);
static cp_virt_specifiers cp_parser_virt_specifier_seq_opt
(cp_parser *);
static tree cp_parser_late_return_type_opt
(cp_parser *, cp_cv_quals);
static tree cp_parser_declarator_id
(cp_parser *, bool);
static tree cp_parser_type_id
(cp_parser *);
static tree cp_parser_template_type_arg
(cp_parser *);
static tree cp_parser_trailing_type_id (cp_parser *);
static tree cp_parser_type_id_1
(cp_parser *, bool, bool);
static void cp_parser_type_specifier_seq
(cp_parser *, bool, bool, cp_decl_specifier_seq *);
static tree cp_parser_parameter_declaration_clause
(cp_parser *);
static tree cp_parser_parameter_declaration_list
(cp_parser *, bool *);
static cp_parameter_declarator *cp_parser_parameter_declaration
(cp_parser *, bool, bool *);
static tree cp_parser_default_argument
(cp_parser *, bool);
static void cp_parser_function_body
(cp_parser *);
static tree cp_parser_initializer
(cp_parser *, bool *, bool *);
static tree cp_parser_initializer_clause
(cp_parser *, bool *);
static tree cp_parser_braced_list
(cp_parser*, bool*);
static VEC(constructor_elt,gc) *cp_parser_initializer_list
(cp_parser *, bool *);
static bool cp_parser_ctor_initializer_opt_and_function_body
(cp_parser *);
/* Classes [gram.class] */
static tree cp_parser_class_name
(cp_parser *, bool, bool, enum tag_types, bool, bool, bool);
static tree cp_parser_class_specifier
(cp_parser *);
static tree cp_parser_class_head
(cp_parser *, bool *);
static enum tag_types cp_parser_class_key
(cp_parser *);
static void cp_parser_member_specification_opt
(cp_parser *);
static void cp_parser_member_declaration
(cp_parser *);
static tree cp_parser_pure_specifier
(cp_parser *);
static tree cp_parser_constant_initializer
(cp_parser *);
/* Derived classes [gram.class.derived] */
static tree cp_parser_base_clause
(cp_parser *);
static tree cp_parser_base_specifier
(cp_parser *);
/* Special member functions [gram.special] */
static tree cp_parser_conversion_function_id
(cp_parser *);
static tree cp_parser_conversion_type_id
(cp_parser *);
static cp_declarator *cp_parser_conversion_declarator_opt
(cp_parser *);
static bool cp_parser_ctor_initializer_opt
(cp_parser *);
static void cp_parser_mem_initializer_list
(cp_parser *);
static tree cp_parser_mem_initializer
(cp_parser *);
static tree cp_parser_mem_initializer_id
(cp_parser *);
/* Overloading [gram.over] */
static tree cp_parser_operator_function_id
(cp_parser *);
static tree cp_parser_operator
(cp_parser *);
/* Templates [gram.temp] */
static void cp_parser_template_declaration
(cp_parser *, bool);
static tree cp_parser_template_parameter_list
(cp_parser *);
static tree cp_parser_template_parameter
(cp_parser *, bool *, bool *);
static tree cp_parser_type_parameter
(cp_parser *, bool *);
static tree cp_parser_template_id
(cp_parser *, bool, bool, bool);
static tree cp_parser_template_name
(cp_parser *, bool, bool, bool, bool *);
static tree cp_parser_template_argument_list
(cp_parser *);
static tree cp_parser_template_argument
(cp_parser *);
static void cp_parser_explicit_instantiation
(cp_parser *);
static void cp_parser_explicit_specialization
(cp_parser *);
/* Exception handling [gram.exception] */
static tree cp_parser_try_block
(cp_parser *);
static bool cp_parser_function_try_block
(cp_parser *);
static void cp_parser_handler_seq
(cp_parser *);
static void cp_parser_handler
(cp_parser *);
static tree cp_parser_exception_declaration
(cp_parser *);
static tree cp_parser_throw_expression
(cp_parser *);
static tree cp_parser_exception_specification_opt
(cp_parser *);
static tree cp_parser_type_id_list
(cp_parser *);
/* GNU Extensions */
static tree cp_parser_asm_specification_opt
(cp_parser *);
static tree cp_parser_asm_operand_list
(cp_parser *);
static tree cp_parser_asm_clobber_list
(cp_parser *);
static tree cp_parser_asm_label_list
(cp_parser *);
static tree cp_parser_attributes_opt
(cp_parser *);
static tree cp_parser_attribute_list
(cp_parser *);
static bool cp_parser_extension_opt
(cp_parser *, int *);
static void cp_parser_label_declaration
(cp_parser *);
/* Transactional Memory Extensions */
static tree cp_parser_transaction
(cp_parser *, enum rid);
static tree cp_parser_transaction_expression
(cp_parser *, enum rid);
static bool cp_parser_function_transaction
(cp_parser *, enum rid);
static tree cp_parser_transaction_cancel
(cp_parser *);
enum pragma_context { pragma_external, pragma_stmt, pragma_compound };
static bool cp_parser_pragma
(cp_parser *, enum pragma_context);
/* Objective-C++ Productions */
static tree cp_parser_objc_message_receiver
(cp_parser *);
static tree cp_parser_objc_message_args
(cp_parser *);
static tree cp_parser_objc_message_expression
(cp_parser *);
static tree cp_parser_objc_encode_expression
(cp_parser *);
static tree cp_parser_objc_defs_expression
(cp_parser *);
static tree cp_parser_objc_protocol_expression
(cp_parser *);
static tree cp_parser_objc_selector_expression
(cp_parser *);
static tree cp_parser_objc_expression
(cp_parser *);
static bool cp_parser_objc_selector_p
(enum cpp_ttype);
static tree cp_parser_objc_selector
(cp_parser *);
static tree cp_parser_objc_protocol_refs_opt
(cp_parser *);
static void cp_parser_objc_declaration
(cp_parser *, tree);
static tree cp_parser_objc_statement
(cp_parser *);
static bool cp_parser_objc_valid_prefix_attributes
(cp_parser *, tree *);
static void cp_parser_objc_at_property_declaration
(cp_parser *) ;
static void cp_parser_objc_at_synthesize_declaration
(cp_parser *) ;
static void cp_parser_objc_at_dynamic_declaration
(cp_parser *) ;
static tree cp_parser_objc_struct_declaration
(cp_parser *) ;
/* Utility Routines */
static tree cp_parser_lookup_name
(cp_parser *, tree, enum tag_types, bool, bool, bool, tree *, location_t);
static tree cp_parser_lookup_name_simple
(cp_parser *, tree, location_t);
static tree cp_parser_maybe_treat_template_as_class
(tree, bool);
static bool cp_parser_check_declarator_template_parameters
(cp_parser *, cp_declarator *, location_t);
static bool cp_parser_check_template_parameters
(cp_parser *, unsigned, location_t, cp_declarator *);
static tree cp_parser_simple_cast_expression
(cp_parser *);
static tree cp_parser_global_scope_opt
(cp_parser *, bool);
static bool cp_parser_constructor_declarator_p
(cp_parser *, bool);
static tree cp_parser_function_definition_from_specifiers_and_declarator
(cp_parser *, cp_decl_specifier_seq *, tree, const cp_declarator *);
static tree cp_parser_function_definition_after_declarator
(cp_parser *, bool);
static void cp_parser_template_declaration_after_export
(cp_parser *, bool);
static void cp_parser_perform_template_parameter_access_checks
(VEC (deferred_access_check,gc)*);
static tree cp_parser_single_declaration
(cp_parser *, VEC (deferred_access_check,gc)*, bool, bool, bool *);
static tree cp_parser_functional_cast
(cp_parser *, tree);
static tree cp_parser_save_member_function_body
(cp_parser *, cp_decl_specifier_seq *, cp_declarator *, tree);
static tree cp_parser_save_nsdmi
(cp_parser *);
static tree cp_parser_enclosed_template_argument_list
(cp_parser *);
static void cp_parser_save_default_args
(cp_parser *, tree);
static void cp_parser_late_parsing_for_member
(cp_parser *, tree);
static tree cp_parser_late_parse_one_default_arg
(cp_parser *, tree, tree, tree);
static void cp_parser_late_parsing_nsdmi
(cp_parser *, tree);
static void cp_parser_late_parsing_default_args
(cp_parser *, tree);
static tree cp_parser_sizeof_operand
(cp_parser *, enum rid);
static tree cp_parser_trait_expr
(cp_parser *, enum rid);
static bool cp_parser_declares_only_class_p
(cp_parser *);
static void cp_parser_set_storage_class
(cp_parser *, cp_decl_specifier_seq *, enum rid, location_t);
static void cp_parser_set_decl_spec_type
(cp_decl_specifier_seq *, tree, location_t, bool);
static bool cp_parser_friend_p
(const cp_decl_specifier_seq *);
static void cp_parser_required_error
(cp_parser *, required_token, bool);
static cp_token *cp_parser_require
(cp_parser *, enum cpp_ttype, required_token);
static cp_token *cp_parser_require_keyword
(cp_parser *, enum rid, required_token);
static bool cp_parser_token_starts_function_definition_p
(cp_token *);
static bool cp_parser_next_token_starts_class_definition_p
(cp_parser *);
static bool cp_parser_next_token_ends_template_argument_p
(cp_parser *);
static bool cp_parser_nth_token_starts_template_argument_list_p
(cp_parser *, size_t);
static enum tag_types cp_parser_token_is_class_key
(cp_token *);
static void cp_parser_check_class_key
(enum tag_types, tree type);
static void cp_parser_check_access_in_redeclaration
(tree type, location_t location);
static bool cp_parser_optional_template_keyword
(cp_parser *);
static void cp_parser_pre_parsed_nested_name_specifier
(cp_parser *);
static bool cp_parser_cache_group
(cp_parser *, enum cpp_ttype, unsigned);
static tree cp_parser_cache_defarg
(cp_parser *parser, bool nsdmi);
static void cp_parser_parse_tentatively
(cp_parser *);
static void cp_parser_commit_to_tentative_parse
(cp_parser *);
static void cp_parser_abort_tentative_parse
(cp_parser *);
static bool cp_parser_parse_definitely
(cp_parser *);
static inline bool cp_parser_parsing_tentatively
(cp_parser *);
static bool cp_parser_uncommitted_to_tentative_parse_p
(cp_parser *);
static void cp_parser_error
(cp_parser *, const char *);
static void cp_parser_name_lookup_error
(cp_parser *, tree, tree, name_lookup_error, location_t);
static bool cp_parser_simulate_error
(cp_parser *);
static bool cp_parser_check_type_definition
(cp_parser *);
static void cp_parser_check_for_definition_in_return_type
(cp_declarator *, tree, location_t type_location);
static void cp_parser_check_for_invalid_template_id
(cp_parser *, tree, location_t location);
static bool cp_parser_non_integral_constant_expression
(cp_parser *, non_integral_constant);
static void cp_parser_diagnose_invalid_type_name
(cp_parser *, tree, tree, location_t);
static bool cp_parser_parse_and_diagnose_invalid_type_name
(cp_parser *);
static int cp_parser_skip_to_closing_parenthesis
(cp_parser *, bool, bool, bool);
static void cp_parser_skip_to_end_of_statement
(cp_parser *);
static void cp_parser_consume_semicolon_at_end_of_statement
(cp_parser *);
static void cp_parser_skip_to_end_of_block_or_statement
(cp_parser *);
static bool cp_parser_skip_to_closing_brace
(cp_parser *);
static void cp_parser_skip_to_end_of_template_parameter_list
(cp_parser *);
static void cp_parser_skip_to_pragma_eol
(cp_parser*, cp_token *);
static bool cp_parser_error_occurred
(cp_parser *);
static bool cp_parser_allow_gnu_extensions_p
(cp_parser *);
static bool cp_parser_is_pure_string_literal
(cp_token *);
static bool cp_parser_is_string_literal
(cp_token *);
static bool cp_parser_is_keyword
(cp_token *, enum rid);
static tree cp_parser_make_typename_type
(cp_parser *, tree, tree, location_t location);
static cp_declarator * cp_parser_make_indirect_declarator
(enum tree_code, tree, cp_cv_quals, cp_declarator *);
/* Returns nonzero if we are parsing tentatively.  */
static inline bool
cp_parser_parsing_tentatively (cp_parser* parser)
{
  /* A context below the bottommost one exists only while a tentative
     parse is in progress.  */
  if (parser->context->next)
    return true;
  return false;
}
/* Returns nonzero if TOKEN is a string literal.  */
static bool
cp_parser_is_pure_string_literal (cp_token* token)
{
  switch (token->type)
    {
    case CPP_STRING:
    case CPP_STRING16:
    case CPP_STRING32:
    case CPP_WSTRING:
    case CPP_UTF8STRING:
      return true;
    default:
      return false;
    }
}
/* Returns nonzero if TOKEN is a string literal
   of a user-defined string literal.  */
static bool
cp_parser_is_string_literal (cp_token* token)
{
  /* An ordinary string literal qualifies immediately.  */
  if (cp_parser_is_pure_string_literal (token))
    return true;
  /* Otherwise, accept the user-defined-literal variants.  */
  switch (token->type)
    {
    case CPP_STRING_USERDEF:
    case CPP_STRING16_USERDEF:
    case CPP_STRING32_USERDEF:
    case CPP_WSTRING_USERDEF:
    case CPP_UTF8STRING_USERDEF:
      return true;
    default:
      return false;
    }
}
/* Returns nonzero if TOKEN is the indicated KEYWORD.  Comparison is
   by the token's KEYWORD field, which the lexer sets for CPP_KEYWORD
   tokens.  */
static bool
cp_parser_is_keyword (cp_token* token, enum rid keyword)
{
  return token->keyword == keyword;
}
/* If not parsing tentatively, issue a diagnostic of the form
      FILE:LINE: MESSAGE before TOKEN
   where TOKEN is the next token in the input stream.  MESSAGE
   (specified by the caller) is usually of the form "expected
   OTHER-TOKEN".  When parsing tentatively, the error is merely
   recorded (see cp_parser_simulate_error) and nothing is printed.  */
static void
cp_parser_error (cp_parser* parser, const char* gmsgid)
{
  if (!cp_parser_simulate_error (parser))
    {
      cp_token *token = cp_lexer_peek_token (parser->lexer);
      /* This diagnostic makes more sense if it is tagged to the line
	 of the token we just peeked at.  */
      cp_lexer_set_source_position_from_token (token);
      /* A pragma in the token stream gets its own message, and the
	 rest of the pragma line is skipped so parsing can resume.  */
      if (token->type == CPP_PRAGMA)
	{
	  error_at (token->location,
		    "%<#pragma%> is not allowed here");
	  cp_parser_skip_to_pragma_eol (parser, token);
	  return;
	}
      c_parse_error (gmsgid,
		     /* Because c_parser_error does not understand
			CPP_KEYWORD, keywords are treated like
			identifiers.  */
		     (token->type == CPP_KEYWORD ? CPP_NAME : token->type),
		     token->u.value, token->flags);
    }
}
/* Issue an error about name-lookup failing.  NAME is the
   IDENTIFIER_NODE that was looked up; DECL is the result of
   the lookup (as returned from cp_parser_lookup_name).  DESIRED is
   the thing that we hoped to find.  The diagnostic is phrased
   according to the scope in which the lookup occurred: a named
   scope, the global namespace, an object scope, or no scope.  */
static void
cp_parser_name_lookup_error (cp_parser* parser,
			     tree name,
			     tree decl,
			     name_lookup_error desired,
			     location_t location)
{
  /* If name lookup completely failed, tell the user that NAME was not
     declared.  */
  if (decl == error_mark_node)
    {
      if (parser->scope && parser->scope != global_namespace)
	error_at (location, "%<%E::%E%> has not been declared",
		  parser->scope, name);
      else if (parser->scope == global_namespace)
	error_at (location, "%<::%E%> has not been declared", name);
      else if (parser->object_scope
	       && !CLASS_TYPE_P (parser->object_scope))
	error_at (location, "request for member %qE in non-class type %qT",
		  name, parser->object_scope);
      else if (parser->object_scope)
	error_at (location, "%<%T::%E%> has not been declared",
		  parser->object_scope, name);
      else
	error_at (location, "%qE has not been declared", name);
    }
  /* Lookup found something, but not what the caller wanted; qualify
     the complaint with the scope that was searched.  */
  else if (parser->scope && parser->scope != global_namespace)
    {
      switch (desired)
	{
	  case NLE_TYPE:
	    error_at (location, "%<%E::%E%> is not a type",
	    			parser->scope, name);
	    break;
	  case NLE_CXX98:
	    error_at (location, "%<%E::%E%> is not a class or namespace",
	    			parser->scope, name);
	    break;
	  case NLE_NOT_CXX98:
	    error_at (location,
	    	      "%<%E::%E%> is not a class, namespace, or enumeration",
		      parser->scope, name);
	    break;
	  default:
	    gcc_unreachable ();
	}
    }
  else if (parser->scope == global_namespace)
    {
      switch (desired)
	{
	  case NLE_TYPE:
	    error_at (location, "%<::%E%> is not a type", name);
	    break;
	  case NLE_CXX98:
	    error_at (location, "%<::%E%> is not a class or namespace", name);
	    break;
	  case NLE_NOT_CXX98:
	    error_at (location,
		      "%<::%E%> is not a class, namespace, or enumeration",
		      name);
	    break;
	  default:
	    gcc_unreachable ();
	}
    }
  /* No qualifying scope at all.  */
  else
    {
      switch (desired)
	{
	  case NLE_TYPE:
	    error_at (location, "%qE is not a type", name);
	    break;
	  case NLE_CXX98:
	    error_at (location, "%qE is not a class or namespace", name);
	    break;
	  case NLE_NOT_CXX98:
	    error_at (location,
		      "%qE is not a class, namespace, or enumeration", name);
	    break;
	  default:
	    gcc_unreachable ();
	}
    }
}
/* If we are parsing tentatively, remember that an error has occurred
   during this tentative parse.  Returns true if the error was
   simulated; false if a message should be issued by the caller.  */
static bool
cp_parser_simulate_error (cp_parser* parser)
{
  /* Once committed (or when not parsing tentatively at all), errors
     must be reported for real by the caller.  */
  if (!cp_parser_uncommitted_to_tentative_parse_p (parser))
    return false;

  parser->context->status = CP_PARSER_STATUS_KIND_ERROR;
  return true;
}
/* Check for repeated decl-specifiers.  DECL_SPECS->SPECS counts how
   many times each decl-specifier appeared; any count of two or more
   is diagnosed at LOCATION, except that "long long" is merely a
   C++98 pedwarn.  */
static void
cp_parser_check_decl_spec (cp_decl_specifier_seq *decl_specs,
			   location_t location)
{
  int ds;

  for (ds = ds_first; ds != ds_last; ++ds)
    {
      unsigned count = decl_specs->specs[ds];
      if (count < 2)
	continue;
      /* The "long" specifier is a special case because of "long long".  */
      if (ds == ds_long)
	{
	  if (count > 2)
	    error_at (location, "%<long long long%> is too long for GCC");
	  else
	    pedwarn_cxx98 (location, OPT_Wlong_long,
			   "ISO C++ 1998 does not support %<long long%>");
	}
      else if (count > 1)
	{
	  /* NOTE(review): this table is indexed by DS, so its order
	     must track the ds_* enumerators — verify when adding a
	     new decl-specifier.  */
	  static const char *const decl_spec_names[] = {
	    "signed",
	    "unsigned",
	    "short",
	    "long",
	    "const",
	    "volatile",
	    "restrict",
	    "inline",
	    "virtual",
	    "explicit",
	    "friend",
	    "typedef",
	    "using",
            "constexpr",
	    "__complex",
	    "__thread"
	  };
	  error_at (location, "duplicate %qs", decl_spec_names[ds]);
	}
    }
}
/* This function is called when a type is defined.  If type
   definitions are forbidden at this point, an error message is
   issued.  Returns true if no error was issued, false otherwise.  */
static bool
cp_parser_check_type_definition (cp_parser* parser)
{
  const char *forbidden = parser->type_definition_forbidden_message;

  /* No restriction is currently in effect: the definition is OK.  */
  if (!forbidden)
    return true;

  /* Don't use `%s' to print the string, because quotations (`%<', `%>')
     in the message need to be interpreted.  */
  error (forbidden);
  return false;
}
/* This function is called when the DECLARATOR is processed.  The TYPE
   was a type defined in the decl-specifiers.  If it is invalid to
   define a type in the decl-specifiers for DECLARATOR, an error is
   issued.  TYPE_LOCATION is the location of TYPE and is used
   for error reporting.  */
static void
cp_parser_check_for_definition_in_return_type (cp_declarator *declarator,
                                               tree type,
                                               location_t type_location)
{
  cp_declarator *d;

  /* [dcl.fct] forbids type definitions in return types.
     Unfortunately, it's not easy to know whether or not we are
     processing a return type until after the fact.  Strip any
     pointer, reference, or pointer-to-member declarators: they only
     wrap whatever the defined type ultimately applies to.  */
  for (d = declarator; d != NULL; d = d->declarator)
    if (d->kind != cdk_pointer
        && d->kind != cdk_reference
        && d->kind != cdk_ptrmem)
      break;

  /* Only a function declarator at the core means TYPE was defined in
     a return type.  */
  if (d == NULL || d->kind != cdk_function)
    return;

  error_at (type_location,
            "new types may not be defined in a return type");
  inform (type_location,
          "(perhaps a semicolon is missing after the definition of %qT)",
          type);
}
/* A type-specifier (TYPE) has been parsed which cannot be followed by
   "<" in any valid C++ program.  If the next token is indeed "<",
   issue a message warning the user about what appears to be an
   invalid attempt to form a template-id.  LOCATION is the location
   of the type-specifier (TYPE).  */
static void
cp_parser_check_for_invalid_template_id (cp_parser* parser,
                                         tree type, location_t location)
{
  cp_token_position start = 0;
  if (cp_lexer_next_token_is (parser->lexer, CPP_LESS))
    {
      /* Choose the most specific diagnostic available for TYPE.  */
      if (TYPE_P (type))
        error_at (location, "%qT is not a template", type);
      else if (TREE_CODE (type) == IDENTIFIER_NODE)
        error_at (location, "%qE is not a template", type);
      else
        error_at (location, "invalid template-id");
      /* Remember the location of the invalid "<".  Tokens may only be
         purged while uncommitted to a tentative parse; otherwise START
         stays 0 and the bogus arguments are consumed but kept.  */
      if (cp_parser_uncommitted_to_tentative_parse_p (parser))
        start = cp_lexer_token_position (parser->lexer, true);
      /* Consume the "<".  */
      cp_lexer_consume_token (parser->lexer);
      /* Parse the template arguments.  */
      cp_parser_enclosed_template_argument_list (parser);
      /* Permanently remove the invalid template arguments so that
         this error message is not issued again.  */
      if (start)
        cp_lexer_purge_tokens_after (parser->lexer, start);
    }
}
/* If parsing an integral constant-expression, issue an error message
   about the fact that THING appeared and return true.  Otherwise,
   return false.  In either case, set
   PARSER->NON_INTEGRAL_CONSTANT_EXPRESSION_P.  */
static bool
cp_parser_non_integral_constant_expression (cp_parser *parser,
                                            non_integral_constant thing)
{
  /* Record the fact unconditionally, even when no diagnostic is
     wanted right now; callers inspect this flag later.  */
  parser->non_integral_constant_expression_p = true;
  if (parser->integral_constant_expression_p)
    {
      if (!parser->allow_non_integral_constant_expression_p)
        {
          const char *msg = NULL;
          /* Constructs with a dedicated full-sentence diagnostic
             return directly; simple tokens fall through to the
             generic "%qs cannot appear ..." message via MSG.  */
          switch (thing)
            {
            case NIC_FLOAT:
              error ("floating-point literal "
                     "cannot appear in a constant-expression");
              return true;
            case NIC_CAST:
              error ("a cast to a type other than an integral or "
                     "enumeration type cannot appear in a "
                     "constant-expression");
              return true;
            case NIC_TYPEID:
              error ("%<typeid%> operator "
                     "cannot appear in a constant-expression");
              return true;
            case NIC_NCC:
              error ("non-constant compound literals "
                     "cannot appear in a constant-expression");
              return true;
            case NIC_FUNC_CALL:
              error ("a function call "
                     "cannot appear in a constant-expression");
              return true;
            case NIC_INC:
              error ("an increment "
                     "cannot appear in a constant-expression");
              return true;
            case NIC_DEC:
              /* Fixed grammar: "an decrement" -> "a decrement".  */
              error ("a decrement "
                     "cannot appear in a constant-expression");
              return true;
            case NIC_ARRAY_REF:
              error ("an array reference "
                     "cannot appear in a constant-expression");
              return true;
            case NIC_ADDR_LABEL:
              error ("the address of a label "
                     "cannot appear in a constant-expression");
              return true;
            case NIC_OVERLOADED:
              error ("calls to overloaded operators "
                     "cannot appear in a constant-expression");
              return true;
            case NIC_ASSIGNMENT:
              error ("an assignment cannot appear in a constant-expression");
              return true;
            case NIC_COMMA:
              error ("a comma operator "
                     "cannot appear in a constant-expression");
              return true;
            case NIC_CONSTRUCTOR:
              error ("a call to a constructor "
                     "cannot appear in a constant-expression");
              return true;
            case NIC_TRANSACTION:
              error ("a transaction expression "
                     "cannot appear in a constant-expression");
              return true;
            case NIC_THIS:
              msg = "this";
              break;
            case NIC_FUNC_NAME:
              msg = "__FUNCTION__";
              break;
            case NIC_PRETTY_FUNC:
              msg = "__PRETTY_FUNCTION__";
              break;
            case NIC_C99_FUNC:
              msg = "__func__";
              break;
            case NIC_VA_ARG:
              msg = "va_arg";
              break;
            case NIC_ARROW:
              msg = "->";
              break;
            case NIC_POINT:
              msg = ".";
              break;
            case NIC_STAR:
              msg = "*";
              break;
            case NIC_ADDR:
              msg = "&";
              break;
            case NIC_PREINCREMENT:
              msg = "++";
              break;
            case NIC_PREDECREMENT:
              msg = "--";
              break;
            case NIC_NEW:
              msg = "new";
              break;
            case NIC_DEL:
              msg = "delete";
              break;
            default:
              gcc_unreachable ();
            }
          if (msg)
            error ("%qs cannot appear in a constant-expression", msg);
          return true;
        }
    }
  return false;
}
/* Emit a diagnostic for an invalid type name.  SCOPE is the
   qualifying scope (or NULL, if none) for ID.  This function commits
   to the current active tentative parse, if any.  (Otherwise, the
   problematic construct might be encountered again later, resulting
   in duplicate error messages.)  LOCATION is the location of ID.  */
static void
cp_parser_diagnose_invalid_type_name (cp_parser *parser,
                                      tree scope, tree id,
                                      location_t location)
{
  tree decl, old_scope;
  cp_parser_commit_to_tentative_parse (parser);
  /* Try to lookup the identifier.  The lookup is done with SCOPE
     temporarily installed as the parser's scope; the previous scope
     is restored immediately afterwards.  */
  old_scope = parser->scope;
  parser->scope = scope;
  decl = cp_parser_lookup_name_simple (parser, id, location);
  parser->scope = old_scope;
  /* If the lookup found a template-name, it means that the user forgot
     to specify an argument list.  Emit a useful error message.  */
  if (TREE_CODE (decl) == TEMPLATE_DECL)
    error_at (location,
              "invalid use of template-name %qE without an argument list",
              decl);
  else if (TREE_CODE (id) == BIT_NOT_EXPR)
    error_at (location, "invalid use of destructor %qD as a type", id);
  else if (TREE_CODE (decl) == TYPE_DECL)
    /* Something like 'unsigned A a;'  */
    error_at (location, "invalid combination of multiple type-specifiers");
  else if (!parser->scope)
    {
      /* Unqualified name that does not name a type.  */
      error_at (location, "%qE does not name a type", id);
      /* If we're in a template class, it's possible that the user was
         referring to a type from a base class.  For example:
           template <typename T> struct A { typedef T X; };
           template <typename T> struct B : public A<T> { X x; };
         The user should have said "typename A<T>::X".  */
      if (cxx_dialect < cxx0x && id == ridpointers[(int)RID_CONSTEXPR])
        /* Special hint: "constexpr" used in pre-C++11 mode.  */
        inform (location, "C++11 %<constexpr%> only available with "
                "-std=c++11 or -std=gnu++11");
      else if (processing_template_decl && current_class_type
               && TYPE_BINFO (current_class_type))
        {
          tree b;
          /* Scan each dependent base for a member TYPE_DECL named ID
             and, if found, suggest the "typename Base::ID" spelling.  */
          for (b = TREE_CHAIN (TYPE_BINFO (current_class_type));
               b;
               b = TREE_CHAIN (b))
            {
              tree base_type = BINFO_TYPE (b);
              if (CLASS_TYPE_P (base_type)
                  && dependent_type_p (base_type))
                {
                  tree field;
                  /* Go from a particular instantiation of the
                     template (which will have an empty TYPE_FIELDs),
                     to the main version.  */
                  base_type = CLASSTYPE_PRIMARY_TEMPLATE_TYPE (base_type);
                  for (field = TYPE_FIELDS (base_type);
                       field;
                       field = DECL_CHAIN (field))
                    if (TREE_CODE (field) == TYPE_DECL
                        && DECL_NAME (field) == id)
                      {
                        inform (location,
                                "(perhaps %<typename %T::%E%> was intended)",
                                BINFO_TYPE (b), id);
                        break;
                      }
                  /* FIELD non-null here means the hint was issued;
                     stop scanning further bases.  */
                  if (field)
                    break;
                }
            }
        }
    }
  /* Here we diagnose qualified-ids where the scope is actually correct,
     but the identifier does not resolve to a valid type name.  */
  else if (parser->scope != error_mark_node)
    {
      if (TREE_CODE (parser->scope) == NAMESPACE_DECL)
        error_at (location, "%qE in namespace %qE does not name a type",
                  id, parser->scope);
      else if (CLASS_TYPE_P (parser->scope)
               && constructor_name_p (id, parser->scope))
        {
          /* A<T>::A<T>() */
          error_at (location, "%<%T::%E%> names the constructor, not"
                    " the type", parser->scope, id);
          if (cp_lexer_next_token_is (parser->lexer, CPP_LESS))
            error_at (location, "and %qT has no template constructors",
                      parser->scope);
        }
      else if (TYPE_P (parser->scope)
               && dependent_scope_p (parser->scope))
        error_at (location, "need %<typename%> before %<%T::%E%> because "
                  "%qT is a dependent scope",
                  parser->scope, id, parser->scope);
      else if (TYPE_P (parser->scope))
        error_at (location, "%qE in %q#T does not name a type",
                  id, parser->scope);
      else
        gcc_unreachable ();
    }
}
/* Check for a common situation where a type-name should be present,
   but is not, and issue a sensible error message.  Returns true if an
   invalid type-name was detected.
   The situation handled by this function are variable declarations of the
   form `ID a', where `ID' is an id-expression and `a' is a plain identifier.
   Usually, `ID' should name a type, but if we got here it means that it
   does not.  We try to emit the best possible error message depending on
   how exactly the id-expression looks like.  */
static bool
cp_parser_parse_and_diagnose_invalid_type_name (cp_parser *parser)
{
  tree id;
  cp_token *token = cp_lexer_peek_token (parser->lexer);
  /* Avoid duplicate error about ambiguous lookup: an ambiguous name
     after a nested-name-specifier has already been diagnosed, so skip
     straight to the recovery code.  */
  if (token->type == CPP_NESTED_NAME_SPECIFIER)
    {
      cp_token *next = cp_lexer_peek_nth_token (parser->lexer, 2);
      if (next->type == CPP_NAME && next->ambiguous_p)
        goto out;
    }
  /* Re-parse the id-expression tentatively so we can either commit to
     the diagnosis or back out without side effects.  */
  cp_parser_parse_tentatively (parser);
  id = cp_parser_id_expression (parser,
                                /*template_keyword_p=*/false,
                                /*check_dependency_p=*/true,
                                /*template_p=*/NULL,
                                /*declarator_p=*/true,
                                /*optional_p=*/false);
  /* If the next token is a (, this is a function with no explicit return
     type, i.e. constructor, destructor or conversion op.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN)
      || TREE_CODE (id) == TYPE_DECL)
    {
      /* Not actually an invalid type-name; undo the tentative parse
         and report nothing.  */
      cp_parser_abort_tentative_parse (parser);
      return false;
    }
  if (!cp_parser_parse_definitely (parser))
    return false;
  /* Emit a diagnostic for the invalid type.  */
  cp_parser_diagnose_invalid_type_name (parser, parser->scope,
                                        id, token->location);
 out:
  /* If we aren't in the middle of a declarator (i.e. in a
     parameter-declaration-clause), skip to the end of the declaration;
     there's no point in trying to process it.  */
  if (!parser->in_declarator_p)
    cp_parser_skip_to_end_of_block_or_statement (parser);
  return true;
}
/* Consume tokens up to, and including, the next non-nested closing `)'.
   Returns 1 iff we found a closing `)'.  RECOVERING is true, if we
   are doing error recovery.  Returns -1 if OR_COMMA is true and we
   found an unnested comma.  Returns 0 when no closing `)' is found
   (EOF, end of pragma, unnested `;' outside braces, or an unmatched
   `]'/`}').  If CONSUME_PAREN is true the final `)' is consumed.  */
static int
cp_parser_skip_to_closing_parenthesis (cp_parser *parser,
                                       bool recovering,
                                       bool or_comma,
                                       bool consume_paren)
{
  unsigned paren_depth = 0;
  unsigned brace_depth = 0;
  unsigned square_depth = 0;
  /* While uncommitted in a tentative parse and not recovering, the
     caller only wants the tokens left alone; pretend failure.  */
  if (recovering && !or_comma
      && cp_parser_uncommitted_to_tentative_parse_p (parser))
    return 0;
  while (true)
    {
      cp_token * token = cp_lexer_peek_token (parser->lexer);
      switch (token->type)
        {
        case CPP_EOF:
        case CPP_PRAGMA_EOL:
          /* If we've run out of tokens, then there is no closing `)'.  */
          return 0;
        /* This is good for lambda expression capture-lists.  */
        case CPP_OPEN_SQUARE:
          ++square_depth;
          break;
        case CPP_CLOSE_SQUARE:
          /* `!square_depth--': an unmatched `]' (depth already 0)
             means we've left the construct; give up.  Otherwise the
             post-decrement unnests one level.  */
          if (!square_depth--)
            return 0;
          break;
        case CPP_SEMICOLON:
          /* This matches the processing in skip_to_end_of_statement.  */
          if (!brace_depth)
            return 0;
          break;
        case CPP_OPEN_BRACE:
          ++brace_depth;
          break;
        case CPP_CLOSE_BRACE:
          /* An unmatched `}' also terminates the search.  */
          if (!brace_depth--)
            return 0;
          break;
        case CPP_COMMA:
          /* A top-level comma is reported only when requested.  */
          if (recovering && or_comma && !brace_depth && !paren_depth
              && !square_depth)
            return -1;
          break;
        case CPP_OPEN_PAREN:
          /* Parens inside braces belong to nested statements; only
             count them at brace depth 0.  */
          if (!brace_depth)
            ++paren_depth;
          break;
        case CPP_CLOSE_PAREN:
          /* The post-decrement fires only when brace_depth is 0, by
             short-circuit; depth 0 here is the `)' we want.  */
          if (!brace_depth && !paren_depth--)
            {
              if (consume_paren)
                cp_lexer_consume_token (parser->lexer);
              return 1;
            }
          break;
        default:
          break;
        }
      /* Consume the token.  */
      cp_lexer_consume_token (parser->lexer);
    }
}
/* Consume tokens until we reach the end of the current statement.
   Normally, that will be just before consuming a `;'.  However, if a
   non-nested `}' comes first, then we stop before consuming that.  */
static void
cp_parser_skip_to_end_of_statement (cp_parser* parser)
{
  unsigned nesting_depth = 0;
  while (true)
    {
      cp_token *token = cp_lexer_peek_token (parser->lexer);
      switch (token->type)
        {
        case CPP_EOF:
        case CPP_PRAGMA_EOL:
          /* If we've run out of tokens, stop.  */
          return;
        case CPP_SEMICOLON:
          /* If the next token is a `;', we have reached the end of the
             statement.  */
          if (!nesting_depth)
            return;
          break;
        case CPP_CLOSE_BRACE:
          /* If this is a non-nested '}', stop before consuming it.
             That way, when confronted with something like:
               { 3 + }
             we stop before consuming the closing '}', even though we
             have not yet reached a `;'.  */
          if (nesting_depth == 0)
            return;
          /* If it is the closing '}' for a block that we have
             scanned, stop -- but only after consuming the token.
             That way given:
               void f g () { ... }
               typedef int I;
             we will stop after the body of the erroneously declared
             function, but before consuming the following `typedef'
             declaration.  */
          if (--nesting_depth == 0)
            {
              cp_lexer_consume_token (parser->lexer);
              return;
            }
          /* BUG FIX: the original code fell through into the
             CPP_OPEN_BRACE case here, re-incrementing NESTING_DEPTH
             and thereby preventing it from ever unwinding below 1, so
             the skip overshot on constructs nested two or more braces
             deep.  The `break' makes a `}' at depth >= 2 unnest.  */
          break;
        case CPP_OPEN_BRACE:
          ++nesting_depth;
          break;
        default:
          break;
        }
      /* Consume the token.  */
      cp_lexer_consume_token (parser->lexer);
    }
}
/* This function is called at the end of a statement or declaration.
   If the next token is a semicolon, it is consumed; otherwise, error
   recovery is attempted.  */
static void
cp_parser_consume_semicolon_at_end_of_statement (cp_parser *parser)
{
  /* The common case: the `;' is right there and gets consumed by
     cp_parser_require; nothing more to do.  */
  if (cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON) != NULL)
    return;

  /* Recovery: discard the extra (erroneous) input up to the end of
     the statement, then pick up a trailing `;' if one is present.  */
  cp_parser_skip_to_end_of_statement (parser);
  if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
    cp_lexer_consume_token (parser->lexer);
}
/* Skip tokens until we have consumed an entire block, or until we
   have consumed a non-nested `;'.  */
static void
cp_parser_skip_to_end_of_block_or_statement (cp_parser* parser)
{
  int depth = 0;

  for (;;)
    {
      cp_token *tok = cp_lexer_peek_token (parser->lexer);

      /* If we've run out of tokens, stop.  */
      if (tok->type == CPP_EOF || tok->type == CPP_PRAGMA_EOL)
        return;

      if (tok->type == CPP_SEMICOLON && depth == 0)
        {
          /* An unnested `;' ends the statement: consume it and stop.  */
          cp_lexer_consume_token (parser->lexer);
          return;
        }

      if (tok->type == CPP_CLOSE_BRACE)
        {
          /* An unnested `}' belongs to our caller; leave it alone.  */
          if (depth == 0)
            return;
          /* If this `}' closes the outermost block we entered, consume
             it and stop; otherwise just unnest one level.  */
          if (--depth == 0)
            {
              cp_lexer_consume_token (parser->lexer);
              return;
            }
        }
      else if (tok->type == CPP_OPEN_BRACE)
        /* Entering a nested block.  */
        ++depth;

      /* Consume the token and keep scanning.  */
      cp_lexer_consume_token (parser->lexer);
    }
}
/* Skip tokens until a non-nested closing curly brace is the next
   token, or there are no more tokens.  Return true in the first case,
   false otherwise.  */
static bool
cp_parser_skip_to_closing_brace (cp_parser *parser)
{
  unsigned depth = 0;

  for (;;)
    {
      cp_token *tok = cp_lexer_peek_token (parser->lexer);

      switch (tok->type)
        {
        case CPP_EOF:
        case CPP_PRAGMA_EOL:
          /* Out of tokens: no closing brace was found.  */
          return false;

        case CPP_CLOSE_BRACE:
          /* A non-nested `}' ends the current block; leave it for the
             caller to consume.  */
          if (depth == 0)
            return true;
          --depth;
          break;

        case CPP_OPEN_BRACE:
          /* Entering a nested block; consume it whole.  */
          ++depth;
          break;

        default:
          break;
        }

      cp_lexer_consume_token (parser->lexer);
    }
}
/* Consume tokens until we reach the end of the pragma.  The PRAGMA_TOK
   parameter is the PRAGMA token, allowing us to purge the entire pragma
   sequence.  */
static void
cp_parser_skip_to_pragma_eol (cp_parser* parser, cp_token *pragma_tok)
{
  cp_token *token;

  parser->lexer->in_pragma = false;

  /* Discard everything up to and including the pragma terminator,
     or EOF if the pragma is unterminated.  */
  for (;;)
    {
      token = cp_lexer_consume_token (parser->lexer);
      if (token->type == CPP_PRAGMA_EOL || token->type == CPP_EOF)
        break;
    }

  /* Ensure that the pragma is not parsed again.  */
  cp_lexer_purge_tokens_after (parser->lexer, pragma_tok);
}
/* Require pragma end of line, resyncing with it as necessary.  The
   arguments are as for cp_parser_skip_to_pragma_eol.  */
static void
cp_parser_require_pragma_eol (cp_parser *parser, cp_token *pragma_tok)
{
  parser->lexer->in_pragma = false;

  /* If the end-of-pragma token is not next, cp_parser_require has
     complained; skip forward to it so parsing can resynchronize.  */
  if (cp_parser_require (parser, CPP_PRAGMA_EOL, RT_PRAGMA_EOL) == NULL)
    cp_parser_skip_to_pragma_eol (parser, pragma_tok);
}
/* This is a simple wrapper around make_typename_type.  When the id is
   an unresolved identifier node, we can provide a superior diagnostic
   using cp_parser_diagnose_invalid_type_name.  */
static tree
cp_parser_make_typename_type (cp_parser *parser, tree scope,
                              tree id, location_t id_location)
{
  tree result;

  /* Anything other than a plain identifier gets the default
     diagnostics from make_typename_type itself.  */
  if (TREE_CODE (id) != IDENTIFIER_NODE)
    return make_typename_type (scope, id, typename_type, tf_error);

  /* For an identifier, try quietly first so that a failure can be
     reported with the richer parser diagnostic.  */
  result = make_typename_type (scope, id, typename_type,
                               /*complain=*/tf_none);
  if (result == error_mark_node)
    cp_parser_diagnose_invalid_type_name (parser, scope, id, id_location);
  return result;
}
/* This is a wrapper around the
   make_{pointer,ptrmem,reference}_declarator functions that decides
   which one to call based on the CODE and CLASS_TYPE arguments.  The
   CODE argument should be one of the values returned by
   cp_parser_ptr_operator.  */
static cp_declarator *
cp_parser_make_indirect_declarator (enum tree_code code, tree class_type,
                                    cp_cv_quals cv_qualifiers,
                                    cp_declarator *target)
{
  switch (code)
    {
    case ERROR_MARK:
      /* Propagate an earlier parse error.  */
      return cp_error_declarator;

    case INDIRECT_REF:
      /* "*" alone is a pointer; "Class::*" is a pointer-to-member.  */
      if (class_type == NULL_TREE)
        return make_pointer_declarator (cv_qualifiers, target);
      return make_ptrmem_declarator (cv_qualifiers, class_type, target);

    case ADDR_EXPR:
      /* "&": an lvalue reference.  */
      if (class_type == NULL_TREE)
        return make_reference_declarator (cv_qualifiers, target, false);
      break;

    case NON_LVALUE_EXPR:
      /* "&&": an rvalue reference.  */
      if (class_type == NULL_TREE)
        return make_reference_declarator (cv_qualifiers, target, true);
      break;

    default:
      break;
    }

  /* No other combination is produced by cp_parser_ptr_operator.  */
  gcc_unreachable ();
}
/* Create a new C++ parser.  Builds the main lexer, initializes the
   binary-operator dispatch table, and returns a freshly allocated,
   default-initialized parser.  */
static cp_parser *
cp_parser_new (void)
{
  cp_parser *parser;
  cp_lexer *lexer;
  unsigned i;
  /* cp_lexer_new_main is called before doing GC allocation because
     cp_lexer_new_main might load a PCH file.  */
  lexer = cp_lexer_new_main ();
  /* Initialize the binops_by_token so that we can get the tree
     directly from the token.  */
  for (i = 0; i < sizeof (binops) / sizeof (binops[0]); i++)
    binops_by_token[binops[i].token_type] = binops[i];
  /* ggc_alloc_cleared_* zeroes the structure, so only fields whose
     default differs from zero need explicit initialization below.  */
  parser = ggc_alloc_cleared_cp_parser ();
  parser->lexer = lexer;
  parser->context = cp_parser_context_new (NULL);
  /* For now, we always accept GNU extensions.  */
  parser->allow_gnu_extensions_p = 1;
  /* The `>' token is a greater-than operator, not the end of a
     template-id.  */
  parser->greater_than_is_operator_p = true;
  parser->default_arg_ok_p = true;
  /* We are not parsing a constant-expression.  */
  parser->integral_constant_expression_p = false;
  parser->allow_non_integral_constant_expression_p = false;
  parser->non_integral_constant_expression_p = false;
  /* Local variable names are not forbidden.  */
  parser->local_variables_forbidden_p = false;
  /* We are not processing an `extern "C"' declaration.  */
  parser->in_unbraced_linkage_specification_p = false;
  /* We are not processing a declarator.  */
  parser->in_declarator_p = false;
  /* We are not processing a template-argument-list.  */
  parser->in_template_argument_list_p = false;
  /* We are not in an iteration statement.  */
  parser->in_statement = 0;
  /* We are not in a switch statement.  */
  parser->in_switch_statement_p = false;
  /* We are not parsing a type-id inside an expression.  */
  parser->in_type_id_in_expr_p = false;
  /* Declarations aren't implicitly extern "C".  */
  parser->implicit_extern_c = false;
  /* String literals should be translated to the execution character set.  */
  parser->translate_strings_p = true;
  /* We are not parsing a function body.  */
  parser->in_function_body = false;
  /* We can correct until told otherwise.  */
  parser->colon_corrects_to_scope_p = true;
  /* The unparsed function queue is empty.  */
  push_unparsed_function_queues (parser);
  /* There are no classes being defined.  */
  parser->num_classes_being_defined = 0;
  /* No template parameters apply.  */
  parser->num_template_parameter_lists = 0;
  return parser;
}
/* Create a cp_lexer structure which will emit the tokens in CACHE
   and push it onto the parser's lexer stack.  This is used for delayed
   parsing of in-class method bodies and default arguments, and should
   not be confused with tentative parsing.  */
static void
cp_parser_push_lexer_for_tokens (cp_parser *parser, cp_token_cache *cache)
{
  cp_lexer *replay_lexer;

  /* Build a lexer that replays CACHE and chain it ahead of the
     current one.  */
  replay_lexer = cp_lexer_new_from_tokens (cache);
  replay_lexer->next = parser->lexer;
  parser->lexer = replay_lexer;

  /* Move the current source position to that of the first token in the
     new lexer.  */
  cp_lexer_set_source_position_from_token (replay_lexer->next_token);
}
/* Pop the top lexer off the parser stack.  This is never used for the
   "main" lexer, only for those pushed by cp_parser_push_lexer_for_tokens.  */
static void
cp_parser_pop_lexer (cp_parser *parser)
{
  cp_lexer *old_lexer = parser->lexer;

  /* Unlink the top lexer and release its storage.  */
  parser->lexer = old_lexer->next;
  cp_lexer_destroy (old_lexer);

  /* Put the current source position back where it was before this
     lexer was pushed.  */
  cp_lexer_set_source_position_from_token (parser->lexer->next_token);
}
/* Lexical conventions [gram.lex]  */
/* Parse an identifier.  Returns an IDENTIFIER_NODE representing the
   identifier, or error_mark_node if none is present.  */
static tree
cp_parser_identifier (cp_parser* parser)
{
  cp_token *token = cp_parser_require (parser, CPP_NAME, RT_NAME);

  /* cp_parser_require has already issued a diagnostic on failure.  */
  if (token == NULL)
    return error_mark_node;
  return token->u.value;
}
/* Parse a sequence of adjacent string constants.  Returns a
   TREE_STRING representing the combined, nul-terminated string
   constant.  If TRANSLATE is true, translate the string to the
   execution character set.  If WIDE_OK is false, a wide string is
   invalid here (the original comment said "true", but the code below
   only errors when !wide_ok).
   C++98 [lex.string] says that if a narrow string literal token is
   adjacent to a wide string literal token, the behavior is undefined.
   However, C99 6.4.5p4 says that this results in a wide string literal.
   We follow C99 here, for consistency with the C front end.
   This code is largely lifted from lex_string() in c-lex.c.
   FUTURE: ObjC++ will need to handle @-strings here.  */
static tree
cp_parser_string_literal (cp_parser *parser, bool translate, bool wide_ok)
{
  tree value;
  size_t count;
  struct obstack str_ob;
  cpp_string str, istr, *strs;
  cp_token *tok;
  enum cpp_ttype type, curr_type;
  int have_suffix_p = 0;
  tree string_tree;
  tree suffix_id = NULL_TREE;
  bool curr_tok_is_userdef_p = false;
  tok = cp_lexer_peek_token (parser->lexer);
  if (!cp_parser_is_string_literal (tok))
    {
      cp_parser_error (parser, "expected string-literal");
      return error_mark_node;
    }
  /* A user-defined literal token wraps the string value and a suffix
     identifier; unwrap the value and strip the userdef marker from
     the token type.  */
  if (cpp_userdef_string_p (tok->type))
    {
      string_tree = USERDEF_LITERAL_VALUE (tok->u.value);
      curr_type = cpp_userdef_string_remove_type (tok->type);
      curr_tok_is_userdef_p = true;
    }
  else
    {
      string_tree = tok->u.value;
      curr_type = tok->type;
    }
  type = curr_type;
  /* Try to avoid the overhead of creating and destroying an obstack
     for the common case of just one string.  */
  if (!cp_parser_is_string_literal
      (cp_lexer_peek_nth_token (parser->lexer, 2)))
    {
      cp_lexer_consume_token (parser->lexer);
      str.text = (const unsigned char *)TREE_STRING_POINTER (string_tree);
      str.len = TREE_STRING_LENGTH (string_tree);
      count = 1;
      if (curr_tok_is_userdef_p)
        {
          suffix_id = USERDEF_LITERAL_SUFFIX_ID (tok->u.value);
          have_suffix_p = 1;
          curr_type = cpp_userdef_string_remove_type (tok->type);
        }
      else
        curr_type = tok->type;
      strs = &str;
    }
  else
    {
      /* Two or more adjacent literals: accumulate the cpp_strings in
         an obstack so cpp_interpret_string can concatenate them.  */
      gcc_obstack_init (&str_ob);
      count = 0;
      do
        {
          cp_lexer_consume_token (parser->lexer);
          count++;
          str.text = (const unsigned char *)TREE_STRING_POINTER (string_tree);
          str.len = TREE_STRING_LENGTH (string_tree);
          if (curr_tok_is_userdef_p)
            {
              /* All user-defined suffixes in the sequence must agree;
                 HAVE_SUFFIX_P becomes -1 once a mismatch is reported
                 so the error is not repeated.  */
              tree curr_suffix_id = USERDEF_LITERAL_SUFFIX_ID (tok->u.value);
              if (have_suffix_p == 0)
                {
                  suffix_id = curr_suffix_id;
                  have_suffix_p = 1;
                }
              else if (have_suffix_p == 1
                       && curr_suffix_id != suffix_id)
                {
                  error ("inconsistent user-defined literal suffixes"
                         " %qD and %qD in string literal",
                         suffix_id, curr_suffix_id);
                  have_suffix_p = -1;
                }
              curr_type = cpp_userdef_string_remove_type (tok->type);
            }
          else
            curr_type = tok->type;
          if (type != curr_type)
            {
              /* A plain narrow string may combine with one wide kind
                 (C99 semantics); mixing two different wide kinds is
                 rejected.  */
              if (type == CPP_STRING)
                type = curr_type;
              else if (curr_type != CPP_STRING)
                error_at (tok->location,
                          "unsupported non-standard concatenation "
                          "of string literals");
            }
          obstack_grow (&str_ob, &str, sizeof (cpp_string));
          tok = cp_lexer_peek_token (parser->lexer);
          if (cpp_userdef_string_p (tok->type))
            {
              string_tree = USERDEF_LITERAL_VALUE (tok->u.value);
              curr_type = cpp_userdef_string_remove_type (tok->type);
              curr_tok_is_userdef_p = true;
            }
          else
            {
              string_tree = tok->u.value;
              curr_type = tok->type;
              curr_tok_is_userdef_p = false;
            }
        }
      while (cp_parser_is_string_literal (tok));
      strs = (cpp_string *) obstack_finish (&str_ob);
    }
  if (type != CPP_STRING && !wide_ok)
    {
      cp_parser_error (parser, "a wide string is invalid in this context");
      /* Recover by treating it as a narrow string.  */
      type = CPP_STRING;
    }
  if ((translate ? cpp_interpret_string : cpp_interpret_string_notranslate)
      (parse_in, strs, count, &istr, type))
    {
      value = build_string (istr.len, (const char *)istr.text);
      free (CONST_CAST (unsigned char *, istr.text));
      /* Give the string the array type matching its encoding.  */
      switch (type)
        {
        default:
        case CPP_STRING:
        case CPP_UTF8STRING:
          TREE_TYPE (value) = char_array_type_node;
          break;
        case CPP_STRING16:
          TREE_TYPE (value) = char16_array_type_node;
          break;
        case CPP_STRING32:
          TREE_TYPE (value) = char32_array_type_node;
          break;
        case CPP_WSTRING:
          TREE_TYPE (value) = wchar_array_type_node;
          break;
        }
      value = fix_string_type (value);
      /* A user-defined suffix turns the whole thing into a call to
         the matching literal operator.  */
      if (have_suffix_p)
        {
          tree literal = build_userdef_literal (suffix_id, value, NULL_TREE);
          tok->u.value = literal;
          return cp_parser_userdef_string_literal (tok);
        }
    }
  else
    /* cpp_interpret_string has issued an error.  */
    value = error_mark_node;
  if (count > 1)
    obstack_free (&str_ob, 0);
  return value;
}
/* Look up a literal operator with the name and the exact arguments.
   NAME is the mangled operator"" identifier; ARGS are the argument
   trees the call would receive.  Returns the matching function, or
   error_mark_node when no overload takes exactly those argument
   types.  */
static tree
lookup_literal_operator (tree name, VEC(tree,gc) *args)
{
  tree decl, fns;
  decl = lookup_name (name);
  if (!decl || !is_overloaded_fn (decl))
    return error_mark_node;
  /* Walk every overload and insist on an exact parameter-type match;
     no conversions are considered.  */
  for (fns = decl; fns; fns = OVL_NEXT (fns))
    {
      unsigned int ix;
      bool found = true;
      tree fn = OVL_CURRENT (fns);
      tree argtypes = NULL_TREE;
      argtypes = TYPE_ARG_TYPES (TREE_TYPE (fn));
      if (argtypes != NULL_TREE)
        {
          for (ix = 0; ix < VEC_length (tree, args) && argtypes != NULL_TREE;
               ++ix, argtypes = TREE_CHAIN (argtypes))
            {
              tree targ = TREE_VALUE (argtypes);
              tree tparm = TREE_TYPE (VEC_index (tree, args, ix));
              bool ptr = TREE_CODE (targ) == POINTER_TYPE;
              bool arr = TREE_CODE (tparm) == ARRAY_TYPE;
              /* Accept either an identical type, or a pointer
                 parameter matched by an array argument whose element
                 types agree (the array-to-pointer adjustment).  */
              if ((ptr || arr || !same_type_p (targ, tparm))
                  && (!ptr || !arr
                      || !same_type_p (TREE_TYPE (targ),
                                       TREE_TYPE (tparm))))
                found = false;
            }
          /* Require that every supplied argument was consumed and
             that the parameter list ends here (no extra parameters,
             even defaulted ones).  */
          if (found
              && ix == VEC_length (tree, args)
              /* May be this should be sufficient_parms_p instead,
                 depending on how exactly should user-defined literals
                 work in presence of default arguments on the literal
                 operator parameters.  */
              && argtypes == void_list_node)
            return fn;
        }
    }
  return error_mark_node;
}
/* Parse a user-defined char constant.  Returns a call to a user-defined
   literal operator taking the character as an argument.  Consumes the
   token; returns error_mark_node if no suitable operator is found or
   the call fails.  */
static tree
cp_parser_userdef_char_literal (cp_parser *parser)
{
  cp_token *token = cp_lexer_consume_token (parser->lexer);
  tree literal = token->u.value;
  tree suffix_id = USERDEF_LITERAL_SUFFIX_ID (literal);
  tree value = USERDEF_LITERAL_VALUE (literal);
  tree name = cp_literal_operator_id (IDENTIFIER_POINTER (suffix_id));
  tree decl, result;
  /* Build up a call to the user-defined operator  */
  /* Lookup the name we got back from the id-expression.  */
  VEC(tree,gc) *args = make_tree_vector ();
  VEC_safe_push (tree, gc, args, value);
  decl = lookup_literal_operator (name, args);
  if (!decl || decl == error_mark_node)
    {
      error ("unable to find character literal operator %qD with %qT argument",
             name, TREE_TYPE (value));
      release_tree_vector (args);
      return error_mark_node;
    }
  result = finish_call_expr (decl, &args, false, true, tf_warning_or_error);
  release_tree_vector (args);
  if (result != error_mark_node)
    return result;
  /* finish_call_expr failed; report the same lookup failure.  */
  error ("unable to find character literal operator %qD with %qT argument",
         name, TREE_TYPE (value));
  return error_mark_node;
}
/* A subroutine of cp_parser_userdef_numeric_literal to
   create a char... template parameter pack from a string node.
   VALUE is a TREE_STRING holding the spelling of the number; the
   trailing NUL is excluded (hence the "- 1").  Returns a one-element
   TREE_VEC containing the argument pack, suitable for
   lookup_template_function.  */
static tree
make_char_string_pack (tree value)
{
  tree charvec;
  tree argpack = make_node (NONTYPE_ARGUMENT_PACK);
  const char *str = TREE_STRING_POINTER (value);
  int i, len = TREE_STRING_LENGTH (value) - 1;
  tree argvec = make_tree_vec (1);
  /* Fill in CHARVEC with all of the parameters.  */
  charvec = make_tree_vec (len);
  for (i = 0; i < len; ++i)
    TREE_VEC_ELT (charvec, i) = build_int_cst (char_type_node, str[i]);
  /* Build the argument packs.  */
  SET_ARGUMENT_PACK_ARGS (argpack, charvec);
  TREE_TYPE (argpack) = char_type_node;
  TREE_VEC_ELT (argvec, 0) = argpack;
  return argvec;
}
/* Parse a user-defined numeric constant.  Returns a call to a
   user-defined literal operator.  Three forms are tried in order, per
   [lex.ext]: (1) an operator taking the cooked numeric value, (2) a
   raw operator taking the spelling as a const char*, and (3) a
   template operator with a char... parameter pack built from the
   spelling.  Returns error_mark_node if none matches.  */
static tree
cp_parser_userdef_numeric_literal (cp_parser *parser)
{
  cp_token *token = cp_lexer_consume_token (parser->lexer);
  tree literal = token->u.value;
  tree suffix_id = USERDEF_LITERAL_SUFFIX_ID (literal);
  tree value = USERDEF_LITERAL_VALUE (literal);
  tree num_string = USERDEF_LITERAL_NUM_STRING (literal);
  tree name = cp_literal_operator_id (IDENTIFIER_POINTER (suffix_id));
  tree decl, result;
  VEC(tree,gc) *args;
  /* Look for a literal operator taking the exact type of numeric argument
     as the literal value.  */
  args = make_tree_vector ();
  VEC_safe_push (tree, gc, args, value);
  decl = lookup_literal_operator (name, args);
  if (decl && decl != error_mark_node)
    {
      /* tf_none: failures here are not errors, just a cue to try the
         next form.  */
      result = finish_call_expr (decl, &args, false, true, tf_none);
      if (result != error_mark_node)
        {
          release_tree_vector (args);
          return result;
        }
    }
  release_tree_vector (args);
  /* If the numeric argument didn't work, look for a raw literal
     operator taking a const char* argument consisting of the number
     in string format.  */
  args = make_tree_vector ();
  VEC_safe_push (tree, gc, args, num_string);
  decl = lookup_literal_operator (name, args);
  if (decl && decl != error_mark_node)
    {
      result = finish_call_expr (decl, &args, false, true, tf_none);
      if (result != error_mark_node)
        {
          release_tree_vector (args);
          return result;
        }
    }
  release_tree_vector (args);
  /* If the raw literal didn't work, look for a non-type template
     function with parameter pack char....  Call the function with
     template parameter characters representing the number.  */
  args = make_tree_vector ();
  decl = lookup_literal_operator (name, args);
  if (decl && decl != error_mark_node)
    {
      tree tmpl_args = make_char_string_pack (num_string);
      decl = lookup_template_function (decl, tmpl_args);
      result = finish_call_expr (decl, &args, false, true, tf_none);
      if (result != error_mark_node)
        {
          release_tree_vector (args);
          return result;
        }
    }
  release_tree_vector (args);
  error ("unable to find numeric literal operator %qD", name);
  return error_mark_node;
}
/* Parse a user-defined string constant.  Returns a call to a user-defined
   literal operator taking a character pointer and the length of the string
   as arguments.  TOKEN carries the USERDEF_LITERAL node built by
   cp_parser_string_literal.  */
static tree
cp_parser_userdef_string_literal (cp_token *token)
{
  tree literal = token->u.value;
  tree suffix_id = USERDEF_LITERAL_SUFFIX_ID (literal);
  tree name = cp_literal_operator_id (IDENTIFIER_POINTER (suffix_id));
  tree value = USERDEF_LITERAL_VALUE (literal);
  /* Character count, excluding the terminating NUL: byte length
     divided by the element size of the string's character type.  */
  int len = TREE_STRING_LENGTH (value)
	/ TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (value)))) - 1;
  tree decl, result;
  /* Build up a call to the user-defined operator  */
  /* Lookup the name we got back from the id-expression.  */
  /* NOTE(review): this uses plain lookup_name and overload resolution
     via finish_call_expr, whereas the char and numeric counterparts
     above use lookup_literal_operator for an exact-match lookup --
     confirm whether this asymmetry is intentional.  */
  VEC(tree,gc) *args = make_tree_vector ();
  VEC_safe_push (tree, gc, args, value);
  VEC_safe_push (tree, gc, args, build_int_cst (size_type_node, len));
  decl = lookup_name (name);
  if (!decl || decl == error_mark_node)
    {
      error ("unable to find string literal operator %qD", name);
      release_tree_vector (args);
      return error_mark_node;
    }
  result = finish_call_expr (decl, &args, false, true, tf_none);
  release_tree_vector (args);
  if (result != error_mark_node)
    return result;
  error ("unable to find string literal operator %qD with %qT, %qT arguments",
         name, TREE_TYPE (value), size_type_node);
  return error_mark_node;
}
/* Basic concepts [gram.basic] */
/* Parse a translation-unit.

   translation-unit:
     declaration-seq [opt]

   Returns TRUE if all went well.  */

static bool
cp_parser_translation_unit (cp_parser* parser)
{
  /* The address of the first non-permanent object on the declarator
     obstack.  */
  static void *declarator_obstack_base;

  bool ok;

  /* Lazily set up the declarator obstack and the shared error/empty
     declarator objects the first time through.  */
  if (!cp_error_declarator)
    {
      gcc_obstack_init (&declarator_obstack);
      /* Create the error declarator.  */
      cp_error_declarator = make_declarator (cdk_error);
      /* Create the empty parameter list.  */
      no_parameters = make_parameter_declarator (NULL, NULL, NULL_TREE);
      /* Remember where the base of the declarator obstack lies.  */
      declarator_obstack_base = obstack_next_free (&declarator_obstack);
    }

  cp_parser_declaration_seq_opt (parser);

  /* Anything left over means the declaration-seq stopped early.  */
  if (!cp_lexer_next_token_is (parser->lexer, CPP_EOF))
    {
      cp_parser_error (parser, "expected declaration");
      ok = false;
    }
  else
    {
      /* Get rid of the token array; we don't need it any more.  */
      cp_lexer_destroy (parser->lexer);
      parser->lexer = NULL;

      /* This file might have been a context that's implicitly extern
	 "C".  If so, pop the lang context.  (Only relevant for PCH.)  */
      if (parser->implicit_extern_c)
	{
	  pop_lang_context ();
	  parser->implicit_extern_c = false;
	}

      /* Finish up.  */
      finish_translation_unit ();
      ok = true;
    }

  /* Make sure the declarator obstack was fully cleaned up.  */
  gcc_assert (obstack_next_free (&declarator_obstack)
	      == declarator_obstack_base);

  return ok;
}
/* Expressions [gram.expr] */
/* Parse a primary-expression.
primary-expression:
literal
this
( expression )
id-expression
GNU Extensions:
primary-expression:
( compound-statement )
__builtin_va_arg ( assignment-expression , type-id )
__builtin_offsetof ( type-id , offsetof-expression )
C++ Extensions:
__has_nothrow_assign ( type-id )
__has_nothrow_constructor ( type-id )
__has_nothrow_copy ( type-id )
__has_trivial_assign ( type-id )
__has_trivial_constructor ( type-id )
__has_trivial_copy ( type-id )
__has_trivial_destructor ( type-id )
__has_virtual_destructor ( type-id )
__is_abstract ( type-id )
__is_base_of ( type-id , type-id )
__is_class ( type-id )
__is_convertible_to ( type-id , type-id )
__is_empty ( type-id )
__is_enum ( type-id )
__is_final ( type-id )
__is_literal_type ( type-id )
__is_pod ( type-id )
__is_polymorphic ( type-id )
__is_std_layout ( type-id )
__is_trivial ( type-id )
__is_union ( type-id )
Objective-C++ Extension:
primary-expression:
objc-expression
literal:
__null
ADDRESS_P is true iff this expression was immediately preceded by
"&" and therefore might denote a pointer-to-member. CAST_P is true
iff this expression is the target of a cast. TEMPLATE_ARG_P is
true iff this expression is a template argument.
Returns a representation of the expression. Upon return, *IDK
indicates what kind of id-expression (if any) was present. */
static tree
cp_parser_primary_expression (cp_parser *parser,
			      bool address_p,
			      bool cast_p,
			      bool template_arg_p,
			      cp_id_kind *idk)
{
  cp_token *token = NULL;

  /* Assume the primary expression is not an id-expression.  */
  *idk = CP_ID_KIND_NONE;

  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);
  switch (token->type)
    {
      /* literal:
	   integer-literal
	   character-literal
	   floating-literal
	   string-literal
	   boolean-literal
	   pointer-literal
	   user-defined-literal  */
    case CPP_CHAR:
    case CPP_CHAR16:
    case CPP_CHAR32:
    case CPP_WCHAR:
    case CPP_NUMBER:
      /* A user-defined numeric literal is handed off before this code
	 consumes the token; the helper does its own token handling.  */
      if (TREE_CODE (token->u.value) == USERDEF_LITERAL)
	return cp_parser_userdef_numeric_literal (parser);
      token = cp_lexer_consume_token (parser->lexer);
      if (TREE_CODE (token->u.value) == FIXED_CST)
	{
	  error_at (token->location,
		    "fixed-point types not supported in C++");
	  return error_mark_node;
	}
      /* Floating-point literals are only allowed in an integral
	 constant expression if they are cast to an integral or
	 enumeration type.  */
      if (TREE_CODE (token->u.value) == REAL_CST
	  && parser->integral_constant_expression_p
	  && pedantic)
	{
	  /* CAST_P will be set even in invalid code like "int(2.7 +
	     ...)".  Therefore, we have to check that the next token
	     is sure to end the cast.  */
	  if (cast_p)
	    {
	      cp_token *next_token;

	      next_token = cp_lexer_peek_token (parser->lexer);
	      if (/* The comma at the end of an
		     enumerator-definition.  */
		  next_token->type != CPP_COMMA
		  /* The curly brace at the end of an enum-specifier.  */
		  && next_token->type != CPP_CLOSE_BRACE
		  /* The end of a statement.  */
		  && next_token->type != CPP_SEMICOLON
		  /* The end of the cast-expression.  */
		  && next_token->type != CPP_CLOSE_PAREN
		  /* The end of an array bound.  */
		  && next_token->type != CPP_CLOSE_SQUARE
		  /* The closing ">" in a template-argument-list.  */
		  && (next_token->type != CPP_GREATER
		      || parser->greater_than_is_operator_p)
		  /* C++0x only: A ">>" treated like two ">" tokens,
		     in a template-argument-list.  */
		  && (next_token->type != CPP_RSHIFT
		      || (cxx_dialect == cxx98)
		      || parser->greater_than_is_operator_p))
		cast_p = false;
	    }

	  /* If we are within a cast, then the constraint that the
	     cast is to an integral or enumeration type will be
	     checked at that point.  If we are not within a cast, then
	     this code is invalid.  */
	  if (!cast_p)
	    cp_parser_non_integral_constant_expression (parser, NIC_FLOAT);
	}
      return token->u.value;

    case CPP_CHAR_USERDEF:
    case CPP_CHAR16_USERDEF:
    case CPP_CHAR32_USERDEF:
    case CPP_WCHAR_USERDEF:
      return cp_parser_userdef_char_literal (parser);

    case CPP_STRING:
    case CPP_STRING16:
    case CPP_STRING32:
    case CPP_WSTRING:
    case CPP_UTF8STRING:
    case CPP_STRING_USERDEF:
    case CPP_STRING16_USERDEF:
    case CPP_STRING32_USERDEF:
    case CPP_WSTRING_USERDEF:
    case CPP_UTF8STRING_USERDEF:
      /* ??? Should wide strings be allowed when parser->translate_strings_p
	 is false (i.e. in attributes)?  If not, we can kill the third
	 argument to cp_parser_string_literal.  */
      return cp_parser_string_literal (parser,
				       parser->translate_strings_p,
				       true);

    case CPP_OPEN_PAREN:
      {
	tree expr;
	bool saved_greater_than_is_operator_p;

	/* Consume the `('.  */
	cp_lexer_consume_token (parser->lexer);
	/* Within a parenthesized expression, a `>' token is always
	   the greater-than operator.  */
	saved_greater_than_is_operator_p
	  = parser->greater_than_is_operator_p;
	parser->greater_than_is_operator_p = true;
	/* If we see `( { ' then we are looking at the beginning of
	   a GNU statement-expression.  */
	if (cp_parser_allow_gnu_extensions_p (parser)
	    && cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
	  {
	    /* Statement-expressions are not allowed by the standard.  */
	    pedwarn (token->location, OPT_pedantic,
		     "ISO C++ forbids braced-groups within expressions");

	    /* And they're not allowed outside of a function-body; you
	       cannot, for example, write:

		 int i = ({ int j = 3; j + 1; });

	       at class or namespace scope.  */
	    if (!parser->in_function_body
		|| parser->in_template_argument_list_p)
	      {
		error_at (token->location,
			  "statement-expressions are not allowed outside "
			  "functions nor in template-argument lists");
		cp_parser_skip_to_end_of_block_or_statement (parser);
		expr = error_mark_node;
	      }
	    else
	      {
		/* Start the statement-expression.  */
		expr = begin_stmt_expr ();
		/* Parse the compound-statement.  */
		cp_parser_compound_statement (parser, expr, false, false);
		/* Finish up.  */
		expr = finish_stmt_expr (expr, false);
	      }
	  }
	else
	  {
	    /* Parse the parenthesized expression.  */
	    expr = cp_parser_expression (parser, cast_p, idk);
	    /* Let the front end know that this expression was
	       enclosed in parentheses.  This matters in case, for
	       example, the expression is of the form `A::B', since
	       `&A::B' might be a pointer-to-member, but `&(A::B)' is
	       not.  */
	    finish_parenthesized_expr (expr);
	    /* DR 705: Wrapping an unqualified name in parentheses
	       suppresses arg-dependent lookup.  We want to pass back
	       CP_ID_KIND_QUALIFIED for suppressing vtable lookup
	       (c++/37862), but none of the others.  */
	    if (*idk != CP_ID_KIND_QUALIFIED)
	      *idk = CP_ID_KIND_NONE;
	  }
	/* The `>' token might be the end of a template-id or
	   template-parameter-list now.  */
	parser->greater_than_is_operator_p
	  = saved_greater_than_is_operator_p;
	/* Consume the `)'.  */
	if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN))
	  cp_parser_skip_to_end_of_statement (parser);

	return expr;
      }

    case CPP_OPEN_SQUARE:
      /* `[' begins either an Objective-C++ message (in ObjC++ mode)
	 or a C++0x lambda-expression.  */
      if (c_dialect_objc ())
	/* We have an Objective-C++ message.  */
	return cp_parser_objc_expression (parser);
      {
	tree lam = cp_parser_lambda_expression (parser);
	/* Don't warn about a failed tentative parse.  */
	if (cp_parser_error_occurred (parser))
	  return error_mark_node;
	maybe_warn_cpp0x (CPP0X_LAMBDA_EXPR);
	return lam;
      }

    case CPP_OBJC_STRING:
      if (c_dialect_objc ())
	/* We have an Objective-C++ string literal.  */
	return cp_parser_objc_expression (parser);
      cp_parser_error (parser, "expected primary-expression");
      return error_mark_node;

    case CPP_KEYWORD:
      switch (token->keyword)
	{
	  /* These two are the boolean literals.  */
	case RID_TRUE:
	  cp_lexer_consume_token (parser->lexer);
	  return boolean_true_node;
	case RID_FALSE:
	  cp_lexer_consume_token (parser->lexer);
	  return boolean_false_node;

	  /* The `__null' literal.  */
	case RID_NULL:
	  cp_lexer_consume_token (parser->lexer);
	  return null_node;

	  /* The `nullptr' literal.  */
	case RID_NULLPTR:
	  cp_lexer_consume_token (parser->lexer);
	  return nullptr_node;

	  /* Recognize the `this' keyword.  */
	case RID_THIS:
	  cp_lexer_consume_token (parser->lexer);
	  if (parser->local_variables_forbidden_p)
	    {
	      error_at (token->location,
			"%<this%> may not be used in this context");
	      return error_mark_node;
	    }
	  /* Pointers cannot appear in constant-expressions.  */
	  if (cp_parser_non_integral_constant_expression (parser, NIC_THIS))
	    return error_mark_node;
	  return finish_this_expr ();

	  /* The `operator' keyword can be the beginning of an
	     id-expression.  */
	case RID_OPERATOR:
	  goto id_expression;

	case RID_FUNCTION_NAME:
	case RID_PRETTY_FUNCTION_NAME:
	case RID_C99_FUNCTION_NAME:
	  {
	    non_integral_constant name;

	    /* The symbols __FUNCTION__, __PRETTY_FUNCTION__, and
	       __func__ are the names of variables -- but they are
	       treated specially.  Therefore, they are handled here,
	       rather than relying on the generic id-expression logic
	       below.  Grammatically, these names are id-expressions.

	       Consume the token.  */
	    token = cp_lexer_consume_token (parser->lexer);

	    switch (token->keyword)
	      {
	      case RID_FUNCTION_NAME:
		name = NIC_FUNC_NAME;
		break;
	      case RID_PRETTY_FUNCTION_NAME:
		name = NIC_PRETTY_FUNC;
		break;
	      case RID_C99_FUNCTION_NAME:
		name = NIC_C99_FUNC;
		break;
	      default:
		gcc_unreachable ();
	      }

	    if (cp_parser_non_integral_constant_expression (parser, name))
	      return error_mark_node;

	    /* Look up the name.  */
	    return finish_fname (token->u.value);
	  }

	case RID_VA_ARG:
	  {
	    tree expression;
	    tree type;

	    /* The `__builtin_va_arg' construct is used to handle
	       `va_arg'.  Consume the `__builtin_va_arg' token.  */
	    cp_lexer_consume_token (parser->lexer);
	    /* Look for the opening `('.  */
	    cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);
	    /* Now, parse the assignment-expression.  */
	    expression = cp_parser_assignment_expression (parser,
							  /*cast_p=*/false, NULL);
	    /* Look for the `,'.  */
	    cp_parser_require (parser, CPP_COMMA, RT_COMMA);
	    /* Parse the type-id.  */
	    type = cp_parser_type_id (parser);
	    /* Look for the closing `)'.  */
	    cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
	    /* Using `va_arg' in a constant-expression is not
	       allowed.  */
	    if (cp_parser_non_integral_constant_expression (parser,
							    NIC_VA_ARG))
	      return error_mark_node;
	    return build_x_va_arg (expression, type);
	  }

	case RID_OFFSETOF:
	  return cp_parser_builtin_offsetof (parser);

	case RID_HAS_NOTHROW_ASSIGN:
	case RID_HAS_NOTHROW_CONSTRUCTOR:
	case RID_HAS_NOTHROW_COPY:
	case RID_HAS_TRIVIAL_ASSIGN:
	case RID_HAS_TRIVIAL_CONSTRUCTOR:
	case RID_HAS_TRIVIAL_COPY:
	case RID_HAS_TRIVIAL_DESTRUCTOR:
	case RID_HAS_VIRTUAL_DESTRUCTOR:
	case RID_IS_ABSTRACT:
	case RID_IS_BASE_OF:
	case RID_IS_CLASS:
	case RID_IS_CONVERTIBLE_TO:
	case RID_IS_EMPTY:
	case RID_IS_ENUM:
	case RID_IS_FINAL:
	case RID_IS_LITERAL_TYPE:
	case RID_IS_POD:
	case RID_IS_POLYMORPHIC:
	case RID_IS_STD_LAYOUT:
	case RID_IS_TRIVIAL:
	case RID_IS_UNION:
	  return cp_parser_trait_expr (parser, token->keyword);

	/* Objective-C++ expressions.  */
	case RID_AT_ENCODE:
	case RID_AT_PROTOCOL:
	case RID_AT_SELECTOR:
	  return cp_parser_objc_expression (parser);

	case RID_TEMPLATE:
	  /* `template <' at block scope is diagnosed here; any other
	     use of the keyword falls through to the generic error.  */
	  if (parser->in_function_body
	      && (cp_lexer_peek_nth_token (parser->lexer, 2)->type
		  == CPP_LESS))
	    {
	      error_at (token->location,
			"a template declaration cannot appear at block scope");
	      cp_parser_skip_to_end_of_block_or_statement (parser);
	      return error_mark_node;
	    }
	default:
	  cp_parser_error (parser, "expected primary-expression");
	  return error_mark_node;
	}

      /* An id-expression can start with either an identifier, a
	 `::' as the beginning of a qualified-id, or the "operator"
	 keyword.  */
    case CPP_NAME:
    case CPP_SCOPE:
    case CPP_TEMPLATE_ID:
    case CPP_NESTED_NAME_SPECIFIER:
      {
	tree id_expression;
	tree decl;
	const char *error_msg;
	bool template_p;
	bool done;
	cp_token *id_expr_token;

      id_expression:
	/* Parse the id-expression.  */
	id_expression
	  = cp_parser_id_expression (parser,
				     /*template_keyword_p=*/false,
				     /*check_dependency_p=*/true,
				     &template_p,
				     /*declarator_p=*/false,
				     /*optional_p=*/false);
	if (id_expression == error_mark_node)
	  return error_mark_node;
	id_expr_token = token;
	token = cp_lexer_peek_token (parser->lexer);
	/* DONE is true when no postfix-expression token follows, i.e.
	   the id-expression is the whole expression.  */
	done = (token->type != CPP_OPEN_SQUARE
		&& token->type != CPP_OPEN_PAREN
		&& token->type != CPP_DOT
		&& token->type != CPP_DEREF
		&& token->type != CPP_PLUS_PLUS
		&& token->type != CPP_MINUS_MINUS);
	/* If we have a template-id, then no further lookup is
	   required.  If the template-id was for a template-class, we
	   will sometimes have a TYPE_DECL at this point.  */
	if (TREE_CODE (id_expression) == TEMPLATE_ID_EXPR
	    || TREE_CODE (id_expression) == TYPE_DECL)
	  decl = id_expression;
	/* Look up the name.  */
	else
	  {
	    tree ambiguous_decls;

	    /* If we already know that this lookup is ambiguous, then
	       we've already issued an error message; there's no reason
	       to check again.  */
	    if (id_expr_token->type == CPP_NAME
		&& id_expr_token->ambiguous_p)
	      {
		cp_parser_simulate_error (parser);
		return error_mark_node;
	      }

	    decl = cp_parser_lookup_name (parser, id_expression,
					  none_type,
					  template_p,
					  /*is_namespace=*/false,
					  /*check_dependency=*/true,
					  &ambiguous_decls,
					  id_expr_token->location);
	    /* If the lookup was ambiguous, an error will already have
	       been issued.  */
	    if (ambiguous_decls)
	      return error_mark_node;

	    /* In Objective-C++, we may have an Objective-C 2.0
	       dot-syntax for classes here.  */
	    if (c_dialect_objc ()
		&& cp_lexer_peek_token (parser->lexer)->type == CPP_DOT
		&& TREE_CODE (decl) == TYPE_DECL
		&& objc_is_class_name (decl))
	      {
		tree component;
		cp_lexer_consume_token (parser->lexer);
		component = cp_parser_identifier (parser);
		if (component == error_mark_node)
		  return error_mark_node;

		return objc_build_class_component_ref (id_expression, component);
	      }

	    /* In Objective-C++, an instance variable (ivar) may be preferred
	       to whatever cp_parser_lookup_name() found.  */
	    decl = objc_lookup_ivar (decl, id_expression);

	    /* If name lookup gives us a SCOPE_REF, then the
	       qualifying scope was dependent.  */
	    if (TREE_CODE (decl) == SCOPE_REF)
	      {
		/* At this point, we do not know if DECL is a valid
		   integral constant expression.  We assume that it is
		   in fact such an expression, so that code like:

		     template <int N> struct A {
		       int a[B<N>::i];
		     };

		   is accepted.  At template-instantiation time, we
		   will check that B<N>::i is actually a constant.  */
		return decl;
	      }
	    /* Check to see if DECL is a local variable in a context
	       where that is forbidden.  */
	    if (parser->local_variables_forbidden_p
		&& local_variable_p (decl))
	      {
		/* It might be that we only found DECL because we are
		   trying to be generous with pre-ISO scoping rules.
		   For example, consider:

		     int i;
		     void g() {
		       for (int i = 0; i < 10; ++i) {}
		       extern void f(int j = i);
		     }

		   Here, name look up will originally find the out
		   of scope `i'.  We need to issue a warning message,
		   but then use the global `i'.  */
		decl = check_for_out_of_scope_variable (decl);
		if (local_variable_p (decl))
		  {
		    error_at (id_expr_token->location,
			      "local variable %qD may not appear in this context",
			      decl);
		    return error_mark_node;
		  }
	      }
	  }

	decl = (finish_id_expression
		(id_expression, decl, parser->scope,
		 idk,
		 parser->integral_constant_expression_p,
		 parser->allow_non_integral_constant_expression_p,
		 &parser->non_integral_constant_expression_p,
		 template_p, done, address_p,
		 template_arg_p,
		 &error_msg,
		 id_expr_token->location));
	if (error_msg)
	  cp_parser_error (parser, error_msg);
	return decl;
      }

      /* Anything else is an error.  */
    default:
      cp_parser_error (parser, "expected primary-expression");
      return error_mark_node;
    }
}
/* Parse an id-expression.
id-expression:
unqualified-id
qualified-id
qualified-id:
:: [opt] nested-name-specifier template [opt] unqualified-id
:: identifier
:: operator-function-id
:: template-id
Return a representation of the unqualified portion of the
identifier. Sets PARSER->SCOPE to the qualifying scope if there is
a `::' or nested-name-specifier.
Often, if the id-expression was a qualified-id, the caller will
want to make a SCOPE_REF to represent the qualified-id. This
function does not do this in order to avoid wastefully creating
SCOPE_REFs when they are not required.
If TEMPLATE_KEYWORD_P is true, then we have just seen the
`template' keyword.
If CHECK_DEPENDENCY_P is false, then names are looked up inside
uninstantiated templates.
If *TEMPLATE_P is non-NULL, it is set to true iff the
`template' keyword is used to explicitly indicate that the entity
named is a template.
If DECLARATOR_P is true, the id-expression is appearing as part of
a declarator, rather than as part of an expression. */
static tree
cp_parser_id_expression (cp_parser *parser,
			 bool template_keyword_p,
			 bool check_dependency_p,
			 bool *template_p,
			 bool declarator_p,
			 bool optional_p)
{
  bool global_scope_p;
  bool nested_name_specifier_p;

  /* Assume the `template' keyword was not used.  */
  if (template_p)
    *template_p = template_keyword_p;

  /* Look for the optional `::' operator.  */
  global_scope_p
    = (cp_parser_global_scope_opt (parser, /*current_scope_valid_p=*/false)
       != NULL_TREE);
  /* Look for the optional nested-name-specifier.  Note that this call
     may set PARSER->SCOPE as a side effect.  */
  nested_name_specifier_p
    = (cp_parser_nested_name_specifier_opt (parser,
					    /*typename_keyword_p=*/false,
					    check_dependency_p,
					    /*type_p=*/false,
					    declarator_p)
       != NULL_TREE);
  /* If there is a nested-name-specifier, then we are looking at
     the first qualified-id production.  */
  if (nested_name_specifier_p)
    {
      tree saved_scope;
      tree saved_object_scope;
      tree saved_qualifying_scope;
      tree unqualified_id;
      bool is_template;

      /* See if the next token is the `template' keyword.  If the
	 caller did not care about the answer, point TEMPLATE_P at a
	 local so the code below is uniform.  */
      if (!template_p)
	template_p = &is_template;
      *template_p = cp_parser_optional_template_keyword (parser);
      /* Name lookup we do during the processing of the
	 unqualified-id might obliterate SCOPE.  */
      saved_scope = parser->scope;
      saved_object_scope = parser->object_scope;
      saved_qualifying_scope = parser->qualifying_scope;
      /* Process the final unqualified-id.  */
      unqualified_id = cp_parser_unqualified_id (parser, *template_p,
						 check_dependency_p,
						 declarator_p,
						 /*optional_p=*/false);
      /* Restore the SAVED_SCOPE for our caller.  */
      parser->scope = saved_scope;
      parser->object_scope = saved_object_scope;
      parser->qualifying_scope = saved_qualifying_scope;

      return unqualified_id;
    }
  /* Otherwise, if we are in global scope, then we are looking at one
     of the other qualified-id productions.  */
  else if (global_scope_p)
    {
      cp_token *token;
      tree id;

      /* Peek at the next token.  */
      token = cp_lexer_peek_token (parser->lexer);

      /* If it's an identifier, and the next token is not a "<", then
	 we can avoid the template-id case.  This is an optimization
	 for this common case.  */
      if (token->type == CPP_NAME
	  && !cp_parser_nth_token_starts_template_argument_list_p
	       (parser, 2))
	return cp_parser_identifier (parser);

      cp_parser_parse_tentatively (parser);
      /* Try a template-id.  */
      id = cp_parser_template_id (parser,
				  /*template_keyword_p=*/false,
				  /*check_dependency_p=*/true,
				  declarator_p);
      /* If that worked, we're done.  */
      if (cp_parser_parse_definitely (parser))
	return id;

      /* Peek at the next token.  (Changes in the token buffer may
	 have invalidated the pointer obtained above.)  */
      token = cp_lexer_peek_token (parser->lexer);

      switch (token->type)
	{
	case CPP_NAME:
	  return cp_parser_identifier (parser);

	case CPP_KEYWORD:
	  if (token->keyword == RID_OPERATOR)
	    return cp_parser_operator_function_id (parser);
	  /* Fall through.  */

	default:
	  cp_parser_error (parser, "expected id-expression");
	  return error_mark_node;
	}
    }
  /* No `::' and no nested-name-specifier: a plain unqualified-id.  */
  else
    return cp_parser_unqualified_id (parser, template_keyword_p,
				     /*check_dependency_p=*/true,
				     declarator_p,
				     optional_p);
}
/* Parse an unqualified-id.
unqualified-id:
identifier
operator-function-id
conversion-function-id
~ class-name
template-id
If TEMPLATE_KEYWORD_P is TRUE, we have just seen the `template'
keyword, in a construct like `A::template ...'.
Returns a representation of unqualified-id. For the `identifier'
production, an IDENTIFIER_NODE is returned. For the `~ class-name'
production a BIT_NOT_EXPR is returned; the operand of the
BIT_NOT_EXPR is an IDENTIFIER_NODE for the class-name. For the
other productions, see the documentation accompanying the
corresponding parsing functions. If CHECK_DEPENDENCY_P is false,
names are looked up in uninstantiated templates. If DECLARATOR_P
is true, the unqualified-id is appearing as part of a declarator,
rather than as part of an expression. */
static tree
cp_parser_unqualified_id (cp_parser* parser,
			  bool template_keyword_p,
			  bool check_dependency_p,
			  bool declarator_p,
			  bool optional_p)
{
  cp_token *token;

  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);

  switch (token->type)
    {
    case CPP_NAME:
      {
	tree id;

	/* We don't know yet whether or not this will be a
	   template-id.  */
	cp_parser_parse_tentatively (parser);
	/* Try a template-id.  */
	id = cp_parser_template_id (parser, template_keyword_p,
				    check_dependency_p,
				    declarator_p);
	/* If it worked, we're done.  */
	if (cp_parser_parse_definitely (parser))
	  return id;
	/* Otherwise, it's an ordinary identifier.  */
	return cp_parser_identifier (parser);
      }

    case CPP_TEMPLATE_ID:
      return cp_parser_template_id (parser, template_keyword_p,
				    check_dependency_p,
				    declarator_p);

    case CPP_COMPL:
      {
	/* A `~' introduces a destructor-name (`~ class-name').  */
	tree type_decl;
	tree qualifying_scope;
	tree object_scope;
	tree scope;
	bool done;

	/* Consume the `~' token.  */
	cp_lexer_consume_token (parser->lexer);
	/* Parse the class-name.  The standard, as written, seems to
	   say that:

	     template <typename T> struct S { ~S (); };
	     template <typename T> S<T>::~S() {}

	   is invalid, since `~' must be followed by a class-name, but
	   `S<T>' is dependent, and so not known to be a class.
	   That's not right; we need to look in uninstantiated
	   templates.  A further complication arises from:

	     template <typename T> void f(T t) {
	       t.T::~T();
	     }

	   Here, it is not possible to look up `T' in the scope of `T'
	   itself.  We must look in both the current scope, and the
	   scope of the containing complete expression.

	   Yet another issue is:

	     struct S {
	       int S;
	       ~S();
	     };

	     S::~S() {}

	   The standard does not seem to say that the `S' in `~S'
	   should refer to the type `S' and not the data member
	   `S::S'.  */

	/* DR 244 says that we look up the name after the "~" in the
	   same scope as we looked up the qualifying name.  That idea
	   isn't fully worked out; it's more complicated than that.  */
	scope = parser->scope;
	object_scope = parser->object_scope;
	qualifying_scope = parser->qualifying_scope;

	/* Check for invalid scopes.  */
	if (scope == error_mark_node)
	  {
	    /* Swallow the following identifier (if any) so that we can
	       keep parsing past the bad destructor-name.  */
	    if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
	      cp_lexer_consume_token (parser->lexer);
	    return error_mark_node;
	  }
	if (scope && TREE_CODE (scope) == NAMESPACE_DECL)
	  {
	    if (!cp_parser_uncommitted_to_tentative_parse_p (parser))
	      error_at (token->location,
			"scope %qT before %<~%> is not a class-name",
			scope);
	    cp_parser_simulate_error (parser);
	    if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
	      cp_lexer_consume_token (parser->lexer);
	    return error_mark_node;
	  }
	gcc_assert (!scope || TYPE_P (scope));

	/* If the name is of the form "X::~X" it's OK even if X is a
	   typedef.  */
	token = cp_lexer_peek_token (parser->lexer);
	if (scope
	    && token->type == CPP_NAME
	    && (cp_lexer_peek_nth_token (parser->lexer, 2)->type
		!= CPP_LESS)
	    && (token->u.value == TYPE_IDENTIFIER (scope)
		|| (CLASS_TYPE_P (scope)
		    && constructor_name_p (token->u.value, scope))))
	  {
	    cp_lexer_consume_token (parser->lexer);
	    return build_nt (BIT_NOT_EXPR, scope);
	  }

	/* If there was an explicit qualification (S::~T), first look
	   in the scope given by the qualification (i.e., S).

	   Note: in the calls to cp_parser_class_name below we pass
	   typename_type so that lookup finds the injected-class-name
	   rather than the constructor.

	   Each candidate scope is tried tentatively in turn until one
	   yields a class-name; DONE records the first success.  */
	done = false;
	type_decl = NULL_TREE;
	if (scope)
	  {
	    cp_parser_parse_tentatively (parser);
	    type_decl = cp_parser_class_name (parser,
					      /*typename_keyword_p=*/false,
					      /*template_keyword_p=*/false,
					      typename_type,
					      /*check_dependency=*/false,
					      /*class_head_p=*/false,
					      declarator_p);
	    if (cp_parser_parse_definitely (parser))
	      done = true;
	  }
	/* In "N::S::~S", look in "N" as well.  */
	if (!done && scope && qualifying_scope)
	  {
	    cp_parser_parse_tentatively (parser);
	    parser->scope = qualifying_scope;
	    parser->object_scope = NULL_TREE;
	    parser->qualifying_scope = NULL_TREE;
	    type_decl
	      = cp_parser_class_name (parser,
				      /*typename_keyword_p=*/false,
				      /*template_keyword_p=*/false,
				      typename_type,
				      /*check_dependency=*/false,
				      /*class_head_p=*/false,
				      declarator_p);
	    if (cp_parser_parse_definitely (parser))
	      done = true;
	  }
	/* In "p->S::~T", look in the scope given by "*p" as well.  */
	else if (!done && object_scope)
	  {
	    cp_parser_parse_tentatively (parser);
	    parser->scope = object_scope;
	    parser->object_scope = NULL_TREE;
	    parser->qualifying_scope = NULL_TREE;
	    type_decl
	      = cp_parser_class_name (parser,
				      /*typename_keyword_p=*/false,
				      /*template_keyword_p=*/false,
				      typename_type,
				      /*check_dependency=*/false,
				      /*class_head_p=*/false,
				      declarator_p);
	    if (cp_parser_parse_definitely (parser))
	      done = true;
	  }
	/* Look in the surrounding context.  */
	if (!done)
	  {
	    parser->scope = NULL_TREE;
	    parser->object_scope = NULL_TREE;
	    parser->qualifying_scope = NULL_TREE;
	    if (processing_template_decl)
	      cp_parser_parse_tentatively (parser);
	    type_decl
	      = cp_parser_class_name (parser,
				      /*typename_keyword_p=*/false,
				      /*template_keyword_p=*/false,
				      typename_type,
				      /*check_dependency=*/false,
				      /*class_head_p=*/false,
				      declarator_p);
	    if (processing_template_decl
		&& ! cp_parser_parse_definitely (parser))
	      {
		/* We couldn't find a type with this name, so just accept
		   it and check for a match at instantiation time.  */
		type_decl = cp_parser_identifier (parser);
		if (type_decl != error_mark_node)
		  type_decl = build_nt (BIT_NOT_EXPR, type_decl);
		return type_decl;
	      }
	  }
	/* If an error occurred, assume that the name of the
	   destructor is the same as the name of the qualifying
	   class.  That allows us to keep parsing after running
	   into ill-formed destructor names.  */
	if (type_decl == error_mark_node && scope)
	  return build_nt (BIT_NOT_EXPR, scope);
	else if (type_decl == error_mark_node)
	  return error_mark_node;

	/* Check that destructor name and scope match.  */
	if (declarator_p && scope && !check_dtor_name (scope, type_decl))
	  {
	    if (!cp_parser_uncommitted_to_tentative_parse_p (parser))
	      error_at (token->location,
			"declaration of %<~%T%> as member of %qT",
			type_decl, scope);
	    cp_parser_simulate_error (parser);
	    return error_mark_node;
	  }

	/* [class.dtor]

	   A typedef-name that names a class shall not be used as the
	   identifier in the declarator for a destructor declaration.  */
	if (declarator_p
	    && !DECL_IMPLICIT_TYPEDEF_P (type_decl)
	    && !DECL_SELF_REFERENCE_P (type_decl)
	    && !cp_parser_uncommitted_to_tentative_parse_p (parser))
	  error_at (token->location,
		    "typedef-name %qD used as destructor declarator",
		    type_decl);

	return build_nt (BIT_NOT_EXPR, TREE_TYPE (type_decl));
      }

    case CPP_KEYWORD:
      if (token->keyword == RID_OPERATOR)
	{
	  tree id;

	  /* This could be a template-id, so we try that first.  */
	  cp_parser_parse_tentatively (parser);
	  /* Try a template-id.  */
	  id = cp_parser_template_id (parser, template_keyword_p,
				      /*check_dependency_p=*/true,
				      declarator_p);
	  /* If that worked, we're done.  */
	  if (cp_parser_parse_definitely (parser))
	    return id;
	  /* We still don't know whether we're looking at an
	     operator-function-id or a conversion-function-id.  */
	  cp_parser_parse_tentatively (parser);
	  /* Try an operator-function-id.  */
	  id = cp_parser_operator_function_id (parser);
	  /* If that didn't work, try a conversion-function-id.  */
	  if (!cp_parser_parse_definitely (parser))
	    id = cp_parser_conversion_function_id (parser);
	  else if (UDLIT_OPER_P (id))
	    {
	      /* 17.6.3.3.5  */
	      const char *name = UDLIT_OP_SUFFIX (id);
	      if (name[0] != '_' && !in_system_header)
		warning (0, "literal operator suffixes not preceded by %<_%>"
			    " are reserved for future standardization");
	    }

	  return id;
	}
      /* Fall through.  */

    default:
      if (optional_p)
	return NULL_TREE;
      cp_parser_error (parser, "expected unqualified-id");
      return error_mark_node;
    }
}
/* Parse an (optional) nested-name-specifier.
nested-name-specifier: [C++98]
class-or-namespace-name :: nested-name-specifier [opt]
class-or-namespace-name :: template nested-name-specifier [opt]
nested-name-specifier: [C++0x]
type-name ::
namespace-name ::
nested-name-specifier identifier ::
nested-name-specifier template [opt] simple-template-id ::
PARSER->SCOPE should be set appropriately before this function is
called. TYPENAME_KEYWORD_P is TRUE if the `typename' keyword is in
effect. TYPE_P is TRUE if non-type bindings should be ignored
in name lookups.
Sets PARSER->SCOPE to the class (TYPE) or namespace
(NAMESPACE_DECL) specified by the nested-name-specifier, or leaves
it unchanged if there is no nested-name-specifier. Returns the new
scope iff there is a nested-name-specifier, or NULL_TREE otherwise.
If IS_DECLARATION is TRUE, the nested-name-specifier is known to be
part of a declaration and/or decl-specifier. */
static tree
cp_parser_nested_name_specifier_opt (cp_parser *parser,
				     bool typename_keyword_p,
				     bool check_dependency_p,
				     bool type_p,
				     bool is_declaration)
{
  /* True once at least one "name ::" component has been parsed
     successfully.  */
  bool success = false;
  /* Position of the first token of the specifier; remains 0 (unset)
     unless we are in a fully tentative parse and may later rewrite
     the token stream.  */
  cp_token_position start = 0;
  cp_token *token;

  /* Remember where the nested-name-specifier starts.  */
  if (cp_parser_uncommitted_to_tentative_parse_p (parser))
    {
      start = cp_lexer_token_position (parser->lexer, false);
      /* Defer access checks so they can be stashed in the
	 CPP_NESTED_NAME_SPECIFIER token created at the end and
	 replayed if this token stream is re-parsed.  */
      push_deferring_access_checks (dk_deferred);
    }

  /* Each iteration consumes one "class-or-namespace-name ::"
     component; the loop exits via "break" when no further component
     can be parsed.  */
  while (true)
    {
      tree new_scope;
      tree old_scope;
      tree saved_qualifying_scope;
      bool template_keyword_p;

      /* Spot cases that cannot be the beginning of a
	 nested-name-specifier.  */
      token = cp_lexer_peek_token (parser->lexer);

      /* If the next token is CPP_NESTED_NAME_SPECIFIER, just process
	 the already parsed nested-name-specifier.  */
      if (token->type == CPP_NESTED_NAME_SPECIFIER)
	{
	  /* Grab the nested-name-specifier and continue the loop.  */
	  cp_parser_pre_parsed_nested_name_specifier (parser);
	  /* If we originally encountered this nested-name-specifier
	     with IS_DECLARATION set to false, we will not have
	     resolved TYPENAME_TYPEs, so we must do so here.  */
	  if (is_declaration
	      && TREE_CODE (parser->scope) == TYPENAME_TYPE)
	    {
	      new_scope = resolve_typename_type (parser->scope,
						 /*only_current_p=*/false);
	      if (TREE_CODE (new_scope) != TYPENAME_TYPE)
		parser->scope = new_scope;
	    }
	  success = true;
	  continue;
	}

      /* Spot cases that cannot be the beginning of a
	 nested-name-specifier.  On the second and subsequent times
	 through the loop, we look for the `template' keyword.  */
      if (success && token->keyword == RID_TEMPLATE)
	;
      /* A template-id can start a nested-name-specifier.  */
      else if (token->type == CPP_TEMPLATE_ID)
	;
      /* DR 743: decltype can be used in a nested-name-specifier.  */
      else if (token_is_decltype (token))
	;
      else
	{
	  /* If the next token is not an identifier, then it is
	     definitely not a type-name or namespace-name.  */
	  if (token->type != CPP_NAME)
	    break;
	  /* If the following token is neither a `<' (to begin a
	     template-id), nor a `::', then we are not looking at a
	     nested-name-specifier.  */
	  token = cp_lexer_peek_nth_token (parser->lexer, 2);

	  /* Error recovery: diagnose "A:B" written for "A::B" when
	     the token after the colon is another identifier, and
	     repair the token in place so parsing can continue.  */
	  if (token->type == CPP_COLON
	      && parser->colon_corrects_to_scope_p
	      && cp_lexer_peek_nth_token (parser->lexer, 3)->type == CPP_NAME)
	    {
	      error_at (token->location,
			"found %<:%> in nested-name-specifier, expected %<::%>");
	      token->type = CPP_SCOPE;
	    }

	  if (token->type != CPP_SCOPE
	      && !cp_parser_nth_token_starts_template_argument_list_p
		  (parser, 2))
	    break;
	}

      /* The nested-name-specifier is optional, so we parse
	 tentatively.  */
      cp_parser_parse_tentatively (parser);

      /* Look for the optional `template' keyword, if this isn't the
	 first time through the loop.  */
      if (success)
	template_keyword_p = cp_parser_optional_template_keyword (parser);
      else
	template_keyword_p = false;

      /* Save the old scope since the name lookup we are about to do
	 might destroy it.  */
      old_scope = parser->scope;
      saved_qualifying_scope = parser->qualifying_scope;
      /* In a declarator-id like "X<T>::I::Y<T>" we must be able to
	 look up names in "X<T>::I" in order to determine that "Y" is
	 a template.  So, if we have a typename at this point, we make
	 an effort to look through it.  */
      if (is_declaration
	  && !typename_keyword_p
	  && parser->scope
	  && TREE_CODE (parser->scope) == TYPENAME_TYPE)
	parser->scope = resolve_typename_type (parser->scope,
					       /*only_current_p=*/false);
      /* Parse the qualifying entity.  */
      new_scope
	= cp_parser_qualifying_entity (parser,
				       typename_keyword_p,
				       template_keyword_p,
				       check_dependency_p,
				       type_p,
				       is_declaration);
      /* Look for the `::' token.  */
      cp_parser_require (parser, CPP_SCOPE, RT_SCOPE);

      /* If we found what we wanted, we keep going; otherwise, we're
	 done.  */
      if (!cp_parser_parse_definitely (parser))
	{
	  bool error_p = false;

	  /* Restore the OLD_SCOPE since it was valid before the
	     failed attempt at finding the last
	     class-or-namespace-name.  */
	  parser->scope = old_scope;
	  parser->qualifying_scope = saved_qualifying_scope;

	  /* If the next token is a decltype, and the one after that is a
	     `::', then the decltype has failed to resolve to a class or
	     enumeration type.  Give this error even when parsing
	     tentatively since it can't possibly be valid--and we're going
	     to replace it with a CPP_NESTED_NAME_SPECIFIER below, so we
	     won't get another chance.  */
	  if (cp_lexer_next_token_is (parser->lexer, CPP_DECLTYPE)
	      && (cp_lexer_peek_nth_token (parser->lexer, 2)->type
		  == CPP_SCOPE))
	    {
	      token = cp_lexer_consume_token (parser->lexer);
	      error_at (token->location, "decltype evaluates to %qT, "
			"which is not a class or enumeration type",
			token->u.value);
	      parser->scope = error_mark_node;
	      error_p = true;
	      /* As below.  */
	      success = true;
	      cp_lexer_consume_token (parser->lexer);
	    }

	  if (cp_parser_uncommitted_to_tentative_parse_p (parser))
	    break;
	  /* If the next token is an identifier, and the one after
	     that is a `::', then any valid interpretation would have
	     found a class-or-namespace-name.  */
	  while (cp_lexer_next_token_is (parser->lexer, CPP_NAME)
		 && (cp_lexer_peek_nth_token (parser->lexer, 2)->type
		     == CPP_SCOPE)
		 && (cp_lexer_peek_nth_token (parser->lexer, 3)->type
		     != CPP_COMPL))
	    {
	      token = cp_lexer_consume_token (parser->lexer);
	      /* Issue a diagnosis for the first bogus component only;
		 subsequent components are consumed silently for
		 recovery.  */
	      if (!error_p)
		{
		  if (!token->ambiguous_p)
		    {
		      tree decl;
		      tree ambiguous_decls;
		      decl = cp_parser_lookup_name (parser, token->u.value,
						    none_type,
						    /*is_template=*/false,
						    /*is_namespace=*/false,
						    /*check_dependency=*/true,
						    &ambiguous_decls,
						    token->location);
		      if (TREE_CODE (decl) == TEMPLATE_DECL)
			error_at (token->location,
				  "%qD used without template parameters",
				  decl);
		      else if (ambiguous_decls)
			{
			  error_at (token->location,
				    "reference to %qD is ambiguous",
				    token->u.value);
			  print_candidates (ambiguous_decls);
			  decl = error_mark_node;
			}
		      else
			{
			  if (cxx_dialect != cxx98)
			    cp_parser_name_lookup_error
			    (parser, token->u.value, decl, NLE_NOT_CXX98,
			     token->location);
			  else
			    cp_parser_name_lookup_error
			    (parser, token->u.value, decl, NLE_CXX98,
			     token->location);
			}
		    }
		  parser->scope = error_mark_node;
		  error_p = true;
		  /* Treat this as a successful nested-name-specifier
		     due to:

		     [basic.lookup.qual]

		     If the name found is not a class-name (clause
		     _class_) or namespace-name (_namespace.def_), the
		     program is ill-formed.  */
		  success = true;
		}
	      cp_lexer_consume_token (parser->lexer);
	    }
	  break;
	}
      /* We've found one valid nested-name-specifier.  */
      success = true;

      /* Name lookup always gives us a DECL.  */
      if (TREE_CODE (new_scope) == TYPE_DECL)
	new_scope = TREE_TYPE (new_scope);
      /* Uses of "template" must be followed by actual templates.  */
      if (template_keyword_p
	  && !(CLASS_TYPE_P (new_scope)
	       && ((CLASSTYPE_USE_TEMPLATE (new_scope)
		    && PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (new_scope)))
		   || CLASSTYPE_IS_TEMPLATE (new_scope)))
	  && !(TREE_CODE (new_scope) == TYPENAME_TYPE
	       && (TREE_CODE (TYPENAME_TYPE_FULLNAME (new_scope))
		   == TEMPLATE_ID_EXPR)))
	permerror (input_location, TYPE_P (new_scope)
		   ? G_("%qT is not a template")
		   : G_("%qD is not a template"),
		   new_scope);
      /* If it is a class scope, try to complete it; we are about to
	 be looking up names inside the class.  */
      if (TYPE_P (new_scope)
	  /* Since checking types for dependency can be expensive,
	     avoid doing it if the type is already complete.  */
	  && !COMPLETE_TYPE_P (new_scope)
	  /* Do not try to complete dependent types.  */
	  && !dependent_type_p (new_scope))
	{
	  new_scope = complete_type (new_scope);
	  /* If it is a typedef to current class, use the current
	     class instead, as the typedef won't have any names inside
	     it yet.  */
	  if (!COMPLETE_TYPE_P (new_scope)
	      && currently_open_class (new_scope))
	    new_scope = TYPE_MAIN_VARIANT (new_scope);
	}
      /* Make sure we look in the right scope the next time through
	 the loop.  */
      parser->scope = new_scope;
    }

  /* If parsing tentatively, replace the sequence of tokens that makes
     up the nested-name-specifier with a CPP_NESTED_NAME_SPECIFIER
     token.  That way, should we re-parse the token stream, we will
     not have to repeat the effort required to do the parse, nor will
     we issue duplicate error messages.  */
  if (success && start)
    {
      cp_token *token;

      token = cp_lexer_token_at (parser->lexer, start);
      /* Reset the contents of the START token.  */
      token->type = CPP_NESTED_NAME_SPECIFIER;
      /* Retrieve any deferred checks.  Do not pop this access checks yet
	 so the memory will not be reclaimed during token replacing below.  */
      token->u.tree_check_value = ggc_alloc_cleared_tree_check ();
      token->u.tree_check_value->value = parser->scope;
      token->u.tree_check_value->checks = get_deferred_access_checks ();
      token->u.tree_check_value->qualifying_scope =
	parser->qualifying_scope;
      token->keyword = RID_MAX;

      /* Purge all subsequent tokens.  */
      cp_lexer_purge_tokens_after (parser->lexer, start);
    }

  if (start)
    pop_to_parent_deferring_access_checks ();

  /* Return the scope parsed, or NULL_TREE when no nested-name-specifier
     was present; PARSER->SCOPE is also left pointing at the result.  */
  return success ? parser->scope : NULL_TREE;
}
/* Parse a nested-name-specifier. See
cp_parser_nested_name_specifier_opt for details. This function
   behaves identically, except that it will issue an error if no
nested-name-specifier is present. */
static tree
cp_parser_nested_name_specifier (cp_parser *parser,
				 bool typename_keyword_p,
				 bool check_dependency_p,
				 bool type_p,
				 bool is_declaration)
{
  /* Delegate the actual parsing to the _opt variant, which returns
     NULL_TREE when no nested-name-specifier is present.  */
  tree scope = cp_parser_nested_name_specifier_opt (parser,
						    typename_keyword_p,
						    check_dependency_p,
						    type_p,
						    is_declaration);

  /* Unlike the _opt variant, a missing nested-name-specifier is an
     error here; make sure PARSER->SCOPE does not keep a stale value.  */
  if (!scope)
    {
      cp_parser_error (parser, "expected nested-name-specifier");
      parser->scope = NULL_TREE;
    }

  return scope;
}
/* Parse the qualifying entity in a nested-name-specifier. For C++98,
this is either a class-name or a namespace-name (which corresponds
to the class-or-namespace-name production in the grammar). For
C++0x, it can also be a type-name that refers to an enumeration
type or a simple-template-id.
TYPENAME_KEYWORD_P is TRUE iff the `typename' keyword is in effect.
TEMPLATE_KEYWORD_P is TRUE iff the `template' keyword is in effect.
CHECK_DEPENDENCY_P is FALSE iff dependent names should be looked up.
TYPE_P is TRUE iff the next name should be taken as a class-name,
   even if the same name is declared to be another entity in the same
scope.
Returns the class (TYPE_DECL) or namespace (NAMESPACE_DECL)
specified by the class-or-namespace-name. If neither is found the
ERROR_MARK_NODE is returned. */
static tree
cp_parser_qualifying_entity (cp_parser *parser,
			     bool typename_keyword_p,
			     bool template_keyword_p,
			     bool check_dependency_p,
			     bool type_p,
			     bool is_declaration)
{
  tree saved_scope;
  tree saved_qualifying_scope;
  tree saved_object_scope;
  tree scope;
  /* True when only a class-name is admissible, so no tentative
     fallback to type-name or namespace-name is attempted.  */
  bool only_class_p;
  bool successful_parse_p;

  /* DR 743: decltype can appear in a nested-name-specifier.  */
  if (cp_lexer_next_token_is_decltype (parser->lexer))
    {
      scope = cp_parser_decltype (parser);
      /* Only class and enumeration types are valid qualifying
	 entities; anything else is a (possibly tentative) error.  */
      if (TREE_CODE (scope) != ENUMERAL_TYPE
	  && !MAYBE_CLASS_TYPE_P (scope))
	{
	  cp_parser_simulate_error (parser);
	  return error_mark_node;
	}
      /* Return the TYPE_DECL when there is one, matching the DECL
	 convention of the other paths below.  */
      if (TYPE_NAME (scope))
	scope = TYPE_NAME (scope);
      return scope;
    }

  /* Before we try to parse the class-name, we must save away the
     current PARSER->SCOPE since cp_parser_class_name will destroy
     it.  */
  saved_scope = parser->scope;
  saved_qualifying_scope = parser->qualifying_scope;
  saved_object_scope = parser->object_scope;
  /* Try for a class-name first.  If the SAVED_SCOPE is a type, then
     there is no need to look for a namespace-name.  */
  only_class_p = template_keyword_p
    || (saved_scope && TYPE_P (saved_scope) && cxx_dialect == cxx98);
  if (!only_class_p)
    cp_parser_parse_tentatively (parser);
  scope = cp_parser_class_name (parser,
				typename_keyword_p,
				template_keyword_p,
				type_p ? class_type : none_type,
				check_dependency_p,
				/*class_head_p=*/false,
				is_declaration);
  successful_parse_p = only_class_p || cp_parser_parse_definitely (parser);
  /* If that didn't work and we're in C++0x mode, try for a type-name.  */
  if (!only_class_p
      && cxx_dialect != cxx98
      && !successful_parse_p)
    {
      /* Restore the saved scope.  */
      parser->scope = saved_scope;
      parser->qualifying_scope = saved_qualifying_scope;
      parser->object_scope = saved_object_scope;

      /* Parse tentatively.  */
      cp_parser_parse_tentatively (parser);

      /* Parse a type-name  */
      scope = cp_parser_type_name (parser);

      /* "If the name found does not designate a namespace or a class,
	 enumeration, or dependent type, the program is ill-formed."

	 We cover classes and dependent types above and namespaces below,
	 so this code is only looking for enums.  */
      if (!scope || TREE_CODE (scope) != TYPE_DECL
	  || TREE_CODE (TREE_TYPE (scope)) != ENUMERAL_TYPE)
	cp_parser_simulate_error (parser);

      successful_parse_p = cp_parser_parse_definitely (parser);
    }
  /* If that didn't work, try for a namespace-name.  */
  if (!only_class_p && !successful_parse_p)
    {
      /* Restore the saved scope.  */
      parser->scope = saved_scope;
      parser->qualifying_scope = saved_qualifying_scope;
      parser->object_scope = saved_object_scope;
      /* If we are not looking at an identifier followed by the scope
	 resolution operator, then this is not part of a
	 nested-name-specifier.  (Note that this function is only used
	 to parse the components of a nested-name-specifier.)  */
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_NAME)
	  || cp_lexer_peek_nth_token (parser->lexer, 2)->type != CPP_SCOPE)
	return error_mark_node;
      /* This call is not tentative: any diagnostic it emits is
	 final.  */
      scope = cp_parser_namespace_name (parser);
    }

  return scope;
}
/* Parse a postfix-expression.
postfix-expression:
primary-expression
postfix-expression [ expression ]
postfix-expression ( expression-list [opt] )
simple-type-specifier ( expression-list [opt] )
typename :: [opt] nested-name-specifier identifier
( expression-list [opt] )
typename :: [opt] nested-name-specifier template [opt] template-id
( expression-list [opt] )
postfix-expression . template [opt] id-expression
postfix-expression -> template [opt] id-expression
postfix-expression . pseudo-destructor-name
postfix-expression -> pseudo-destructor-name
postfix-expression ++
postfix-expression --
dynamic_cast < type-id > ( expression )
static_cast < type-id > ( expression )
reinterpret_cast < type-id > ( expression )
const_cast < type-id > ( expression )
typeid ( expression )
typeid ( type-id )
GNU Extension:
postfix-expression:
( type-id ) { initializer-list , [opt] }
This extension is a GNU version of the C99 compound-literal
construct. (The C99 grammar uses `type-name' instead of `type-id',
but they are essentially the same concept.)
If ADDRESS_P is true, the postfix expression is the operand of the
`&' operator. CAST_P is true if this expression is the target of a
cast.
If MEMBER_ACCESS_ONLY_P, we only allow postfix expressions that are
class member access expressions [expr.ref].
Returns a representation of the expression. */
static tree
cp_parser_postfix_expression (cp_parser *parser, bool address_p, bool cast_p,
			      bool member_access_only_p,
			      cp_id_kind * pidk_return)
{
  cp_token *token;
  enum rid keyword;
  /* What kind of id-expression (if any) the primary expression is;
     consulted below to decide about argument-dependent lookup and
     qualified (non-virtual) calls.  */
  cp_id_kind idk = CP_ID_KIND_NONE;
  tree postfix_expression = NULL_TREE;
  /* True iff the most recently parsed postfix operator was `.' or
     `->', i.e. a class member access.  */
  bool is_member_access = false;

  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);
  /* Some of the productions are determined by keywords.  */
  keyword = token->keyword;
  switch (keyword)
    {
    case RID_DYNCAST:
    case RID_STATCAST:
    case RID_REINTCAST:
    case RID_CONSTCAST:
      {
	tree type;
	tree expression;
	const char *saved_message;

	/* All of these can be handled in the same way from the point
	   of view of parsing.  Begin by consuming the token
	   identifying the cast.  */
	cp_lexer_consume_token (parser->lexer);
	/* New types cannot be defined in the cast.  */
	saved_message = parser->type_definition_forbidden_message;
	parser->type_definition_forbidden_message
	  = G_("types may not be defined in casts");
	/* Look for the opening `<'.  */
	cp_parser_require (parser, CPP_LESS, RT_LESS);
	/* Parse the type to which we are casting.  */
	type = cp_parser_type_id (parser);
	/* Look for the closing `>'.  */
	cp_parser_require (parser, CPP_GREATER, RT_GREATER);
	/* Restore the old message.  */
	parser->type_definition_forbidden_message = saved_message;
	/* And the expression which is being cast.  */
	cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);
	expression = cp_parser_expression (parser, /*cast_p=*/true, & idk);
	cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);

	/* Only type conversions to integral or enumeration types
	   can be used in constant-expressions.  */
	if (!cast_valid_in_integral_constant_expression_p (type)
	    && cp_parser_non_integral_constant_expression (parser, NIC_CAST))
	  return error_mark_node;

	/* Build the cast tree matching the keyword we consumed.  */
	switch (keyword)
	  {
	  case RID_DYNCAST:
	    postfix_expression
	      = build_dynamic_cast (type, expression, tf_warning_or_error);
	    break;
	  case RID_STATCAST:
	    postfix_expression
	      = build_static_cast (type, expression, tf_warning_or_error);
	    break;
	  case RID_REINTCAST:
	    postfix_expression
	      = build_reinterpret_cast (type, expression,
					tf_warning_or_error);
	    break;
	  case RID_CONSTCAST:
	    postfix_expression
	      = build_const_cast (type, expression, tf_warning_or_error);
	    break;
	  default:
	    gcc_unreachable ();
	  }
      }
      break;

    case RID_TYPEID:
      {
	tree type;
	const char *saved_message;
	bool saved_in_type_id_in_expr_p;

	/* Consume the `typeid' token.  */
	cp_lexer_consume_token (parser->lexer);
	/* Look for the `(' token.  */
	cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);
	/* Types cannot be defined in a `typeid' expression.  */
	saved_message = parser->type_definition_forbidden_message;
	parser->type_definition_forbidden_message
	  = G_("types may not be defined in a %<typeid%> expression");
	/* We can't be sure yet whether we're looking at a type-id or an
	   expression.  */
	cp_parser_parse_tentatively (parser);
	/* Try a type-id first.  */
	saved_in_type_id_in_expr_p = parser->in_type_id_in_expr_p;
	parser->in_type_id_in_expr_p = true;
	type = cp_parser_type_id (parser);
	parser->in_type_id_in_expr_p = saved_in_type_id_in_expr_p;
	/* Look for the `)' token.  Otherwise, we can't be sure that
	   we're not looking at an expression: consider `typeid (int
	   (3))', for example.  */
	cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
	/* If all went well, simply lookup the type-id.  */
	if (cp_parser_parse_definitely (parser))
	  postfix_expression = get_typeid (type);
	/* Otherwise, fall back to the expression variant.  */
	else
	  {
	    tree expression;

	    /* Look for an expression.  */
	    expression = cp_parser_expression (parser, /*cast_p=*/false, & idk);
	    /* Compute its typeid.  */
	    postfix_expression = build_typeid (expression);
	    /* Look for the `)' token.  */
	    cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
	  }
	/* Restore the saved message.  */
	parser->type_definition_forbidden_message = saved_message;
	/* `typeid' may not appear in an integral constant expression.  */
	if (cp_parser_non_integral_constant_expression (parser, NIC_TYPEID))
	  return error_mark_node;
      }
      break;

    case RID_TYPENAME:
      {
	tree type;
	/* The syntax permitted here is the same permitted for an
	   elaborated-type-specifier.  */
	type = cp_parser_elaborated_type_specifier (parser,
						    /*is_friend=*/false,
						    /*is_declaration=*/false);
	postfix_expression = cp_parser_functional_cast (parser, type);
      }
      break;

    default:
      {
	tree type;

	/* If the next thing is a simple-type-specifier, we may be
	   looking at a functional cast.  We could also be looking at
	   an id-expression.  So, we try the functional cast, and if
	   that doesn't work we fall back to the primary-expression.  */
	cp_parser_parse_tentatively (parser);
	/* Look for the simple-type-specifier.  */
	type = cp_parser_simple_type_specifier (parser,
						/*decl_specs=*/NULL,
						CP_PARSER_FLAGS_NONE);
	/* Parse the cast itself.  */
	if (!cp_parser_error_occurred (parser))
	  postfix_expression
	    = cp_parser_functional_cast (parser, type);
	/* If that worked, we're done.  */
	if (cp_parser_parse_definitely (parser))
	  break;

	/* If the functional-cast didn't work out, try a
	   compound-literal.  */
	if (cp_parser_allow_gnu_extensions_p (parser)
	    && cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN))
	  {
	    VEC(constructor_elt,gc) *initializer_list = NULL;
	    bool saved_in_type_id_in_expr_p;

	    cp_parser_parse_tentatively (parser);
	    /* Consume the `('.  */
	    cp_lexer_consume_token (parser->lexer);
	    /* Parse the type.  */
	    saved_in_type_id_in_expr_p = parser->in_type_id_in_expr_p;
	    parser->in_type_id_in_expr_p = true;
	    type = cp_parser_type_id (parser);
	    parser->in_type_id_in_expr_p = saved_in_type_id_in_expr_p;
	    /* Look for the `)'.  */
	    cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
	    /* Look for the `{'.  */
	    cp_parser_require (parser, CPP_OPEN_BRACE, RT_OPEN_BRACE);
	    /* If things aren't going well, there's no need to
	       keep going.  */
	    if (!cp_parser_error_occurred (parser))
	      {
		bool non_constant_p;
		/* Parse the initializer-list.  */
		initializer_list
		  = cp_parser_initializer_list (parser, &non_constant_p);
		/* Allow a trailing `,'.  */
		if (cp_lexer_next_token_is (parser->lexer, CPP_COMMA))
		  cp_lexer_consume_token (parser->lexer);
		/* Look for the final `}'.  */
		cp_parser_require (parser, CPP_CLOSE_BRACE, RT_CLOSE_BRACE);
	      }
	    /* If that worked, we're definitely looking at a
	       compound-literal expression.  */
	    if (cp_parser_parse_definitely (parser))
	      {
		/* Warn the user that a compound literal is not
		   allowed in standard C++.  */
		pedwarn (input_location, OPT_pedantic, "ISO C++ forbids compound-literals");
		/* For simplicity, we disallow compound literals in
		   constant-expressions.  We could
		   allow compound literals of integer type, whose
		   initializer was a constant, in constant
		   expressions.  Permitting that usage, as a further
		   extension, would not change the meaning of any
		   currently accepted programs.  (Of course, as
		   compound literals are not part of ISO C++, the
		   standard has nothing to say.)  */
		if (cp_parser_non_integral_constant_expression (parser,
								NIC_NCC))
		  {
		    postfix_expression = error_mark_node;
		    break;
		  }
		/* Form the representation of the compound-literal.  */
		postfix_expression
		  = (finish_compound_literal
		     (type, build_constructor (init_list_type_node,
					       initializer_list),
		      tf_warning_or_error));
		break;
	      }
	  }

	/* It must be a primary-expression.  */
	postfix_expression
	  = cp_parser_primary_expression (parser, address_p, cast_p,
					  /*template_arg_p=*/false,
					  &idk);
      }
      break;
    }

  /* Keep looping until the postfix-expression is complete.  */
  while (true)
    {
      if (idk == CP_ID_KIND_UNQUALIFIED
	  && TREE_CODE (postfix_expression) == IDENTIFIER_NODE
	  && cp_lexer_next_token_is_not (parser->lexer, CPP_OPEN_PAREN))
	/* It is not a Koenig lookup function call.  */
	postfix_expression
	  = unqualified_name_lookup_error (postfix_expression);

      /* Peek at the next token.  */
      token = cp_lexer_peek_token (parser->lexer);

      switch (token->type)
	{
	case CPP_OPEN_SQUARE:
	  postfix_expression
	    = cp_parser_postfix_open_square_expression (parser,
							postfix_expression,
							false);
	  idk = CP_ID_KIND_NONE;
	  is_member_access = false;
	  break;

	case CPP_OPEN_PAREN:
	  /* postfix-expression ( expression-list [opt] ) */
	  {
	    bool koenig_p;
	    bool is_builtin_constant_p;
	    bool saved_integral_constant_expression_p = false;
	    bool saved_non_integral_constant_expression_p = false;
	    VEC(tree,gc) *args;

	    is_member_access = false;

	    is_builtin_constant_p
	      = DECL_IS_BUILTIN_CONSTANT_P (postfix_expression);
	    if (is_builtin_constant_p)
	      {
		/* The whole point of __builtin_constant_p is to allow
		   non-constant expressions to appear as arguments.  */
		saved_integral_constant_expression_p
		  = parser->integral_constant_expression_p;
		saved_non_integral_constant_expression_p
		  = parser->non_integral_constant_expression_p;
		parser->integral_constant_expression_p = false;
	      }
	    args = (cp_parser_parenthesized_expression_list
		    (parser, non_attr,
		     /*cast_p=*/false, /*allow_expansion_p=*/true,
		     /*non_constant_p=*/NULL));
	    if (is_builtin_constant_p)
	      {
		parser->integral_constant_expression_p
		  = saved_integral_constant_expression_p;
		parser->non_integral_constant_expression_p
		  = saved_non_integral_constant_expression_p;
	      }

	    if (args == NULL)
	      {
		postfix_expression = error_mark_node;
		break;
	      }

	    /* Function calls are not permitted in
	       constant-expressions.  */
	    if (! builtin_valid_in_constant_expr_p (postfix_expression)
		&& cp_parser_non_integral_constant_expression (parser,
							       NIC_FUNC_CALL))
	      {
		postfix_expression = error_mark_node;
		release_tree_vector (args);
		break;
	      }

	    koenig_p = false;
	    if (idk == CP_ID_KIND_UNQUALIFIED
		|| idk == CP_ID_KIND_TEMPLATE_ID)
	      {
		if (TREE_CODE (postfix_expression) == IDENTIFIER_NODE)
		  {
		    if (!VEC_empty (tree, args))
		      {
			koenig_p = true;
			if (!any_type_dependent_arguments_p (args))
			  postfix_expression
			    = perform_koenig_lookup (postfix_expression, args,
						     /*include_std=*/false,
						     tf_warning_or_error);
		      }
		    else
		      postfix_expression
			= unqualified_fn_lookup_error (postfix_expression);
		  }
		/* We do not perform argument-dependent lookup if
		   normal lookup finds a non-function, in accordance
		   with the expected resolution of DR 218.  */
		else if (!VEC_empty (tree, args)
			 && is_overloaded_fn (postfix_expression))
		  {
		    tree fn = get_first_fn (postfix_expression);
		    fn = STRIP_TEMPLATE (fn);

		    /* Do not do argument dependent lookup if regular
		       lookup finds a member function or a block-scope
		       function declaration.  [basic.lookup.argdep]/3  */
		    if (!DECL_FUNCTION_MEMBER_P (fn)
			&& !DECL_LOCAL_FUNCTION_P (fn))
		      {
			koenig_p = true;
			if (!any_type_dependent_arguments_p (args))
			  postfix_expression
			    = perform_koenig_lookup (postfix_expression, args,
						     /*include_std=*/false,
						     tf_warning_or_error);
		      }
		  }
	      }

	    if (TREE_CODE (postfix_expression) == COMPONENT_REF)
	      {
		tree instance = TREE_OPERAND (postfix_expression, 0);
		tree fn = TREE_OPERAND (postfix_expression, 1);

		/* In a template, defer building the call until
		   instantiation if anything about it is
		   type-dependent.  */
		if (processing_template_decl
		    && (type_dependent_expression_p (instance)
			|| (!BASELINK_P (fn)
			    && TREE_CODE (fn) != FIELD_DECL)
			|| type_dependent_expression_p (fn)
			|| any_type_dependent_arguments_p (args)))
		  {
		    postfix_expression
		      = build_nt_call_vec (postfix_expression, args);
		    release_tree_vector (args);
		    break;
		  }

		if (BASELINK_P (fn))
		  {
		    postfix_expression
		      = (build_new_method_call
			 (instance, fn, &args, NULL_TREE,
			  (idk == CP_ID_KIND_QUALIFIED
			   ? LOOKUP_NORMAL|LOOKUP_NONVIRTUAL
			   : LOOKUP_NORMAL),
			  /*fn_p=*/NULL,
			  tf_warning_or_error));
		  }
		else
		  postfix_expression
		    = finish_call_expr (postfix_expression, &args,
					/*disallow_virtual=*/false,
					/*koenig_p=*/false,
					tf_warning_or_error);
	      }
	    else if (TREE_CODE (postfix_expression) == OFFSET_REF
		     || TREE_CODE (postfix_expression) == MEMBER_REF
		     || TREE_CODE (postfix_expression) == DOTSTAR_EXPR)
	      postfix_expression = (build_offset_ref_call_from_tree
				    (postfix_expression, &args));
	    else if (idk == CP_ID_KIND_QUALIFIED)
	      /* A call to a static class member, or a namespace-scope
		 function.  */
	      postfix_expression
		= finish_call_expr (postfix_expression, &args,
				    /*disallow_virtual=*/true,
				    koenig_p,
				    tf_warning_or_error);
	    else
	      /* All other function calls.  */
	      postfix_expression
		= finish_call_expr (postfix_expression, &args,
				    /*disallow_virtual=*/false,
				    koenig_p,
				    tf_warning_or_error);

	    /* The POSTFIX_EXPRESSION is certainly no longer an id.  */
	    idk = CP_ID_KIND_NONE;

	    release_tree_vector (args);
	  }
	  break;

	case CPP_DOT:
	case CPP_DEREF:
	  /* postfix-expression . template [opt] id-expression
	     postfix-expression . pseudo-destructor-name
	     postfix-expression -> template [opt] id-expression
	     postfix-expression -> pseudo-destructor-name */

	  /* Consume the `.' or `->' operator.  */
	  cp_lexer_consume_token (parser->lexer);

	  postfix_expression
	    = cp_parser_postfix_dot_deref_expression (parser, token->type,
						      postfix_expression,
						      false, &idk,
						      token->location);

	  is_member_access = true;
	  break;

	case CPP_PLUS_PLUS:
	  /* postfix-expression ++  */
	  /* Consume the `++' token.  */
	  cp_lexer_consume_token (parser->lexer);
	  /* Generate a representation for the complete expression.  */
	  postfix_expression
	    = finish_increment_expr (postfix_expression,
				     POSTINCREMENT_EXPR);
	  /* Increments may not appear in constant-expressions.  */
	  if (cp_parser_non_integral_constant_expression (parser, NIC_INC))
	    postfix_expression = error_mark_node;
	  idk = CP_ID_KIND_NONE;
	  is_member_access = false;
	  break;

	case CPP_MINUS_MINUS:
	  /* postfix-expression --  */
	  /* Consume the `--' token.  */
	  cp_lexer_consume_token (parser->lexer);
	  /* Generate a representation for the complete expression.  */
	  postfix_expression
	    = finish_increment_expr (postfix_expression,
				     POSTDECREMENT_EXPR);
	  /* Decrements may not appear in constant-expressions.  */
	  if (cp_parser_non_integral_constant_expression (parser, NIC_DEC))
	    postfix_expression = error_mark_node;
	  idk = CP_ID_KIND_NONE;
	  is_member_access = false;
	  break;

	default:
	  /* No more postfix operators: the expression is complete.
	     This is the only way out of the loop.  */
	  if (pidk_return != NULL)
	    * pidk_return = idk;
	  if (member_access_only_p)
	    return is_member_access? postfix_expression : error_mark_node;
	  else
	    return postfix_expression;
	}
    }

  /* We should never get here.  */
  gcc_unreachable ();
  return error_mark_node;
}
/* A subroutine of cp_parser_postfix_expression that also gets hijacked
by cp_parser_builtin_offsetof. We're looking for
postfix-expression [ expression ]
postfix-expression [ braced-init-list ] (C++11)
FOR_OFFSETOF is set if we're being called in that context, which
changes how we deal with integer constant expressions. */
static tree
cp_parser_postfix_open_square_expression (cp_parser *parser,
					  tree postfix_expression,
					  bool for_offsetof)
{
  tree index_expr;

  /* Eat the opening `['.  */
  cp_lexer_consume_token (parser->lexer);

  /* Parse the subscript.  In the offsetof context only an integer
     constant expression is accepted; anything looser would require
     analysis of whether offsetof appears in an integral constant
     expression context, so keep the barn door mostly closed.
     Otherwise the subscript is either a braced-init-list (C++11) or
     an ordinary expression.  */
  if (for_offsetof)
    index_expr = cp_parser_constant_expression (parser, false, NULL);
  else if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
    {
      bool expr_nonconst_p;
      maybe_warn_cpp0x (CPP0X_INITIALIZER_LISTS);
      index_expr = cp_parser_braced_list (parser, &expr_nonconst_p);
    }
  else
    index_expr = cp_parser_expression (parser, /*cast_p=*/false, NULL);

  /* Eat the closing `]'.  */
  cp_parser_require (parser, CPP_CLOSE_SQUARE, RT_CLOSE_SQUARE);

  /* Build the subscripting expression.  */
  postfix_expression = grok_array_decl (postfix_expression, index_expr);

  /* Outside of offsetof, array references are not permitted in
     constant-expressions.  */
  if (!for_offsetof
      && cp_parser_non_integral_constant_expression (parser, NIC_ARRAY_REF))
    postfix_expression = error_mark_node;

  return postfix_expression;
}
/* A subroutine of cp_parser_postfix_expression that also gets hijacked
by cp_parser_builtin_offsetof. We're looking for
postfix-expression . template [opt] id-expression
postfix-expression . pseudo-destructor-name
postfix-expression -> template [opt] id-expression
postfix-expression -> pseudo-destructor-name
FOR_OFFSETOF is set if we're being called in that context. That sorta
limits what of the above we'll actually accept, but nevermind.
TOKEN_TYPE is the "." or "->" token, which will already have been
removed from the stream. */
static tree
cp_parser_postfix_dot_deref_expression (cp_parser *parser,
enum cpp_ttype token_type,
tree postfix_expression,
bool for_offsetof, cp_id_kind *idk,
location_t location)
{
tree name;
bool dependent_p;
bool pseudo_destructor_p;
tree scope = NULL_TREE;
/* If this is a `->' operator, dereference the pointer. */
if (token_type == CPP_DEREF)
postfix_expression = build_x_arrow (postfix_expression);
/* Check to see whether or not the expression is type-dependent. */
dependent_p = type_dependent_expression_p (postfix_expression);
/* The identifier following the `->' or `.' is not qualified. */
parser->scope = NULL_TREE;
parser->qualifying_scope = NULL_TREE;
parser->object_scope = NULL_TREE;
*idk = CP_ID_KIND_NONE;
/* Enter the scope corresponding to the type of the object
given by the POSTFIX_EXPRESSION. */
if (!dependent_p && TREE_TYPE (postfix_expression) != NULL_TREE)
{
scope = TREE_TYPE (postfix_expression);
/* According to the standard, no expression should ever have
reference type. Unfortunately, we do not currently match
the standard in this respect in that our internal representation
of an expression may have reference type even when the standard
says it does not. Therefore, we have to manually obtain the
underlying type here. */
scope = non_reference (scope);
/* The type of the POSTFIX_EXPRESSION must be complete. */
if (scope == unknown_type_node)
{
error_at (location, "%qE does not have class type",
postfix_expression);
scope = NULL_TREE;
}
/* Unlike the object expression in other contexts, *this is not
required to be of complete type for purposes of class member
access (5.2.5) outside the member function body. */
else if (scope != current_class_ref
&& !(processing_template_decl && scope == current_class_type))
scope = complete_type_or_else (scope, NULL_TREE);
/* Let the name lookup machinery know that we are processing a
class member access expression. */
parser->context->object_type = scope;
/* If something went wrong, we want to be able to discern that case,
as opposed to the case where there was no SCOPE due to the type
of expression being dependent. */
if (!scope)
scope = error_mark_node;
/* If the SCOPE was erroneous, make the various semantic analysis
functions exit quickly -- and without issuing additional error
messages. */
if (scope == error_mark_node)
postfix_expression = error_mark_node;
}
/* Assume this expression is not a pseudo-destructor access. */
pseudo_destructor_p = false;
/* If the SCOPE is a scalar type, then, if this is a valid program,
we must be looking at a pseudo-destructor-name. If POSTFIX_EXPRESSION
is type dependent, it can be pseudo-destructor-name or something else.
Try to parse it as pseudo-destructor-name first. */
if ((scope && SCALAR_TYPE_P (scope)) || dependent_p)
{
tree s;
tree type;
cp_parser_parse_tentatively (parser);
/* Parse the pseudo-destructor-name. */
s = NULL_TREE;
cp_parser_pseudo_destructor_name (parser, &s, &type);
if (dependent_p
&& (cp_parser_error_occurred (parser)
|| TREE_CODE (type) != TYPE_DECL
|| !SCALAR_TYPE_P (TREE_TYPE (type))))
cp_parser_abort_tentative_parse (parser);
else if (cp_parser_parse_definitely (parser))
{
pseudo_destructor_p = true;
postfix_expression
= finish_pseudo_destructor_expr (postfix_expression,
s, TREE_TYPE (type));
}
}
if (!pseudo_destructor_p)
{
/* If the SCOPE is not a scalar type, we are looking at an
ordinary class member access expression, rather than a
pseudo-destructor-name. */
bool template_p;
cp_token *token = cp_lexer_peek_token (parser->lexer);
/* Parse the id-expression. */
name = (cp_parser_id_expression
(parser,
cp_parser_optional_template_keyword (parser),
/*check_dependency_p=*/true,
&template_p,
/*declarator_p=*/false,
/*optional_p=*/false));
/* In general, build a SCOPE_REF if the member name is qualified.
However, if the name was not dependent and has already been
resolved; there is no need to build the SCOPE_REF. For example;
struct X { void f(); };
template <typename T> void f(T* t) { t->X::f(); }
Even though "t" is dependent, "X::f" is not and has been resolved
to a BASELINK; there is no need to include scope information. */
/* But we do need to remember that there was an explicit scope for
virtual function calls. */
if (parser->scope)
*idk = CP_ID_KIND_QUALIFIED;
/* If the name is a template-id that names a type, we will get a
TYPE_DECL here. That is invalid code. */
if (TREE_CODE (name) == TYPE_DECL)
{
error_at (token->location, "invalid use of %qD", name);
postfix_expression = error_mark_node;
}
else
{
if (name != error_mark_node && !BASELINK_P (name) && parser->scope)
{
if (TREE_CODE (parser->scope) == NAMESPACE_DECL)
{
error_at (token->location, "%<%D::%D%> is not a class member",
parser->scope, name);
postfix_expression = error_mark_node;
}
else
name = build_qualified_name (/*type=*/NULL_TREE,
parser->scope,
name,
template_p);
parser->scope = NULL_TREE;
parser->qualifying_scope = NULL_TREE;
parser->object_scope = NULL_TREE;
}
if (parser->scope && name && BASELINK_P (name))
adjust_result_of_qualified_name_lookup
(name, parser->scope, scope);
postfix_expression
= finish_class_member_access_expr (postfix_expression, name,
template_p,
tf_warning_or_error);
}
}
/* We no longer need to look up names in the scope of the object on
the left-hand side of the `.' or `->' operator. */
parser->context->object_type = NULL_TREE;
/* Outside of offsetof, these operators may not appear in
constant-expressions. */
if (!for_offsetof
&& (cp_parser_non_integral_constant_expression
(parser, token_type == CPP_DEREF ? NIC_ARROW : NIC_POINT)))
postfix_expression = error_mark_node;
return postfix_expression;
}
/* Parse a parenthesized expression-list.

   expression-list:
     assignment-expression
     expression-list, assignment-expression

   attribute-list:
     expression-list
     identifier
     identifier, expression-list

   CAST_P is true if this expression is the target of a cast.

   ALLOW_EXPANSION_P is true if this expression allows expansion of an
   argument pack.

   Returns a vector of trees.  Each element is a representation of an
   assignment-expression.  NULL is returned if the ( and or ) are
   missing.  An empty, but allocated, vector is returned on no
   expressions.  The parentheses are eaten.  IS_ATTRIBUTE_LIST is id_attr
   if we are parsing an attribute list for an attribute that wants a
   plain identifier argument, normal_attr for an attribute that wants
   an expression, or non_attr if we aren't parsing an attribute list.  If
   NON_CONSTANT_P is non-NULL, *NON_CONSTANT_P indicates whether or
   not all of the expressions in the list were constant.  */

static VEC(tree,gc) *
cp_parser_parenthesized_expression_list (cp_parser* parser,
					 int is_attribute_list,
					 bool cast_p,
					 bool allow_expansion_p,
					 bool *non_constant_p)
{
  VEC(tree,gc) *expression_list;
  /* Attribute arguments (other than a leading id_attr identifier) are
     folded eagerly; ordinary expression lists are not.  */
  bool fold_expr_p = is_attribute_list != non_attr;
  tree identifier = NULL_TREE;
  bool saved_greater_than_is_operator_p;

  /* Assume all the expressions will be constant.  */
  if (non_constant_p)
    *non_constant_p = false;

  if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN))
    return NULL;

  expression_list = make_tree_vector ();

  /* Within a parenthesized expression, a `>' token is always
     the greater-than operator.  */
  saved_greater_than_is_operator_p
    = parser->greater_than_is_operator_p;
  parser->greater_than_is_operator_p = true;

  /* Consume expressions until there are no more.  */
  if (cp_lexer_next_token_is_not (parser->lexer, CPP_CLOSE_PAREN))
    while (true)
      {
	tree expr;

	/* At the beginning of attribute lists, check to see if the
	   next token is an identifier.  */
	if (is_attribute_list == id_attr
	    && cp_lexer_peek_token (parser->lexer)->type == CPP_NAME)
	  {
	    cp_token *token;

	    /* Consume the identifier.  */
	    token = cp_lexer_consume_token (parser->lexer);
	    /* Save the identifier.  */
	    identifier = token->u.value;
	  }
	else
	  {
	    bool expr_non_constant_p;

	    /* Parse the next assignment-expression.  */
	    if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
	      {
		/* A braced-init-list.  */
		maybe_warn_cpp0x (CPP0X_INITIALIZER_LISTS);
		expr = cp_parser_braced_list (parser, &expr_non_constant_p);
		if (non_constant_p && expr_non_constant_p)
		  *non_constant_p = true;
	      }
	    else if (non_constant_p)
	      {
		/* The caller cares whether the arguments are constant,
		   so parse a constant-expression that is permitted to
		   be non-constant and record the outcome.  */
		expr = (cp_parser_constant_expression
			(parser, /*allow_non_constant_p=*/true,
			 &expr_non_constant_p));
		if (expr_non_constant_p)
		  *non_constant_p = true;
	      }
	    else
	      expr = cp_parser_assignment_expression (parser, cast_p, NULL);

	    if (fold_expr_p)
	      expr = fold_non_dependent_expr (expr);

	    /* If we have an ellipsis, then this is an expression
	       expansion.  */
	    if (allow_expansion_p
		&& cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS))
	      {
		/* Consume the `...'.  */
		cp_lexer_consume_token (parser->lexer);

		/* Build the argument pack.  */
		expr = make_pack_expansion (expr);
	      }

	    /* Add it to the list.  We add error_mark_node
	       expressions to the list, so that we can still tell if
	       the correct form for a parenthesized expression-list
	       is found.  That gives better errors.  */
	    VEC_safe_push (tree, gc, expression_list, expr);

	    /* After an erroneous expression, resynchronize at the
	       closing `)' or at an unnested `,' (see below).  */
	    if (expr == error_mark_node)
	      goto skip_comma;
	  }

	/* After the first item, attribute lists look the same as
	   expression lists.  */
	is_attribute_list = non_attr;

      get_comma:;
	/* If the next token isn't a `,', then we are done.  */
	if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA))
	  break;

	/* Otherwise, consume the `,' and keep going.  */
	cp_lexer_consume_token (parser->lexer);
      }

  if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN))
    {
      int ending;

    skip_comma:;
      /* We try and resync to an unnested comma, as that will give the
	 user better diagnostics.  */
      ending = cp_parser_skip_to_closing_parenthesis (parser,
						      /*recovering=*/true,
						      /*or_comma=*/true,
						      /*consume_paren=*/true);
      /* A negative result means we stopped at a comma: resume the
	 list-parsing loop.  Zero means we never found the `)'.  */
      if (ending < 0)
	goto get_comma;
      if (!ending)
	{
	  parser->greater_than_is_operator_p
	    = saved_greater_than_is_operator_p;
	  return NULL;
	}
    }

  parser->greater_than_is_operator_p
    = saved_greater_than_is_operator_p;

  /* The leading identifier of an attribute list, if any, comes first
     in the returned vector.  */
  if (identifier)
    VEC_safe_insert (tree, gc, expression_list, 0, identifier);

  return expression_list;
}
/* Parse a pseudo-destructor-name.

   pseudo-destructor-name:
     :: [opt] nested-name-specifier [opt] type-name :: ~ type-name
     :: [opt] nested-name-specifier template template-id :: ~ type-name
     :: [opt] nested-name-specifier [opt] ~ type-name

   If either of the first two productions is used, sets *SCOPE to the
   TYPE specified before the final `::'.  Otherwise, *SCOPE is set to
   NULL_TREE.  *TYPE is set to the TYPE_DECL for the final type-name,
   or ERROR_MARK_NODE if the parse fails.  */

static void
cp_parser_pseudo_destructor_name (cp_parser* parser,
				  tree* scope,
				  tree* type)
{
  bool nested_name_specifier_p;

  /* Assume that things will not work out.  */
  *type = error_mark_node;

  /* Look for the optional `::' operator.  */
  cp_parser_global_scope_opt (parser, /*current_scope_valid_p=*/true);
  /* Look for the optional nested-name-specifier.  */
  nested_name_specifier_p
    = (cp_parser_nested_name_specifier_opt (parser,
					    /*typename_keyword_p=*/false,
					    /*check_dependency_p=*/true,
					    /*type_p=*/false,
					    /*is_declaration=*/false)
       != NULL_TREE);
  /* Now, if we saw a nested-name-specifier, we might be doing the
     second production.  */
  if (nested_name_specifier_p
      && cp_lexer_next_token_is_keyword (parser->lexer, RID_TEMPLATE))
    {
      /* Consume the `template' keyword.  */
      cp_lexer_consume_token (parser->lexer);
      /* Parse the template-id.  */
      cp_parser_template_id (parser,
			     /*template_keyword_p=*/true,
			     /*check_dependency_p=*/false,
			     /*is_declaration=*/true);
      /* Look for the `::' token.  */
      cp_parser_require (parser, CPP_SCOPE, RT_SCOPE);
    }
  /* If the next token is not a `~', then there might be some
     additional qualification.  */
  else if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMPL))
    {
      /* At this point, we're looking for "type-name :: ~".  The type-name
	 must not be a class-name, since this is a pseudo-destructor.  So,
	 it must be either an enum-name, or a typedef-name -- both of which
	 are just identifiers.  So, we peek ahead to check that the "::"
	 and "~" tokens are present; if they are not, then we can avoid
	 calling type_name.  */
      if (cp_lexer_peek_token (parser->lexer)->type != CPP_NAME
	  || cp_lexer_peek_nth_token (parser->lexer, 2)->type != CPP_SCOPE
	  || cp_lexer_peek_nth_token (parser->lexer, 3)->type != CPP_COMPL)
	{
	  cp_parser_error (parser, "non-scalar type");
	  return;
	}

      /* Look for the type-name.  */
      *scope = TREE_TYPE (cp_parser_nonclass_name (parser));
      if (*scope == error_mark_node)
	return;

      /* Look for the `::' token.  */
      cp_parser_require (parser, CPP_SCOPE, RT_SCOPE);
    }
  else
    /* Third production: no qualifying type before the `~'.  */
    *scope = NULL_TREE;

  /* Look for the `~'.  */
  cp_parser_require (parser, CPP_COMPL, RT_COMPL);

  /* Once we see the ~, this has to be a pseudo-destructor.  */
  if (!processing_template_decl && !cp_parser_error_occurred (parser))
    cp_parser_commit_to_tentative_parse (parser);

  /* Look for the type-name again.  We are not responsible for
     checking that it matches the first type-name.  */
  *type = cp_parser_nonclass_name (parser);
}
/* Parse a unary-expression.

   unary-expression:
     postfix-expression
     ++ cast-expression
     -- cast-expression
     unary-operator cast-expression
     sizeof unary-expression
     sizeof ( type-id )
     alignof ( type-id )  [C++0x]
     new-expression
     delete-expression

   GNU Extensions:

   unary-expression:
     __extension__ cast-expression
     __alignof__ unary-expression
     __alignof__ ( type-id )
     alignof unary-expression  [C++0x]
     __real__ cast-expression
     __imag__ cast-expression
     && identifier

   ADDRESS_P is true iff the unary-expression is appearing as the
   operand of the `&' operator.  CAST_P is true if this expression is
   the target of a cast.

   Returns a representation of the expression.  */

static tree
cp_parser_unary_expression (cp_parser *parser, bool address_p, bool cast_p,
			    cp_id_kind * pidk)
{
  cp_token *token;
  enum tree_code unary_operator;

  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);
  /* Some keywords give away the kind of expression.  */
  if (token->type == CPP_KEYWORD)
    {
      enum rid keyword = token->keyword;

      switch (keyword)
	{
	case RID_ALIGNOF:
	case RID_SIZEOF:
	  {
	    tree operand;
	    enum tree_code op;

	    op = keyword == RID_ALIGNOF ? ALIGNOF_EXPR : SIZEOF_EXPR;
	    /* Consume the token.  */
	    cp_lexer_consume_token (parser->lexer);
	    /* Parse the operand, which may be either a type-id or an
	       expression.  */
	    operand = cp_parser_sizeof_operand (parser, keyword);

	    if (TYPE_P (operand))
	      return cxx_sizeof_or_alignof_type (operand, op, true);
	    else
	      {
		/* ISO C++ defines alignof only with types, not with
		   expressions.  So pedwarn if alignof is used with a non-
		   type expression.  However, __alignof__ is ok.  */
		if (!strcmp (IDENTIFIER_POINTER (token->u.value), "alignof"))
		  pedwarn (token->location, OPT_pedantic,
			   "ISO C++ does not allow %<alignof%> "
			   "with a non-type");

		return cxx_sizeof_or_alignof_expr (operand, op, true);
	      }
	  }

	case RID_NEW:
	  return cp_parser_new_expression (parser);

	case RID_DELETE:
	  return cp_parser_delete_expression (parser);

	case RID_EXTENSION:
	  {
	    /* The saved value of the PEDANTIC flag.  */
	    int saved_pedantic;
	    tree expr;

	    /* Save away the PEDANTIC flag; __extension__ suppresses
	       pedantic diagnostics for its operand.  */
	    cp_parser_extension_opt (parser, &saved_pedantic);
	    /* Parse the cast-expression.  */
	    expr = cp_parser_simple_cast_expression (parser);
	    /* Restore the PEDANTIC flag.  */
	    pedantic = saved_pedantic;

	    return expr;
	  }

	case RID_REALPART:
	case RID_IMAGPART:
	  {
	    tree expression;

	    /* Consume the `__real__' or `__imag__' token.  */
	    cp_lexer_consume_token (parser->lexer);
	    /* Parse the cast-expression.  */
	    expression = cp_parser_simple_cast_expression (parser);
	    /* Create the complete representation.  */
	    return build_x_unary_op ((keyword == RID_REALPART
				      ? REALPART_EXPR : IMAGPART_EXPR),
				     expression,
				     tf_warning_or_error);
	  }
	  break;

	case RID_TRANSACTION_ATOMIC:
	case RID_TRANSACTION_RELAXED:
	  return cp_parser_transaction_expression (parser, keyword);

	case RID_NOEXCEPT:
	  {
	    tree expr;
	    const char *saved_message;
	    bool saved_integral_constant_expression_p;
	    bool saved_non_integral_constant_expression_p;
	    bool saved_greater_than_is_operator_p;

	    cp_lexer_consume_token (parser->lexer);
	    cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);

	    /* Types may not be defined inside a noexcept expression.  */
	    saved_message = parser->type_definition_forbidden_message;
	    parser->type_definition_forbidden_message
	      = G_("types may not be defined in %<noexcept%> expressions");

	    saved_integral_constant_expression_p
	      = parser->integral_constant_expression_p;
	    saved_non_integral_constant_expression_p
	      = parser->non_integral_constant_expression_p;
	    parser->integral_constant_expression_p = false;

	    /* Within the parentheses, `>' is the greater-than
	       operator, not the close of a template-argument-list.  */
	    saved_greater_than_is_operator_p
	      = parser->greater_than_is_operator_p;
	    parser->greater_than_is_operator_p = true;

	    /* The operand of noexcept is an unevaluated operand, so
	       suppress evaluation and the associated warnings.  */
	    ++cp_unevaluated_operand;
	    ++c_inhibit_evaluation_warnings;
	    expr = cp_parser_expression (parser, false, NULL);
	    --c_inhibit_evaluation_warnings;
	    --cp_unevaluated_operand;

	    /* Restore all of the saved parser state.  */
	    parser->greater_than_is_operator_p
	      = saved_greater_than_is_operator_p;

	    parser->integral_constant_expression_p
	      = saved_integral_constant_expression_p;
	    parser->non_integral_constant_expression_p
	      = saved_non_integral_constant_expression_p;

	    parser->type_definition_forbidden_message = saved_message;

	    cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
	    return finish_noexcept_expr (expr, tf_warning_or_error);
	  }

	default:
	  break;
	}
    }

  /* Look for the `:: new' and `:: delete', which also signal the
     beginning of a new-expression, or delete-expression,
     respectively.  If the next token is `::', then it might be one of
     these.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_SCOPE))
    {
      enum rid keyword;

      /* See if the token after the `::' is one of the keywords in
	 which we're interested.  */
      keyword = cp_lexer_peek_nth_token (parser->lexer, 2)->keyword;
      /* If it's `new', we have a new-expression.  */
      if (keyword == RID_NEW)
	return cp_parser_new_expression (parser);
      /* Similarly, for `delete'.  */
      else if (keyword == RID_DELETE)
	return cp_parser_delete_expression (parser);
    }

  /* Look for a unary operator.  */
  unary_operator = cp_parser_unary_operator (token);
  /* The `++' and `--' operators can be handled similarly, even though
     they are not technically unary-operators in the grammar.  */
  if (unary_operator == ERROR_MARK)
    {
      if (token->type == CPP_PLUS_PLUS)
	unary_operator = PREINCREMENT_EXPR;
      else if (token->type == CPP_MINUS_MINUS)
	unary_operator = PREDECREMENT_EXPR;
      /* Handle the GNU address-of-label extension.  */
      else if (cp_parser_allow_gnu_extensions_p (parser)
	       && token->type == CPP_AND_AND)
	{
	  tree identifier;
	  tree expression;
	  location_t loc = cp_lexer_peek_token (parser->lexer)->location;

	  /* Consume the '&&' token.  */
	  cp_lexer_consume_token (parser->lexer);
	  /* Look for the identifier.  */
	  identifier = cp_parser_identifier (parser);
	  /* Create an expression representing the address.  */
	  expression = finish_label_address_expr (identifier, loc);
	  /* Taking the address of a label is not permitted in an
	     integral constant expression.  */
	  if (cp_parser_non_integral_constant_expression (parser,
							  NIC_ADDR_LABEL))
	    expression = error_mark_node;
	  return expression;
	}
    }
  if (unary_operator != ERROR_MARK)
    {
      tree cast_expression;
      tree expression = error_mark_node;
      non_integral_constant non_constant_p = NIC_NONE;

      /* Consume the operator token.  */
      token = cp_lexer_consume_token (parser->lexer);
      /* Parse the cast-expression.  */
      cast_expression
	= cp_parser_cast_expression (parser,
				     unary_operator == ADDR_EXPR,
				     /*cast_p=*/false, pidk);
      /* Now, build an appropriate representation.  */
      switch (unary_operator)
	{
	case INDIRECT_REF:
	  non_constant_p = NIC_STAR;
	  expression = build_x_indirect_ref (cast_expression, RO_UNARY_STAR,
					     tf_warning_or_error);
	  break;

	case ADDR_EXPR:
	  non_constant_p = NIC_ADDR;
	  /* Fall through.  */
	case BIT_NOT_EXPR:
	  expression = build_x_unary_op (unary_operator, cast_expression,
					 tf_warning_or_error);
	  break;

	case PREINCREMENT_EXPR:
	case PREDECREMENT_EXPR:
	  non_constant_p = unary_operator == PREINCREMENT_EXPR
			   ? NIC_PREINCREMENT : NIC_PREDECREMENT;
	  /* Fall through.  */
	case UNARY_PLUS_EXPR:
	case NEGATE_EXPR:
	case TRUTH_NOT_EXPR:
	  expression = finish_unary_op_expr (unary_operator, cast_expression);
	  break;

	default:
	  gcc_unreachable ();
	}

      /* Some of these operators may not appear in an integral
	 constant expression; complain if one does.  */
      if (non_constant_p != NIC_NONE
	  && cp_parser_non_integral_constant_expression (parser,
							 non_constant_p))
	expression = error_mark_node;

      return expression;
    }

  /* No special keyword and no unary operator: this must be a
     postfix-expression.  */
  return cp_parser_postfix_expression (parser, address_p, cast_p,
				       /*member_access_only_p=*/false,
				       pidk);
}
/* Returns ERROR_MARK if TOKEN is not a unary-operator.  If TOKEN is a
   unary-operator, the corresponding tree code is returned.  */

static enum tree_code
cp_parser_unary_operator (cp_token* token)
{
  /* Map each token that spells a unary-operator onto the tree code
     used to represent the resulting expression.  */
  if (token->type == CPP_MULT)
    return INDIRECT_REF;
  if (token->type == CPP_AND)
    return ADDR_EXPR;
  if (token->type == CPP_PLUS)
    return UNARY_PLUS_EXPR;
  if (token->type == CPP_MINUS)
    return NEGATE_EXPR;
  if (token->type == CPP_NOT)
    return TRUTH_NOT_EXPR;
  if (token->type == CPP_COMPL)
    return BIT_NOT_EXPR;

  /* Anything else is not a unary-operator.  */
  return ERROR_MARK;
}
/* Parse a new-expression.

   new-expression:
     :: [opt] new new-placement [opt] new-type-id new-initializer [opt]
     :: [opt] new new-placement [opt] ( type-id ) new-initializer [opt]

   Returns a representation of the expression.  */

static tree
cp_parser_new_expression (cp_parser* parser)
{
  bool global_scope_p;
  VEC(tree,gc) *placement;
  tree type;
  VEC(tree,gc) *initializer;
  tree nelts;
  tree ret;

  /* Look for the optional `::' operator.  */
  global_scope_p
    = (cp_parser_global_scope_opt (parser,
				   /*current_scope_valid_p=*/false)
       != NULL_TREE);
  /* Look for the `new' operator.  */
  cp_parser_require_keyword (parser, RID_NEW, RT_NEW);
  /* There's no easy way to tell a new-placement from the
     `( type-id )' construct.  */
  cp_parser_parse_tentatively (parser);
  /* Look for a new-placement.  */
  placement = cp_parser_new_placement (parser);
  /* If that didn't work out, there's no new-placement.  */
  if (!cp_parser_parse_definitely (parser))
    {
      /* Release the vector allocated by the aborted attempt.  */
      if (placement != NULL)
	release_tree_vector (placement);
      placement = NULL;
    }

  /* If the next token is a `(', then we have a parenthesized
     type-id.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN))
    {
      cp_token *token;
      const char *saved_message = parser->type_definition_forbidden_message;

      /* Consume the `('.  */
      cp_lexer_consume_token (parser->lexer);

      /* Parse the type-id; types may not be defined here.  */
      parser->type_definition_forbidden_message
	= G_("types may not be defined in a new-expression");
      type = cp_parser_type_id (parser);
      parser->type_definition_forbidden_message = saved_message;

      /* Look for the closing `)'.  */
      cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
      token = cp_lexer_peek_token (parser->lexer);
      /* There should not be a direct-new-declarator in this production,
	 but GCC used to allow this, so we check and emit a sensible error
	 message for this case.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_SQUARE))
	{
	  error_at (token->location,
		    "array bound forbidden after parenthesized type-id");
	  inform (token->location,
		  "try removing the parentheses around the type-id");
	  /* Consume the erroneous declarator so parsing can continue.  */
	  cp_parser_direct_new_declarator (parser);
	}
      nelts = NULL_TREE;
    }
  /* Otherwise, there must be a new-type-id.  */
  else
    type = cp_parser_new_type_id (parser, &nelts);

  /* If the next token is a `(' or '{', then we have a new-initializer.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN)
      || cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
    initializer = cp_parser_new_initializer (parser);
  else
    initializer = NULL;

  /* A new-expression may not appear in an integral constant
     expression.  */
  if (cp_parser_non_integral_constant_expression (parser, NIC_NEW))
    ret = error_mark_node;
  else
    {
      /* Create a representation of the new-expression.  */
      ret = build_new (&placement, type, nelts, &initializer, global_scope_p,
		       tf_warning_or_error);
    }

  /* The vectors are no longer needed once the expression is built.  */
  if (placement != NULL)
    release_tree_vector (placement);
  if (initializer != NULL)
    release_tree_vector (initializer);

  return ret;
}
/* Parse a new-placement.

   new-placement:
     ( expression-list )

   Returns the same representation as for an expression-list.  */

static VEC(tree,gc) *
cp_parser_new_placement (cp_parser* parser)
{
  /* A new-placement is just a parenthesized expression-list; delegate
     the parsing and return the resulting vector directly.  */
  return cp_parser_parenthesized_expression_list (parser, non_attr,
						  /*cast_p=*/false,
						  /*allow_expansion_p=*/true,
						  /*non_constant_p=*/NULL);
}
/* Parse a new-type-id.

   new-type-id:
     type-specifier-seq new-declarator [opt]

   Returns the TYPE allocated.  If the new-type-id indicates an array
   type, *NELTS is set to the number of elements in the last array
   bound; the TYPE will not include the last array bound.  */

static tree
cp_parser_new_type_id (cp_parser* parser, tree *nelts)
{
  cp_decl_specifier_seq type_specifier_seq;
  cp_declarator *new_declarator;
  cp_declarator *declarator;
  cp_declarator *outer_declarator;
  const char *saved_message;
  tree type;

  /* The type-specifier sequence must not contain type definitions.
     (It cannot contain declarations of new types either, but if they
     are not definitions we will catch that because they are not
     complete.)  */
  saved_message = parser->type_definition_forbidden_message;
  parser->type_definition_forbidden_message
    = G_("types may not be defined in a new-type-id");
  /* Parse the type-specifier-seq.  */
  cp_parser_type_specifier_seq (parser, /*is_declaration=*/false,
				/*is_trailing_return=*/false,
				&type_specifier_seq);
  /* Restore the old message.  */
  parser->type_definition_forbidden_message = saved_message;
  /* Parse the new-declarator.  */
  new_declarator = cp_parser_new_declarator_opt (parser);

  /* Determine the number of elements in the last array dimension, if
     any.  */
  *nelts = NULL_TREE;
  /* Skip down to the last array dimension.  */
  declarator = new_declarator;
  outer_declarator = NULL;
  /* First step over any pointer or pointer-to-member declarators,
     remembering the declarator that encloses the one we stop at.  */
  while (declarator && (declarator->kind == cdk_pointer
			|| declarator->kind == cdk_ptrmem))
    {
      outer_declarator = declarator;
      declarator = declarator->declarator;
    }
  /* Then walk to the innermost array declarator in a chain of array
     bounds, again tracking its enclosing declarator.  */
  while (declarator
	 && declarator->kind == cdk_array
	 && declarator->declarator
	 && declarator->declarator->kind == cdk_array)
    {
      outer_declarator = declarator;
      declarator = declarator->declarator;
    }

  if (declarator && declarator->kind == cdk_array)
    {
      /* Hand the last array bound back through *NELTS; it is not made
	 part of the returned type.  */
      *nelts = declarator->u.array.bounds;
      if (*nelts == error_mark_node)
	*nelts = integer_one_node;

      /* Detach that last array bound from the declarator chain.  */
      if (outer_declarator)
	outer_declarator->declarator = declarator->declarator;
      else
	new_declarator = NULL;
    }

  type = groktypename (&type_specifier_seq, new_declarator, false);
  return type;
}
/* Parse an (optional) new-declarator.

   new-declarator:
     ptr-operator new-declarator [opt]
     direct-new-declarator

   Returns the declarator, or NULL if no new-declarator is present.  */

static cp_declarator *
cp_parser_new_declarator_opt (cp_parser* parser)
{
  enum tree_code ptr_code;
  tree class_type;
  cp_cv_quals quals;

  /* We don't know whether a ptr-operator comes next, so try one
     tentatively.  */
  cp_parser_parse_tentatively (parser);
  ptr_code = cp_parser_ptr_operator (parser, &class_type, &quals);

  /* If a ptr-operator was present, recurse for the remainder of the
     new-declarator and wrap it in the indirection.  */
  if (cp_parser_parse_definitely (parser))
    {
      cp_declarator *inner = cp_parser_new_declarator_opt (parser);
      return cp_parser_make_indirect_declarator (ptr_code, class_type,
						 quals, inner);
    }

  /* Otherwise, a `[' introduces a direct-new-declarator.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_SQUARE))
    return cp_parser_direct_new_declarator (parser);

  /* The new-declarator was absent.  */
  return NULL;
}
/* Parse a direct-new-declarator.

   direct-new-declarator:
     [ expression ]
     direct-new-declarator [constant-expression]

   Returns the declarator, built up as a chain of array declarators,
   one per bound.  */

static cp_declarator *
cp_parser_direct_new_declarator (cp_parser* parser)
{
  cp_declarator *declarator = NULL;

  /* Consume one `[ expression ]' bound per iteration.  */
  while (true)
    {
      tree expression;

      /* Look for the opening `['.  */
      cp_parser_require (parser, CPP_OPEN_SQUARE, RT_OPEN_SQUARE);

      /* The first expression is not required to be constant.  */
      if (!declarator)
	{
	  cp_token *token = cp_lexer_peek_token (parser->lexer);
	  expression = cp_parser_expression (parser, /*cast_p=*/false, NULL);
	  /* The standard requires that the expression have integral
	     type.  DR 74 adds enumeration types.  We believe that the
	     real intent is that these expressions be handled like the
	     expression in a `switch' condition, which also allows
	     classes with a single conversion to integral or
	     enumeration type.  */
	  if (!processing_template_decl)
	    {
	      expression
		= build_expr_type_conversion (WANT_INT | WANT_ENUM,
					      expression,
					      /*complain=*/true);
	      if (!expression)
		{
		  error_at (token->location,
			    "expression in new-declarator must have integral "
			    "or enumeration type");
		  expression = error_mark_node;
		}
	    }
	}
      /* But all the other expressions must be.  */
      else
	expression
	  = cp_parser_constant_expression (parser,
					   /*allow_non_constant=*/false,
					   NULL);

      /* Look for the closing `]'.  */
      cp_parser_require (parser, CPP_CLOSE_SQUARE, RT_CLOSE_SQUARE);

      /* Add this bound to the declarator.  */
      declarator = make_array_declarator (declarator, expression);

      /* If the next token is not a `[', then there are no more
	 bounds.  */
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_OPEN_SQUARE))
	break;
    }

  return declarator;
}
/* Parse a new-initializer.

   new-initializer:
     ( expression-list [opt] )
     braced-init-list

   Returns a representation of the expression-list.  */

static VEC(tree,gc) *
cp_parser_new_initializer (cp_parser* parser)
{
  /* A `{' introduces a braced-init-list (C++0x); anything else must
     be a parenthesized expression-list.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
    {
      bool braced_list_non_constant_p;
      tree braced_list;

      maybe_warn_cpp0x (CPP0X_INITIALIZER_LISTS);
      braced_list = cp_parser_braced_list (parser,
					   &braced_list_non_constant_p);
      /* `new T { ... }' performs direct-initialization.  */
      CONSTRUCTOR_IS_DIRECT_INIT (braced_list) = 1;
      return make_tree_vector_single (braced_list);
    }

  return cp_parser_parenthesized_expression_list (parser, non_attr,
						  /*cast_p=*/false,
						  /*allow_expansion_p=*/true,
						  /*non_constant_p=*/NULL);
}
/* Parse a delete-expression.

   delete-expression:
     :: [opt] delete cast-expression
     :: [opt] delete [ ] cast-expression

   Returns a representation of the expression.  */

static tree
cp_parser_delete_expression (cp_parser* parser)
{
  bool global_scope_p;
  bool array_p = false;
  tree operand;

  /* Look for the optional `::' operator.  */
  global_scope_p
    = (cp_parser_global_scope_opt (parser,
				   /*current_scope_valid_p=*/false)
       != NULL_TREE);
  /* Look for the `delete' keyword.  */
  cp_parser_require_keyword (parser, RID_DELETE, RT_DELETE);

  /* A `[' `]' pair here selects the array form, `delete[]'.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_SQUARE))
    {
      /* Consume the `[' and then require the matching `]'.  */
      cp_lexer_consume_token (parser->lexer);
      cp_parser_require (parser, CPP_CLOSE_SQUARE, RT_CLOSE_SQUARE);
      array_p = true;
    }

  /* Parse the cast-expression naming the object(s) to delete.  */
  operand = cp_parser_simple_cast_expression (parser);

  /* A delete-expression may not appear in an integral constant
     expression.  */
  if (cp_parser_non_integral_constant_expression (parser, NIC_DEL))
    return error_mark_node;

  return delete_sanity (operand, NULL_TREE, array_p, global_scope_p,
			tf_warning_or_error);
}
/* Returns true if TOKEN may start a cast-expression and false
   otherwise.  */

static bool
cp_parser_token_starts_cast_expression (cp_token *token)
{
  /* '[' may start a primary-expression in obj-c++.  */
  if (token->type == CPP_OPEN_SQUARE)
    return c_dialect_objc ();

  switch (token->type)
    {
    /* None of these tokens can begin an expression: they are binary
       or assignment operators, member-access operators, closing
       delimiters, or punctuation that can only follow a complete
       expression.  */
    case CPP_COMMA:
    case CPP_SEMICOLON:
    case CPP_QUERY:
    case CPP_COLON:
    case CPP_CLOSE_SQUARE:
    case CPP_CLOSE_PAREN:
    case CPP_CLOSE_BRACE:
    case CPP_DOT:
    case CPP_DOT_STAR:
    case CPP_DEREF:
    case CPP_DEREF_STAR:
    case CPP_DIV:
    case CPP_MOD:
    case CPP_LSHIFT:
    case CPP_RSHIFT:
    case CPP_LESS:
    case CPP_GREATER:
    case CPP_LESS_EQ:
    case CPP_GREATER_EQ:
    case CPP_EQ_EQ:
    case CPP_NOT_EQ:
    case CPP_EQ:
    case CPP_MULT_EQ:
    case CPP_DIV_EQ:
    case CPP_MOD_EQ:
    case CPP_PLUS_EQ:
    case CPP_MINUS_EQ:
    case CPP_RSHIFT_EQ:
    case CPP_LSHIFT_EQ:
    case CPP_AND_EQ:
    case CPP_XOR_EQ:
    case CPP_OR_EQ:
    case CPP_XOR:
    case CPP_OR:
    case CPP_OR_OR:
    case CPP_EOF:
      return false;

    /* Anything else might plausibly begin a cast-expression.  */
    default:
      return true;
    }
}
/* Parse a cast-expression.

   cast-expression:
     unary-expression
     ( type-id ) cast-expression

   ADDRESS_P is true iff the unary-expression is appearing as the
   operand of the `&' operator.  CAST_P is true if this expression is
   the target of a cast.

   Returns a representation of the expression.  */

static tree
cp_parser_cast_expression (cp_parser *parser, bool address_p, bool cast_p,
			   cp_id_kind * pidk)
{
  /* If it's a `(', then we might be looking at a cast.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN))
    {
      tree type = NULL_TREE;
      tree expr = NULL_TREE;
      bool compound_literal_p;
      const char *saved_message;

      /* There's no way to know yet whether or not this is a cast.
	 For example, `(int (3))' is a unary-expression, while `(int)
	 3' is a cast.  So, we resort to parsing tentatively.  */
      cp_parser_parse_tentatively (parser);
      /* Types may not be defined in a cast.  */
      saved_message = parser->type_definition_forbidden_message;
      parser->type_definition_forbidden_message
	= G_("types may not be defined in casts");
      /* Consume the `('.  */
      cp_lexer_consume_token (parser->lexer);
      /* A very tricky bit is that `(struct S) { 3 }' is a
	 compound-literal (which we permit in C++ as an extension).
	 But, that construct is not a cast-expression -- it is a
	 postfix-expression.  (The reason is that `(struct S) { 3 }.i'
	 is legal; if the compound-literal were a cast-expression,
	 you'd need an extra set of parentheses.)  But, if we parse
	 the type-id, and it happens to be a class-specifier, then we
	 will commit to the parse at that point, because we cannot
	 undo the action that is done when creating a new class.  So,
	 then we cannot back up and do a postfix-expression.

	 Therefore, we scan ahead to the closing `)', and check to see
	 if the token after the `)' is a `{'.  If so, we are not
	 looking at a cast-expression.

	 Save tokens so that we can put them back.  */
      cp_lexer_save_tokens (parser->lexer);
      /* Skip tokens until the next token is a closing parenthesis.
	 If we find the closing `)', and the next token is a `{', then
	 we are looking at a compound-literal.  */
      compound_literal_p
	= (cp_parser_skip_to_closing_parenthesis (parser, false, false,
						  /*consume_paren=*/true)
	   && cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE));
      /* Roll back the tokens we skipped.  */
      cp_lexer_rollback_tokens (parser->lexer);
      /* If we were looking at a compound-literal, simulate an error
	 so that the call to cp_parser_parse_definitely below will
	 fail.  */
      if (compound_literal_p)
	cp_parser_simulate_error (parser);
      else
	{
	  bool saved_in_type_id_in_expr_p = parser->in_type_id_in_expr_p;
	  parser->in_type_id_in_expr_p = true;
	  /* Look for the type-id.  */
	  type = cp_parser_type_id (parser);
	  /* Look for the closing `)'.  */
	  cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
	  parser->in_type_id_in_expr_p = saved_in_type_id_in_expr_p;
	}

      /* Restore the saved message.  */
      parser->type_definition_forbidden_message = saved_message;

      /* At this point this can only be either a cast or a
	 parenthesized ctor such as `(T ())' that looks like a cast to
	 function returning T.  */
      if (!cp_parser_error_occurred (parser)
	  && cp_parser_token_starts_cast_expression (cp_lexer_peek_token
						     (parser->lexer)))
	{
	  /* It really is a cast: commit to the tentative parse.  */
	  cp_parser_parse_definitely (parser);
	  /* Parse the operand of the cast.  */
	  expr = cp_parser_cast_expression (parser,
					    /*address_p=*/false,
					    /*cast_p=*/true, pidk);

	  /* Warn about old-style casts, if so requested.  */
	  if (warn_old_style_cast
	      && !in_system_header
	      && !VOID_TYPE_P (type)
	      && current_lang_name != lang_name_c)
	    warning (OPT_Wold_style_cast, "use of old-style cast");

	  /* Only type conversions to integral or enumeration types
	     can be used in constant-expressions.  */
	  if (!cast_valid_in_integral_constant_expression_p (type)
	      && cp_parser_non_integral_constant_expression (parser,
							     NIC_CAST))
	    return error_mark_node;

	  /* Perform the cast.  */
	  expr = build_c_cast (input_location, type, expr);
	  return expr;
	}
      else
	/* Not a cast after all; undo the tentative parse and fall
	   through to the unary-expression path.  */
	cp_parser_abort_tentative_parse (parser);
    }

  /* If we get here, then it's not a cast, so it must be a
     unary-expression.  */
  return cp_parser_unary_expression (parser, address_p, cast_p, pidk);
}
/* Parse a binary expression of the general form:
pm-expression:
cast-expression
pm-expression .* cast-expression
pm-expression ->* cast-expression
multiplicative-expression:
pm-expression
multiplicative-expression * pm-expression
multiplicative-expression / pm-expression
multiplicative-expression % pm-expression
additive-expression:
multiplicative-expression
additive-expression + multiplicative-expression
additive-expression - multiplicative-expression
shift-expression:
additive-expression
shift-expression << additive-expression
shift-expression >> additive-expression
relational-expression:
shift-expression
relational-expression < shift-expression
relational-expression > shift-expression
relational-expression <= shift-expression
relational-expression >= shift-expression
GNU Extension:
relational-expression:
relational-expression <? shift-expression
relational-expression >? shift-expression
equality-expression:
relational-expression
equality-expression == relational-expression
equality-expression != relational-expression
and-expression:
equality-expression
and-expression & equality-expression
exclusive-or-expression:
and-expression
exclusive-or-expression ^ and-expression
inclusive-or-expression:
exclusive-or-expression
inclusive-or-expression | exclusive-or-expression
logical-and-expression:
inclusive-or-expression
logical-and-expression && inclusive-or-expression
logical-or-expression:
logical-and-expression
logical-or-expression || logical-and-expression
All these are implemented with a single function like:
binary-expression:
simple-cast-expression
binary-expression <token> binary-expression
CAST_P is true if this expression is the target of a cast.
The binops_by_token map is used to get the tree codes for each <token> type.
binary-expressions are associated according to a precedence table. */
/* Compute the precedence of TOKEN.  A `>' (and, for dialects newer than
   C++98, a `>>') is treated as a non-operator when
   parser->greater_than_is_operator_p is false, e.g. while parsing a
   template-argument-list, so that it closes the list instead.  */
#define TOKEN_PRECEDENCE(token) \
(((token->type == CPP_GREATER \
|| ((cxx_dialect != cxx98) && token->type == CPP_RSHIFT)) \
&& !parser->greater_than_is_operator_p) \
? PREC_NOT_OPERATOR \
: binops_by_token[token->type].prec)
/* Parse the whole family of binary expressions with a single
   operator-precedence loop over an explicit stack, rather than one
   recursive function per precedence level.  Entries are pushed when a
   higher-precedence operator is seen and popped when precedence falls
   back.  */
static tree
cp_parser_binary_expression (cp_parser* parser, bool cast_p,
bool no_toplevel_fold_p,
enum cp_parser_prec prec,
cp_id_kind * pidk)
{
cp_parser_expression_stack stack;
cp_parser_expression_stack_entry *sp = &stack[0];
tree lhs, rhs;
cp_token *token;
enum tree_code tree_type, lhs_type, rhs_type;
enum cp_parser_prec new_prec, lookahead_prec;
tree overload;
/* Parse the first expression. */
lhs = cp_parser_cast_expression (parser, /*address_p=*/false, cast_p, pidk);
lhs_type = ERROR_MARK;
if (cp_parser_error_occurred (parser))
return error_mark_node;
for (;;)
{
/* Get an operator token. */
token = cp_lexer_peek_token (parser->lexer);
if (warn_cxx0x_compat
&& token->type == CPP_RSHIFT
&& !parser->greater_than_is_operator_p)
{
if (warning_at (token->location, OPT_Wc__0x_compat,
"%<>>%> operator is treated as"
" two right angle brackets in C++11"))
inform (token->location,
"suggest parentheses around %<>>%> expression");
}
new_prec = TOKEN_PRECEDENCE (token);
/* Popping an entry off the stack means we completed a subexpression:
- either we found a token which is not an operator (`>' where it is not
an operator, or prec == PREC_NOT_OPERATOR), in which case popping
will happen repeatedly;
- or, we found an operator which has lower priority. This is the case
where the recursive descent *ascends*, as in `3 * 4 + 5' after
parsing `3 * 4'. */
if (new_prec <= prec)
{
if (sp == stack)
break;
else
goto pop;
}
get_rhs:
tree_type = binops_by_token[token->type].tree_type;
/* We used the operator token. */
cp_lexer_consume_token (parser->lexer);
/* For "false && x" or "true || x", x will never be executed;
disable warnings while evaluating it. */
if (tree_type == TRUTH_ANDIF_EXPR)
c_inhibit_evaluation_warnings += lhs == truthvalue_false_node;
else if (tree_type == TRUTH_ORIF_EXPR)
c_inhibit_evaluation_warnings += lhs == truthvalue_true_node;
/* Extract another operand. It may be the RHS of this expression
or the LHS of a new, higher priority expression. */
rhs = cp_parser_simple_cast_expression (parser);
rhs_type = ERROR_MARK;
/* Get another operator token. Look up its precedence to avoid
building a useless (immediately popped) stack entry for common
cases such as 3 + 4 + 5 or 3 * 4 + 5. */
token = cp_lexer_peek_token (parser->lexer);
lookahead_prec = TOKEN_PRECEDENCE (token);
if (lookahead_prec > new_prec)
{
/* ... and prepare to parse the RHS of the new, higher priority
expression. Since precedence levels on the stack are
monotonically increasing, we do not have to care about
stack overflows. */
sp->prec = prec;
sp->tree_type = tree_type;
sp->lhs = lhs;
sp->lhs_type = lhs_type;
sp++;
lhs = rhs;
lhs_type = rhs_type;
prec = new_prec;
new_prec = lookahead_prec;
goto get_rhs;
pop:
lookahead_prec = new_prec;
/* If the stack is not empty, we have parsed into LHS the right side
(`4' in the example above) of an expression we had suspended.
We can use the information on the stack to recover the LHS (`3')
from the stack together with the tree code (`MULT_EXPR'), and
the precedence of the higher level subexpression
(`PREC_ADDITIVE_EXPRESSION'). TOKEN is the CPP_PLUS token,
which will be used to actually build the additive expression. */
--sp;
prec = sp->prec;
tree_type = sp->tree_type;
rhs = lhs;
rhs_type = lhs_type;
lhs = sp->lhs;
lhs_type = sp->lhs_type;
}
/* Undo the disabling of warnings done above. */
if (tree_type == TRUTH_ANDIF_EXPR)
c_inhibit_evaluation_warnings -= lhs == truthvalue_false_node;
else if (tree_type == TRUTH_ORIF_EXPR)
c_inhibit_evaluation_warnings -= lhs == truthvalue_true_node;
overload = NULL;
/* ??? Currently we pass lhs_type == ERROR_MARK and rhs_type ==
ERROR_MARK for everything that is not a binary expression.
This makes warn_about_parentheses miss some warnings that
involve unary operators. For unary expressions we should
pass the correct tree_code unless the unary expression was
surrounded by parentheses.
*/
/* At the top level of a template-argument-like context, build a raw
comparison node instead of calling build_x_binary_op, so the
caller can fold it itself.  */
if (no_toplevel_fold_p
&& lookahead_prec <= prec
&& sp == stack
&& TREE_CODE_CLASS (tree_type) == tcc_comparison)
lhs = build2 (tree_type, boolean_type_node, lhs, rhs);
else
lhs = build_x_binary_op (tree_type, lhs, lhs_type, rhs, rhs_type,
&overload, tf_warning_or_error);
lhs_type = tree_type;
/* If the binary operator required the use of an overloaded operator,
then this expression cannot be an integral constant-expression.
An overloaded operator can be used even if both operands are
otherwise permissible in an integral constant-expression if at
least one of the operands is of enumeration type. */
if (overload
&& cp_parser_non_integral_constant_expression (parser,
NIC_OVERLOADED))
return error_mark_node;
}
return lhs;
}
/* Parse the `? expression : assignment-expression' part of a
conditional-expression. The LOGICAL_OR_EXPR is the
logical-or-expression that started the conditional-expression.
Returns a representation of the entire conditional-expression.
This routine is used by cp_parser_assignment_expression.
? expression : assignment-expression
GNU Extensions:
? : assignment-expression */
static tree
cp_parser_question_colon_clause (cp_parser* parser, tree logical_or_expr)
{
tree expr;
tree assignment_expr;
struct cp_token *token;
/* Consume the `?' token. */
cp_lexer_consume_token (parser->lexer);
token = cp_lexer_peek_token (parser->lexer);
/* A `:' immediately after the `?' is the GNU `x ?: y' extension,
   meaning `x ? x : y' without re-evaluating x.  */
if (cp_parser_allow_gnu_extensions_p (parser)
&& token->type == CPP_COLON)
{
pedwarn (token->location, OPT_pedantic,
"ISO C++ does not allow ?: with omitted middle operand");
/* Implicit true clause. */
expr = NULL_TREE;
/* If the condition is the literal `true', the false arm below is
   dead code; suppress warnings while parsing it.  */
c_inhibit_evaluation_warnings += logical_or_expr == truthvalue_true_node;
warn_for_omitted_condop (token->location, logical_or_expr);
}
else
{
bool saved_colon_corrects_to_scope_p = parser->colon_corrects_to_scope_p;
parser->colon_corrects_to_scope_p = false;
/* Parse the expression. */
/* A literal-`false' condition makes this (true) arm dead code;
   suppress warnings while parsing it.  */
c_inhibit_evaluation_warnings += logical_or_expr == truthvalue_false_node;
expr = cp_parser_expression (parser, /*cast_p=*/false, NULL);
/* Net effect: undo the `false' increment above and instead
   suppress warnings for the false arm (parsed below) when the
   condition is literally `true'.  */
c_inhibit_evaluation_warnings +=
((logical_or_expr == truthvalue_true_node)
- (logical_or_expr == truthvalue_false_node));
parser->colon_corrects_to_scope_p = saved_colon_corrects_to_scope_p;
}
/* The next token should be a `:'. */
cp_parser_require (parser, CPP_COLON, RT_COLON);
/* Parse the assignment-expression. */
assignment_expr = cp_parser_assignment_expression (parser, /*cast_p=*/false, NULL);
/* Balance the `true' increment applied on either path above.  */
c_inhibit_evaluation_warnings -= logical_or_expr == truthvalue_true_node;
/* Build the conditional-expression. */
return build_x_conditional_expr (logical_or_expr,
expr,
assignment_expr,
tf_warning_or_error);
}
/* Parse an assignment-expression.
assignment-expression:
conditional-expression
logical-or-expression assignment-operator assignment_expression
throw-expression
CAST_P is true if this expression is the target of a cast.
Returns a representation for the expression. */
static tree
cp_parser_assignment_expression (cp_parser* parser, bool cast_p,
cp_id_kind * pidk)
{
tree expr;
/* If the next token is the `throw' keyword, then we're looking at
a throw-expression. */
if (cp_lexer_next_token_is_keyword (parser->lexer, RID_THROW))
expr = cp_parser_throw_expression (parser);
/* Otherwise, it must be that we are looking at a
logical-or-expression. */
else
{
/* Parse the binary expressions (logical-or-expression). */
expr = cp_parser_binary_expression (parser, cast_p, false,
PREC_NOT_OPERATOR, pidk);
/* If the next token is a `?' then we're actually looking at a
conditional-expression. */
if (cp_lexer_next_token_is (parser->lexer, CPP_QUERY))
return cp_parser_question_colon_clause (parser, expr);
else
{
enum tree_code assignment_operator;
/* If it's an assignment-operator, we're using the second
production. */
assignment_operator
= cp_parser_assignment_operator_opt (parser);
if (assignment_operator != ERROR_MARK)
{
bool non_constant_p;
/* Parse the right-hand side of the assignment.  An
   initializer-clause also accepts a braced-init-list,
   which is diagnosed as a C++11 feature below. */
tree rhs = cp_parser_initializer_clause (parser, &non_constant_p);
if (BRACE_ENCLOSED_INITIALIZER_P (rhs))
maybe_warn_cpp0x (CPP0X_INITIALIZER_LISTS);
/* An assignment may not appear in a
constant-expression. */
if (cp_parser_non_integral_constant_expression (parser,
NIC_ASSIGNMENT))
return error_mark_node;
/* Build the assignment expression. */
expr = build_x_modify_expr (expr,
assignment_operator,
rhs,
tf_warning_or_error);
}
}
}
return expr;
}
/* Parse an (optional) assignment-operator.
assignment-operator: one of
= *= /= %= += -= >>= <<= &= ^= |=
GNU Extension:
assignment-operator: one of
<?= >?=
If the next token is an assignment operator, the corresponding tree
code is returned, and the token is consumed. For example, for
`+=', PLUS_EXPR is returned. For `=' itself, the code returned is
NOP_EXPR. For `/', TRUNC_DIV_EXPR is returned; for `%',
TRUNC_MOD_EXPR is returned. If TOKEN is not an assignment
operator, ERROR_MARK is returned. */
static enum tree_code
cp_parser_assignment_operator_opt (cp_parser* parser)
{
  /* Map the upcoming token onto the corresponding tree code, leaving
     ERROR_MARK when it is not an assignment operator.  */
  enum tree_code op = ERROR_MARK;

  switch (cp_lexer_peek_token (parser->lexer)->type)
    {
    case CPP_EQ:        op = NOP_EXPR;        break;
    case CPP_MULT_EQ:   op = MULT_EXPR;       break;
    case CPP_DIV_EQ:    op = TRUNC_DIV_EXPR;  break;
    case CPP_MOD_EQ:    op = TRUNC_MOD_EXPR;  break;
    case CPP_PLUS_EQ:   op = PLUS_EXPR;       break;
    case CPP_MINUS_EQ:  op = MINUS_EXPR;      break;
    case CPP_RSHIFT_EQ: op = RSHIFT_EXPR;     break;
    case CPP_LSHIFT_EQ: op = LSHIFT_EXPR;     break;
    case CPP_AND_EQ:    op = BIT_AND_EXPR;    break;
    case CPP_XOR_EQ:    op = BIT_XOR_EXPR;    break;
    case CPP_OR_EQ:     op = BIT_IOR_EXPR;    break;
    default:
      /* Nothing else is an assignment operator.  */
      break;
    }

  /* Only consume the token when we actually recognized an operator.  */
  if (op != ERROR_MARK)
    cp_lexer_consume_token (parser->lexer);

  return op;
}
/* Parse an expression.
expression:
assignment-expression
expression , assignment-expression
CAST_P is true if this expression is the target of a cast.
Returns a representation of the expression. */
static tree
cp_parser_expression (cp_parser* parser, bool cast_p, cp_id_kind * pidk)
{
  tree result = NULL_TREE;

  for (;;)
    {
      /* Parse one assignment-expression.  */
      tree assignment
	= cp_parser_assignment_expression (parser, cast_p, pidk);

      /* Fold it into the comma-expression built so far; the first
	 operand is simply saved.  */
      result = (result
		? build_x_compound_expr (result, assignment,
					 tf_warning_or_error)
		: assignment);

      /* Without a following `,', the expression is complete.  */
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA))
	break;

      /* Consume the `,'.  */
      cp_lexer_consume_token (parser->lexer);

      /* A comma operator cannot appear in a constant-expression.  */
      if (cp_parser_non_integral_constant_expression (parser, NIC_COMMA))
	result = error_mark_node;
    }

  return result;
}
/* Parse a constant-expression.
constant-expression:
conditional-expression
If ALLOW_NON_CONSTANT_P a non-constant expression is silently
accepted. If ALLOW_NON_CONSTANT_P is true and the expression is not
constant, *NON_CONSTANT_P is set to TRUE. If ALLOW_NON_CONSTANT_P
is false, NON_CONSTANT_P should be NULL. */
static tree
cp_parser_constant_expression (cp_parser* parser,
bool allow_non_constant_p,
bool *non_constant_p)
{
bool saved_integral_constant_expression_p;
bool saved_allow_non_integral_constant_expression_p;
bool saved_non_integral_constant_expression_p;
tree expression;
/* It might seem that we could simply parse the
conditional-expression, and then check to see if it were
TREE_CONSTANT. However, an expression that is TREE_CONSTANT is
one that the compiler can figure out is constant, possibly after
doing some simplifications or optimizations. The standard has a
precise definition of constant-expression, and we must honor
that, even though it is somewhat more restrictive.
For example:
int i[(2, 3)];
is not a legal declaration, because `(2, 3)' is not a
constant-expression. The `,' operator is forbidden in a
constant-expression. However, GCC's constant-folding machinery
will fold this operation to an INTEGER_CST for `3'. */
/* Save the old settings. */
saved_integral_constant_expression_p = parser->integral_constant_expression_p;
saved_allow_non_integral_constant_expression_p
= parser->allow_non_integral_constant_expression_p;
saved_non_integral_constant_expression_p = parser->non_integral_constant_expression_p;
/* We are now parsing a constant-expression. */
parser->integral_constant_expression_p = true;
/* In C++11 the constant-expression rules are relaxed, so non-integral
   subexpressions are always tolerated during parsing and validated
   with the constexpr machinery below instead.  */
parser->allow_non_integral_constant_expression_p
= (allow_non_constant_p || cxx_dialect >= cxx0x);
parser->non_integral_constant_expression_p = false;
/* Although the grammar says "conditional-expression", we parse an
"assignment-expression", which also permits "throw-expression"
and the use of assignment operators. In the case that
ALLOW_NON_CONSTANT_P is false, we get better errors than we would
otherwise. In the case that ALLOW_NON_CONSTANT_P is true, it is
actually essential that we look for an assignment-expression.
For example, cp_parser_initializer_clauses uses this function to
determine whether a particular assignment-expression is in fact
constant. */
expression = cp_parser_assignment_expression (parser, /*cast_p=*/false, NULL);
/* Restore the old settings. */
parser->integral_constant_expression_p
= saved_integral_constant_expression_p;
parser->allow_non_integral_constant_expression_p
= saved_allow_non_integral_constant_expression_p;
if (cxx_dialect >= cxx0x)
{
/* Require an rvalue constant expression here; that's what our
callers expect. Reference constant expressions are handled
separately in e.g. cp_parser_template_argument. */
bool is_const = potential_rvalue_constant_expression (expression);
parser->non_integral_constant_expression_p = !is_const;
if (!is_const && !allow_non_constant_p)
require_potential_rvalue_constant_expression (expression);
}
/* Report constancy to the caller before restoring the flag, since the
   flag reflects what happened while parsing EXPRESSION.  */
if (allow_non_constant_p)
*non_constant_p = parser->non_integral_constant_expression_p;
parser->non_integral_constant_expression_p
= saved_non_integral_constant_expression_p;
return expression;
}
/* Parse __builtin_offsetof.
offsetof-expression:
"__builtin_offsetof" "(" type-id "," offsetof-member-designator ")"
offsetof-member-designator:
id-expression
| offsetof-member-designator "." id-expression
| offsetof-member-designator "[" expression "]"
| offsetof-member-designator "->" id-expression */
static tree
cp_parser_builtin_offsetof (cp_parser *parser)
{
int save_ice_p, save_non_ice_p;
tree type, expr;
cp_id_kind dummy;
cp_token *token;
/* We're about to accept non-integral-constant things, but will
definitely yield an integral constant expression. Save and
restore these values around our local parsing. */
save_ice_p = parser->integral_constant_expression_p;
save_non_ice_p = parser->non_integral_constant_expression_p;
/* Consume the "__builtin_offsetof" token. */
cp_lexer_consume_token (parser->lexer);
/* Consume the opening `('. */
cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);
/* Parse the type-id. */
type = cp_parser_type_id (parser);
/* Look for the `,'. */
cp_parser_require (parser, CPP_COMMA, RT_COMMA);
token = cp_lexer_peek_token (parser->lexer);
/* Build the (type *)null that begins the traditional offsetof macro. */
expr = build_static_cast (build_pointer_type (type), null_pointer_node,
tf_warning_or_error);
/* Parse the offsetof-member-designator. We begin as if we saw "expr->". */
expr = cp_parser_postfix_dot_deref_expression (parser, CPP_DEREF, expr,
true, &dummy, token->location);
/* Accumulate the rest of the designator: any sequence of `.', `->'
   and `[...]' accesses until the closing `)'.  */
while (true)
{
token = cp_lexer_peek_token (parser->lexer);
switch (token->type)
{
case CPP_OPEN_SQUARE:
/* offsetof-member-designator "[" expression "]" */
expr = cp_parser_postfix_open_square_expression (parser, expr, true);
break;
case CPP_DEREF:
/* offsetof-member-designator "->" identifier
   Treat `x->y' as `x[0].y' and share the `.' handling below. */
expr = grok_array_decl (expr, integer_zero_node);
/* FALLTHRU */
case CPP_DOT:
/* offsetof-member-designator "." identifier */
cp_lexer_consume_token (parser->lexer);
expr = cp_parser_postfix_dot_deref_expression (parser, CPP_DOT,
expr, true, &dummy,
token->location);
break;
case CPP_CLOSE_PAREN:
/* Consume the ")" token. */
cp_lexer_consume_token (parser->lexer);
goto success;
default:
/* Error. We know the following require will fail, but
that gives the proper error message. */
cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
cp_parser_skip_to_closing_parenthesis (parser, true, false, true);
expr = error_mark_node;
goto failure;
}
}
success:
/* If we're processing a template, we can't finish the semantics yet.
Otherwise we can fold the entire expression now. */
if (processing_template_decl)
expr = build1 (OFFSETOF_EXPR, size_type_node, expr);
else
expr = finish_offsetof (expr);
failure:
/* Restore the flags saved at function entry on both paths.  */
parser->integral_constant_expression_p = save_ice_p;
parser->non_integral_constant_expression_p = save_non_ice_p;
return expr;
}
/* Parse a trait expression.
Returns a representation of the expression, the underlying type
of the type at issue when KEYWORD is RID_UNDERLYING_TYPE. */
static tree
cp_parser_trait_expr (cp_parser* parser, enum rid keyword)
{
cp_trait_kind kind;
tree type1, type2 = NULL_TREE;
bool binary = false;
cp_decl_specifier_seq decl_specs;
/* Translate the trait keyword into the corresponding trait kind.
   Traits marked `binary' take two type operands.  */
switch (keyword)
{
case RID_HAS_NOTHROW_ASSIGN:
kind = CPTK_HAS_NOTHROW_ASSIGN;
break;
case RID_HAS_NOTHROW_CONSTRUCTOR:
kind = CPTK_HAS_NOTHROW_CONSTRUCTOR;
break;
case RID_HAS_NOTHROW_COPY:
kind = CPTK_HAS_NOTHROW_COPY;
break;
case RID_HAS_TRIVIAL_ASSIGN:
kind = CPTK_HAS_TRIVIAL_ASSIGN;
break;
case RID_HAS_TRIVIAL_CONSTRUCTOR:
kind = CPTK_HAS_TRIVIAL_CONSTRUCTOR;
break;
case RID_HAS_TRIVIAL_COPY:
kind = CPTK_HAS_TRIVIAL_COPY;
break;
case RID_HAS_TRIVIAL_DESTRUCTOR:
kind = CPTK_HAS_TRIVIAL_DESTRUCTOR;
break;
case RID_HAS_VIRTUAL_DESTRUCTOR:
kind = CPTK_HAS_VIRTUAL_DESTRUCTOR;
break;
case RID_IS_ABSTRACT:
kind = CPTK_IS_ABSTRACT;
break;
case RID_IS_BASE_OF:
kind = CPTK_IS_BASE_OF;
binary = true;
break;
case RID_IS_CLASS:
kind = CPTK_IS_CLASS;
break;
case RID_IS_CONVERTIBLE_TO:
kind = CPTK_IS_CONVERTIBLE_TO;
binary = true;
break;
case RID_IS_EMPTY:
kind = CPTK_IS_EMPTY;
break;
case RID_IS_ENUM:
kind = CPTK_IS_ENUM;
break;
case RID_IS_FINAL:
kind = CPTK_IS_FINAL;
break;
case RID_IS_LITERAL_TYPE:
kind = CPTK_IS_LITERAL_TYPE;
break;
case RID_IS_POD:
kind = CPTK_IS_POD;
break;
case RID_IS_POLYMORPHIC:
kind = CPTK_IS_POLYMORPHIC;
break;
case RID_IS_STD_LAYOUT:
kind = CPTK_IS_STD_LAYOUT;
break;
case RID_IS_TRIVIAL:
kind = CPTK_IS_TRIVIAL;
break;
case RID_IS_UNION:
kind = CPTK_IS_UNION;
break;
case RID_UNDERLYING_TYPE:
kind = CPTK_UNDERLYING_TYPE;
break;
case RID_BASES:
kind = CPTK_BASES;
break;
case RID_DIRECT_BASES:
kind = CPTK_DIRECT_BASES;
break;
default:
gcc_unreachable ();
}
/* Consume the token. */
cp_lexer_consume_token (parser->lexer);
/* Parse `( type-id [, type-id] )'.  */
cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);
type1 = cp_parser_type_id (parser);
if (type1 == error_mark_node)
return error_mark_node;
/* Build a trivial decl-specifier-seq. */
clear_decl_specs (&decl_specs);
decl_specs.type = type1;
/* Call grokdeclarator to figure out what type this is. */
type1 = grokdeclarator (NULL, &decl_specs, TYPENAME,
/*initialized=*/0, /*attrlist=*/NULL);
if (binary)
{
cp_parser_require (parser, CPP_COMMA, RT_COMMA);
type2 = cp_parser_type_id (parser);
if (type2 == error_mark_node)
return error_mark_node;
/* Build a trivial decl-specifier-seq. */
clear_decl_specs (&decl_specs);
decl_specs.type = type2;
/* Call grokdeclarator to figure out what type this is. */
type2 = grokdeclarator (NULL, &decl_specs, TYPENAME,
/*initialized=*/0, /*attrlist=*/NULL);
}
cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
/* Complete the trait expression, which may mean either processing
the trait expr now or saving it for template instantiation. */
switch(kind)
{
case CPTK_UNDERLYING_TYPE:
return finish_underlying_type (type1);
case CPTK_BASES:
return finish_bases (type1, false);
case CPTK_DIRECT_BASES:
return finish_bases (type1, true);
default:
return finish_trait_expr (kind, type1, type2);
}
}
/* Lambdas that appear in variable initializer or default argument scope
get that in their mangling, so we need to record it. We might as well
use the count for function and namespace scopes as well. */
/* The scope (function, variable initializer, etc.) whose lambdas are
   currently being counted, and the running count used as the mangling
   discriminator within that scope.  */
static GTY(()) tree lambda_scope;
static GTY(()) int lambda_count;
/* A saved (scope, count) pair, pushed when a nested scope is entered.  */
typedef struct GTY(()) tree_int
{
tree t;
int i;
} tree_int;
DEF_VEC_O(tree_int);
DEF_VEC_ALLOC_O(tree_int,gc);
/* Stack of saved (scope, count) pairs, popped by finish_lambda_scope.  */
static GTY(()) VEC(tree_int,gc) *lambda_scope_stack;
static void
start_lambda_scope (tree decl)
{
  tree_int entry;

  gcc_assert (decl);
  /* Once we're inside a function, we ignore other scopes and just push
     the function again so that popping works properly.  */
  if (current_function_decl && TREE_CODE (decl) != FUNCTION_DECL)
    decl = current_function_decl;

  /* Save the current (scope, count) pair.  */
  entry.t = lambda_scope;
  entry.i = lambda_count;
  VEC_safe_push (tree_int, gc, lambda_scope_stack, &entry);

  /* Re-entering the same scope keeps the running count; a new scope
     restarts it.  */
  if (lambda_scope == decl)
    return;
  lambda_scope = decl;
  lambda_count = 0;
}
static void
record_lambda_scope (tree lambda)
{
/* Stamp LAMBDA with the current mangling scope and hand it the next
   discriminator within that scope.  */
LAMBDA_EXPR_EXTRA_SCOPE (lambda) = lambda_scope;
LAMBDA_EXPR_DISCRIMINATOR (lambda) = lambda_count++;
}
static void
finish_lambda_scope (void)
{
  /* Peek at the pair saved by the matching start_lambda_scope.  */
  tree_int *saved = VEC_last (tree_int, lambda_scope_stack);

  /* Restore it only when the scope actually changed; otherwise keep
     the running count.  */
  if (lambda_scope != saved->t)
    {
      lambda_scope = saved->t;
      lambda_count = saved->i;
    }

  VEC_pop (tree_int, lambda_scope_stack);
}
/* Parse a lambda expression.
lambda-expression:
lambda-introducer lambda-declarator [opt] compound-statement
Returns a representation of the expression. */
static tree
cp_parser_lambda_expression (cp_parser* parser)
{
  tree lambda_expr = build_lambda_expr ();
  tree type;
  bool ok;

  LAMBDA_EXPR_LOCATION (lambda_expr)
    = cp_lexer_peek_token (parser->lexer)->location;

  if (cp_unevaluated_operand)
    error_at (LAMBDA_EXPR_LOCATION (lambda_expr),
	      "lambda-expression in unevaluated context");

  /* We may be in the middle of deferred access check.  Disable
     it now.  */
  push_deferring_access_checks (dk_no_deferred);

  cp_parser_lambda_introducer (parser, lambda_expr);

  type = begin_lambda_type (lambda_expr);
  if (type == error_mark_node)
    {
      /* Balance the push_deferring_access_checks above; returning
	 without popping would leave stale deferred-check state
	 behind for the rest of the parse.  */
      pop_deferring_access_checks ();
      return error_mark_node;
    }

  record_lambda_scope (lambda_expr);

  /* Do this again now that LAMBDA_EXPR_EXTRA_SCOPE is set.  */
  determine_visibility (TYPE_NAME (type));

  /* Now that we've started the type, add the capture fields for any
     explicit captures.  */
  register_capture_members (LAMBDA_EXPR_CAPTURE_LIST (lambda_expr));

  {
    /* Inside the class, surrounding template-parameter-lists do not
       apply.  */
    unsigned int saved_num_template_parameter_lists
      = parser->num_template_parameter_lists;
    unsigned char in_statement = parser->in_statement;
    bool in_switch_statement_p = parser->in_switch_statement_p;

    parser->num_template_parameter_lists = 0;
    parser->in_statement = 0;
    parser->in_switch_statement_p = false;

    /* By virtue of defining a local class, a lambda expression has
       access to the private variables of enclosing classes.  */
    ok = cp_parser_lambda_declarator_opt (parser, lambda_expr);

    if (ok)
      cp_parser_lambda_body (parser, lambda_expr);
    else if (cp_parser_require (parser, CPP_OPEN_BRACE, RT_OPEN_BRACE))
      cp_parser_skip_to_end_of_block_or_statement (parser);

    /* The capture list was built up in reverse order; fix that now.  */
    {
      tree newlist = NULL_TREE;
      tree elt, next;
      for (elt = LAMBDA_EXPR_CAPTURE_LIST (lambda_expr);
	   elt; elt = next)
	{
	  next = TREE_CHAIN (elt);
	  TREE_CHAIN (elt) = newlist;
	  newlist = elt;
	}
      LAMBDA_EXPR_CAPTURE_LIST (lambda_expr) = newlist;
    }

    if (ok)
      maybe_add_lambda_conv_op (type);

    type = finish_struct (type, /*attributes=*/NULL_TREE);

    parser->num_template_parameter_lists = saved_num_template_parameter_lists;
    parser->in_statement = in_statement;
    parser->in_switch_statement_p = in_switch_statement_p;
  }

  pop_deferring_access_checks ();

  /* This field is only used during parsing of the lambda.  */
  LAMBDA_EXPR_THIS_CAPTURE (lambda_expr) = NULL_TREE;

  /* This lambda shouldn't have any proxies left at this point.  */
  gcc_assert (LAMBDA_EXPR_PENDING_PROXIES (lambda_expr) == NULL);
  /* And now that we're done, push proxies for an enclosing lambda.  */
  insert_pending_capture_proxies ();

  if (ok)
    return build_lambda_object (lambda_expr);
  else
    return error_mark_node;
}
/* Parse the beginning of a lambda expression.
lambda-introducer:
[ lambda-capture [opt] ]
LAMBDA_EXPR is the current representation of the lambda expression. */
static void
cp_parser_lambda_introducer (cp_parser* parser, tree lambda_expr)
{
/* Need commas after the first capture. */
bool first = true;
/* Eat the leading `['. */
cp_parser_require (parser, CPP_OPEN_SQUARE, RT_OPEN_SQUARE);
/* Record default capture mode. "[&" "[=" "[&," "[=," */
/* A lone `&' followed by a name is a by-reference capture of that
   name, not a capture-default, hence the CPP_NAME look-ahead.  */
if (cp_lexer_next_token_is (parser->lexer, CPP_AND)
&& cp_lexer_peek_nth_token (parser->lexer, 2)->type != CPP_NAME)
LAMBDA_EXPR_DEFAULT_CAPTURE_MODE (lambda_expr) = CPLD_REFERENCE;
else if (cp_lexer_next_token_is (parser->lexer, CPP_EQ))
LAMBDA_EXPR_DEFAULT_CAPTURE_MODE (lambda_expr) = CPLD_COPY;
if (LAMBDA_EXPR_DEFAULT_CAPTURE_MODE (lambda_expr) != CPLD_NONE)
{
cp_lexer_consume_token (parser->lexer);
first = false;
}
/* Parse each capture in turn until the closing `]'.  */
while (cp_lexer_next_token_is_not (parser->lexer, CPP_CLOSE_SQUARE))
{
cp_token* capture_token;
tree capture_id;
tree capture_init_expr;
cp_id_kind idk = CP_ID_KIND_NONE;
bool explicit_init_p = false;
enum capture_kind_type
{
BY_COPY,
BY_REFERENCE
};
enum capture_kind_type capture_kind = BY_COPY;
if (cp_lexer_next_token_is (parser->lexer, CPP_EOF))
{
error ("expected end of capture-list");
return;
}
if (first)
first = false;
else
cp_parser_require (parser, CPP_COMMA, RT_COMMA);
/* Possibly capture `this'. */
if (cp_lexer_next_token_is_keyword (parser->lexer, RID_THIS))
{
location_t loc = cp_lexer_peek_token (parser->lexer)->location;
if (LAMBDA_EXPR_DEFAULT_CAPTURE_MODE (lambda_expr) == CPLD_COPY)
pedwarn (loc, 0, "explicit by-copy capture of %<this%> redundant "
"with by-copy capture default");
cp_lexer_consume_token (parser->lexer);
add_capture (lambda_expr,
/*id=*/this_identifier,
/*initializer=*/finish_this_expr(),
/*by_reference_p=*/false,
explicit_init_p);
continue;
}
/* Remember whether we want to capture as a reference or not. */
if (cp_lexer_next_token_is (parser->lexer, CPP_AND))
{
capture_kind = BY_REFERENCE;
cp_lexer_consume_token (parser->lexer);
}
/* Get the identifier. */
capture_token = cp_lexer_peek_token (parser->lexer);
capture_id = cp_parser_identifier (parser);
if (capture_id == error_mark_node)
/* Would be nice to have a cp_parser_skip_to_closing_x for general
delimiters, but I modified this to stop on unnested ']' as well. It
was already changed to stop on unnested '}', so the
"closing_parenthesis" name is no more misleading with my change. */
{
cp_parser_skip_to_closing_parenthesis (parser,
/*recovering=*/true,
/*or_comma=*/true,
/*consume_paren=*/true);
break;
}
/* Find the initializer for this capture. */
if (cp_lexer_next_token_is (parser->lexer, CPP_EQ))
{
/* An explicit expression exists. */
cp_lexer_consume_token (parser->lexer);
pedwarn (input_location, OPT_pedantic,
"ISO C++ does not allow initializers "
"in lambda expression capture lists");
capture_init_expr = cp_parser_assignment_expression (parser,
/*cast_p=*/true,
&idk);
explicit_init_p = true;
}
else
{
const char* error_msg;
/* Turn the identifier into an id-expression. */
capture_init_expr
= cp_parser_lookup_name
(parser,
capture_id,
none_type,
/*is_template=*/false,
/*is_namespace=*/false,
/*check_dependency=*/true,
/*ambiguous_decls=*/NULL,
capture_token->location);
if (capture_init_expr == error_mark_node)
{
unqualified_name_lookup_error (capture_id);
continue;
}
else if (DECL_P (capture_init_expr)
&& (TREE_CODE (capture_init_expr) != VAR_DECL
&& TREE_CODE (capture_init_expr) != PARM_DECL))
{
error_at (capture_token->location,
"capture of non-variable %qD ",
capture_init_expr);
inform (0, "%q+#D declared here", capture_init_expr);
continue;
}
if (TREE_CODE (capture_init_expr) == VAR_DECL
&& decl_storage_duration (capture_init_expr) != dk_auto)
{
pedwarn (capture_token->location, 0, "capture of variable "
"%qD with non-automatic storage duration",
capture_init_expr);
inform (0, "%q+#D declared here", capture_init_expr);
continue;
}
capture_init_expr
= finish_id_expression
(capture_id,
capture_init_expr,
parser->scope,
&idk,
/*integral_constant_expression_p=*/false,
/*allow_non_integral_constant_expression_p=*/false,
/*non_integral_constant_expression_p=*/NULL,
/*template_p=*/false,
/*done=*/true,
/*address_p=*/false,
/*template_arg_p=*/false,
&error_msg,
capture_token->location);
}
/* Diagnose explicit captures that merely repeat the capture-default.  */
if (LAMBDA_EXPR_DEFAULT_CAPTURE_MODE (lambda_expr) != CPLD_NONE
&& !explicit_init_p)
{
if (LAMBDA_EXPR_DEFAULT_CAPTURE_MODE (lambda_expr) == CPLD_COPY
&& capture_kind == BY_COPY)
pedwarn (capture_token->location, 0, "explicit by-copy capture "
"of %qD redundant with by-copy capture default",
capture_id);
if (LAMBDA_EXPR_DEFAULT_CAPTURE_MODE (lambda_expr) == CPLD_REFERENCE
&& capture_kind == BY_REFERENCE)
pedwarn (capture_token->location, 0, "explicit by-reference "
"capture of %qD redundant with by-reference capture "
"default", capture_id);
}
add_capture (lambda_expr,
capture_id,
capture_init_expr,
/*by_reference_p=*/capture_kind == BY_REFERENCE,
explicit_init_p);
}
cp_parser_require (parser, CPP_CLOSE_SQUARE, RT_CLOSE_SQUARE);
}
/* Parse the (optional) middle of a lambda expression.
lambda-declarator:
( parameter-declaration-clause [opt] )
attribute-specifier [opt]
mutable [opt]
exception-specification [opt]
lambda-return-type-clause [opt]
LAMBDA_EXPR is the current representation of the lambda expression. */
static bool
cp_parser_lambda_declarator_opt (cp_parser* parser, tree lambda_expr)
{
  /* 5.1.1.4 of the standard says:
       If a lambda-expression does not include a lambda-declarator, it is as if
       the lambda-declarator were ().
     This means an empty parameter list, no attributes, and no exception
     specification.  */
  tree param_list = void_list_node;
  tree attributes = NULL_TREE;
  tree exception_spec = NULL_TREE;
  tree t;
  /* The lambda-declarator is optional, but must begin with an opening
     parenthesis if present.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN))
    {
      cp_lexer_consume_token (parser->lexer);
      /* Open a parameter scope; it is kept open until after the optional
	 trailing-return-type has been parsed (see below).  */
      begin_scope (sk_function_parms, /*entity=*/NULL_TREE);
      /* Parse parameters.  */
      param_list = cp_parser_parameter_declaration_clause (parser);
      /* Default arguments shall not be specified in the
	 parameter-declaration-clause of a lambda-declarator.
	 TREE_PURPOSE holds the default argument, if any.  */
      for (t = param_list; t; t = TREE_CHAIN (t))
	if (TREE_PURPOSE (t))
	  pedwarn (DECL_SOURCE_LOCATION (TREE_VALUE (t)), OPT_pedantic,
		   "default argument specified for lambda parameter");
      cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
      attributes = cp_parser_attributes_opt (parser);
      /* Parse optional `mutable' keyword.  */
      if (cp_lexer_next_token_is_keyword (parser->lexer, RID_MUTABLE))
	{
	  cp_lexer_consume_token (parser->lexer);
	  LAMBDA_EXPR_MUTABLE_P (lambda_expr) = 1;
	}
      /* Parse optional exception specification.  */
      exception_spec = cp_parser_exception_specification_opt (parser);
      /* Parse optional trailing return type, introduced by `->'.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_DEREF))
	{
	  cp_lexer_consume_token (parser->lexer);
	  LAMBDA_EXPR_RETURN_TYPE (lambda_expr) = cp_parser_type_id (parser);
	}
      /* The function parameters must be in scope all the way until after the
	 trailing-return-type in case of decltype.  */
      for (t = current_binding_level->names; t; t = DECL_CHAIN (t))
	pop_binding (DECL_NAME (t), t);
      leave_scope ();
    }
  /* Create the function call operator.
     Messing with declarators like this is no uglier than building up the
     FUNCTION_DECL by hand, and this is less likely to get out of sync with
     other code.  */
  {
    cp_decl_specifier_seq return_type_specs;
    cp_declarator* declarator;
    tree fco;
    int quals;
    void *p;
    clear_decl_specs (&return_type_specs);
    if (LAMBDA_EXPR_RETURN_TYPE (lambda_expr))
      return_type_specs.type = LAMBDA_EXPR_RETURN_TYPE (lambda_expr);
    else
      /* Maybe we will deduce the return type later, but we can use void
	 as a placeholder return type anyways.  */
      return_type_specs.type = void_type_node;
    p = obstack_alloc (&declarator_obstack, 0);
    /* The call operator is `operator()'.  */
    declarator = make_id_declarator (NULL_TREE, ansi_opname (CALL_EXPR),
				     sfk_none);
    /* A non-mutable lambda's operator() is const-qualified.  */
    quals = (LAMBDA_EXPR_MUTABLE_P (lambda_expr)
	     ? TYPE_UNQUALIFIED : TYPE_QUAL_CONST);
    declarator = make_call_declarator (declarator, param_list, quals,
				       VIRT_SPEC_UNSPECIFIED,
				       exception_spec,
				       /*late_return_type=*/NULL_TREE);
    declarator->id_loc = LAMBDA_EXPR_LOCATION (lambda_expr);
    fco = grokmethod (&return_type_specs,
		      declarator,
		      attributes);
    if (fco != error_mark_node)
      {
	DECL_INITIALIZED_IN_CLASS_P (fco) = 1;
	DECL_ARTIFICIAL (fco) = 1;
	/* Give the object parameter a different name.  */
	DECL_NAME (DECL_ARGUMENTS (fco)) = get_identifier ("__closure");
      }
    finish_member_declaration (fco);
    obstack_free (&declarator_obstack, p);
    /* Returns false only if grokmethod failed.  */
    return (fco != error_mark_node);
  }
}
/* Parse the body of a lambda expression, which is simply
compound-statement
but which requires special handling.
LAMBDA_EXPR is the current representation of the lambda expression. */
static void
cp_parser_lambda_body (cp_parser* parser, tree lambda_expr)
{
  /* NESTED is true when the lambda appears inside another function.  */
  bool nested = (current_function_decl != NULL_TREE);
  bool local_variables_forbidden_p = parser->local_variables_forbidden_p;
  if (nested)
    push_function_context ();
  else
    /* Still increment function_depth so that we don't GC in the
       middle of an expression.  */
    ++function_depth;
  /* Clear this in case we're in the middle of a default argument.  */
  parser->local_variables_forbidden_p = false;
  /* Finish the function call operator
     - class_specifier
     + late_parsing_for_member
     + function_definition_after_declarator
     + ctor_initializer_opt_and_function_body  */
  {
    tree fco = lambda_function (lambda_expr);
    tree body;
    bool done = false;
    tree compound_stmt;
    tree cap;
    /* Let the front end know that we are going to be defining this
       function.  */
    start_preparsed_function (fco,
			      NULL_TREE,
			      SF_PRE_PARSED | SF_INCLASS_INLINE);
    start_lambda_scope (fco);
    body = begin_function_body ();
    /* If the opening brace is missing, skip straight to cleanup; the
       require call has already issued a diagnostic.  */
    if (!cp_parser_require (parser, CPP_OPEN_BRACE, RT_OPEN_BRACE))
      goto out;
    /* Push the proxies for any explicit captures.  */
    for (cap = LAMBDA_EXPR_CAPTURE_LIST (lambda_expr); cap;
	 cap = TREE_CHAIN (cap))
      build_capture_proxy (TREE_PURPOSE (cap));
    compound_stmt = begin_compound_stmt (0);
    /* 5.1.1.4 of the standard says:
         If a lambda-expression does not include a trailing-return-type, it
         is as if the trailing-return-type denotes the following type:
	  * if the compound-statement is of the form
               { return attribute-specifier [opt] expression ; }
             the type of the returned expression after lvalue-to-rvalue
             conversion (_conv.lval_ 4.1), array-to-pointer conversion
             (_conv.array_ 4.2), and function-to-pointer conversion
             (_conv.func_ 4.3);
          * otherwise, void.  */
    /* In a lambda that has neither a lambda-return-type-clause
       nor a deducible form, errors should be reported for return statements
       in the body.  Since we used void as the placeholder return type, parsing
       the body as usual will give such desired behavior.  */
    if (!LAMBDA_EXPR_RETURN_TYPE (lambda_expr)
	&& cp_lexer_peek_nth_token (parser->lexer, 1)->keyword == RID_RETURN
	&& cp_lexer_peek_nth_token (parser->lexer, 2)->type != CPP_SEMICOLON)
      {
	tree expr = NULL_TREE;
	cp_id_kind idk = CP_ID_KIND_NONE;
	/* Parse tentatively in case there's more after the initial return
	   statement.  */
	cp_parser_parse_tentatively (parser);
	cp_parser_require_keyword (parser, RID_RETURN, RT_RETURN);
	expr = cp_parser_expression (parser, /*cast_p=*/false, &idk);
	cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);
	cp_parser_require (parser, CPP_CLOSE_BRACE, RT_CLOSE_BRACE);
	if (cp_parser_parse_definitely (parser))
	  {
	    /* The body is exactly `{ return expr ; }', so deduce the
	       return type from EXPR.  */
	    apply_lambda_return_type (lambda_expr, lambda_return_type (expr));
	    /* Will get error here if type not deduced yet.  */
	    finish_return_stmt (expr);
	    done = true;
	  }
      }
    if (!done)
      {
	/* Otherwise parse the body the ordinary way.  */
	if (!LAMBDA_EXPR_RETURN_TYPE (lambda_expr))
	  LAMBDA_EXPR_DEDUCE_RETURN_TYPE_P (lambda_expr) = true;
	while (cp_lexer_next_token_is_keyword (parser->lexer, RID_LABEL))
	  cp_parser_label_declaration (parser);
	cp_parser_statement_seq_opt (parser, NULL_TREE);
	cp_parser_require (parser, CPP_CLOSE_BRACE, RT_CLOSE_BRACE);
	LAMBDA_EXPR_DEDUCE_RETURN_TYPE_P (lambda_expr) = false;
      }
    finish_compound_stmt (compound_stmt);
  out:
    finish_function_body (body);
    finish_lambda_scope ();
    /* Finish the function and generate code for it if necessary.  */
    expand_or_defer_fn (finish_function (/*inline*/2));
  }
  /* Restore the saved parser and function state.  */
  parser->local_variables_forbidden_p = local_variables_forbidden_p;
  if (nested)
    pop_function_context();
  else
    --function_depth;
}
/* Statements [gram.stmt.stmt] */
/* Parse a statement.
statement:
labeled-statement
expression-statement
compound-statement
selection-statement
iteration-statement
jump-statement
declaration-statement
try-block
TM Extension:
statement:
atomic-statement
IN_COMPOUND is true when the statement is nested inside a
cp_parser_compound_statement; this matters for certain pragmas.
If IF_P is not NULL, *IF_P is set to indicate whether the statement
is a (possibly labeled) if statement which is not enclosed in braces
and has an else clause. This is used to implement -Wparentheses. */
static void
cp_parser_statement (cp_parser* parser, tree in_statement_expr,
		     bool in_compound, bool *if_p)
{
  tree statement;
  cp_token *token;
  location_t statement_location;
  /* Labels re-enter here via goto so the labeled statement itself is
     parsed by the same invocation (tail recursion by hand).  */
 restart:
  if (if_p != NULL)
    *if_p = false;
  /* There is no statement yet.  */
  statement = NULL_TREE;
  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);
  /* Remember the location of the first token in the statement.  */
  statement_location = token->location;
  /* If this is a keyword, then that will often determine what kind of
     statement we have.  */
  if (token->type == CPP_KEYWORD)
    {
      enum rid keyword = token->keyword;
      switch (keyword)
	{
	case RID_CASE:
	case RID_DEFAULT:
	  /* Looks like a labeled-statement with a case label.
	     Parse the label, and then use tail recursion to parse
	     the statement.  */
	  cp_parser_label_for_labeled_statement (parser);
	  goto restart;
	case RID_IF:
	case RID_SWITCH:
	  statement = cp_parser_selection_statement (parser, if_p);
	  break;
	case RID_WHILE:
	case RID_DO:
	case RID_FOR:
	  statement = cp_parser_iteration_statement (parser);
	  break;
	case RID_BREAK:
	case RID_CONTINUE:
	case RID_RETURN:
	case RID_GOTO:
	  statement = cp_parser_jump_statement (parser);
	  break;
	  /* Objective-C++ exception-handling constructs.  */
	case RID_AT_TRY:
	case RID_AT_CATCH:
	case RID_AT_FINALLY:
	case RID_AT_SYNCHRONIZED:
	case RID_AT_THROW:
	  statement = cp_parser_objc_statement (parser);
	  break;
	case RID_TRY:
	  statement = cp_parser_try_block (parser);
	  break;
	case RID_NAMESPACE:
	  /* This must be a namespace alias definition.  */
	  cp_parser_declaration_statement (parser);
	  return;
	case RID_TRANSACTION_ATOMIC:
	case RID_TRANSACTION_RELAXED:
	  statement = cp_parser_transaction (parser, keyword);
	  break;
	case RID_TRANSACTION_CANCEL:
	  statement = cp_parser_transaction_cancel (parser);
	  break;
	default:
	  /* It might be a keyword like `int' that can start a
	     declaration-statement.  */
	  break;
	}
    }
  else if (token->type == CPP_NAME)
    {
      /* If the next token is a `:', then we are looking at a
	 labeled-statement.  */
      token = cp_lexer_peek_nth_token (parser->lexer, 2);
      if (token->type == CPP_COLON)
	{
	  /* Looks like a labeled-statement with an ordinary label.
	     Parse the label, and then use tail recursion to parse
	     the statement.  */
	  cp_parser_label_for_labeled_statement (parser);
	  goto restart;
	}
    }
  /* Anything that starts with a `{' must be a compound-statement.  */
  else if (token->type == CPP_OPEN_BRACE)
    statement = cp_parser_compound_statement (parser, NULL, false, false);
  /* CPP_PRAGMA is a #pragma inside a function body, which constitutes
     a statement all its own.  */
  else if (token->type == CPP_PRAGMA)
    {
      /* Only certain OpenMP pragmas are attached to statements, and thus
	 are considered statements themselves.  All others are not.  In
	 the context of a compound, accept the pragma as a "statement" and
	 return so that we can check for a close brace.  Otherwise we
	 require a real statement and must go back and read one.  */
      if (in_compound)
	cp_parser_pragma (parser, pragma_compound);
      else if (!cp_parser_pragma (parser, pragma_stmt))
	goto restart;
      return;
    }
  else if (token->type == CPP_EOF)
    {
      cp_parser_error (parser, "expected statement");
      return;
    }
  /* Everything else must be a declaration-statement or an
     expression-statement.  Try for the declaration-statement
     first, unless we are looking at a `;', in which case we know that
     we have an expression-statement.  */
  if (!statement)
    {
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
	{
	  cp_parser_parse_tentatively (parser);
	  /* Try to parse the declaration-statement.  */
	  cp_parser_declaration_statement (parser);
	  /* If that worked, we're done.  */
	  if (cp_parser_parse_definitely (parser))
	    return;
	}
      /* Look for an expression-statement instead.  */
      statement = cp_parser_expression_statement (parser, in_statement_expr);
    }
  /* Set the line number for the statement.  */
  if (statement && STATEMENT_CODE_P (TREE_CODE (statement)))
    SET_EXPR_LOCATION (statement, statement_location);
}
/* Parse the label for a labeled-statement, i.e.
identifier :
case constant-expression :
default :
GNU Extension:
case constant-expression ... constant-expression : statement
When a label is parsed without errors, the label is added to the
parse tree by the finish_* functions, so this function doesn't
have to return the label. */
static void
cp_parser_label_for_labeled_statement (cp_parser* parser)
{
  cp_token *token;
  tree label = NULL_TREE;
  bool saved_colon_corrects_to_scope_p = parser->colon_corrects_to_scope_p;
  /* The next token should be an identifier.  */
  token = cp_lexer_peek_token (parser->lexer);
  if (token->type != CPP_NAME
      && token->type != CPP_KEYWORD)
    {
      cp_parser_error (parser, "expected labeled-statement");
      return;
    }
  /* The `:' that ends the label must not be "corrected" to `::'.  */
  parser->colon_corrects_to_scope_p = false;
  switch (token->keyword)
    {
    case RID_CASE:
      {
	tree expr, expr_hi;
	cp_token *ellipsis;
	/* Consume the `case' token.  */
	cp_lexer_consume_token (parser->lexer);
	/* Parse the constant-expression.  */
	expr = cp_parser_constant_expression (parser,
					      /*allow_non_constant_p=*/false,
					      NULL);
	/* GNU extension: `case lo ... hi :' ranges.  */
	ellipsis = cp_lexer_peek_token (parser->lexer);
	if (ellipsis->type == CPP_ELLIPSIS)
	  {
	    /* Consume the `...' token.  */
	    cp_lexer_consume_token (parser->lexer);
	    expr_hi =
	      cp_parser_constant_expression (parser,
					     /*allow_non_constant_p=*/false,
					     NULL);
	    /* We don't need to emit warnings here, as the common code
	       will do this for us.  */
	  }
	else
	  expr_hi = NULL_TREE;
	if (parser->in_switch_statement_p)
	  finish_case_label (token->location, expr, expr_hi);
	else
	  error_at (token->location,
		    "case label %qE not within a switch statement",
		    expr);
      }
      break;
    case RID_DEFAULT:
      /* Consume the `default' token.  */
      cp_lexer_consume_token (parser->lexer);
      if (parser->in_switch_statement_p)
	finish_case_label (token->location, NULL_TREE, NULL_TREE);
      else
	error_at (token->location, "case label not within a switch statement");
      break;
    default:
      /* Anything else must be an ordinary label.  */
      label = finish_label_stmt (cp_parser_identifier (parser));
      break;
    }
  /* Require the `:' token.  */
  cp_parser_require (parser, CPP_COLON, RT_COLON);
  /* An ordinary label may optionally be followed by attributes.
     However, this is only permitted if the attributes are then
     followed by a semicolon.  This is because, for backward
     compatibility, when parsing
       lab: __attribute__ ((unused)) int i;
     we want the attribute to attach to "i", not "lab".  */
  if (label != NULL_TREE
      && cp_lexer_next_token_is_keyword (parser->lexer, RID_ATTRIBUTE))
    {
      tree attrs;
      /* Parse tentatively so we can back out if no `;' follows.  */
      cp_parser_parse_tentatively (parser);
      attrs = cp_parser_attributes_opt (parser);
      if (attrs == NULL_TREE
	  || cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
	cp_parser_abort_tentative_parse (parser);
      else if (!cp_parser_parse_definitely (parser))
	;
      else
	cplus_decl_attributes (&label, attrs, 0);
    }
  parser->colon_corrects_to_scope_p = saved_colon_corrects_to_scope_p;
}
/* Parse an expression-statement.
expression-statement:
expression [opt] ;
Returns the new EXPR_STMT -- or NULL_TREE if the expression
statement consists of nothing more than an `;'. IN_STATEMENT_EXPR_P
indicates whether this expression-statement is part of an
expression statement. */
static tree
cp_parser_expression_statement (cp_parser* parser, tree in_statement_expr)
{
  tree statement = NULL_TREE;
  /* Remember the first token for diagnostics below.  */
  cp_token *token = cp_lexer_peek_token (parser->lexer);
  /* If the next token is a ';', then there is no expression
     statement.  */
  if (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
    statement = cp_parser_expression (parser, /*cast_p=*/false, NULL);
  /* Give a helpful message for "A<T>::type t;" and the like.  The check
     runs only when a `;' did not follow the expression and we are not in
     an uncommitted tentative parse.  */
  if (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON)
      && !cp_parser_uncommitted_to_tentative_parse_p (parser))
    {
      if (TREE_CODE (statement) == SCOPE_REF)
	error_at (token->location, "need %<typename%> before %qE because "
		  "%qT is a dependent scope",
		  statement, TREE_OPERAND (statement, 0));
      else if (is_overloaded_fn (statement)
	       && DECL_CONSTRUCTOR_P (get_first_fn (statement)))
	{
	  /* A::A a; */
	  tree fn = get_first_fn (statement);
	  error_at (token->location,
		    "%<%T::%D%> names the constructor, not the type",
		    DECL_CONTEXT (fn), DECL_NAME (fn));
	}
    }
  /* Consume the final `;'.  */
  cp_parser_consume_semicolon_at_end_of_statement (parser);
  if (in_statement_expr
      && cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_BRACE))
    /* This is the final expression statement of a statement
       expression.  */
    statement = finish_stmt_expr_expr (statement, in_statement_expr);
  else if (statement)
    statement = finish_expr_stmt (statement);
  else
    /* A bare `;': no expression to finish.  */
    finish_stmt ();
  return statement;
}
/* Parse a compound-statement.
compound-statement:
{ statement-seq [opt] }
GNU extension:
compound-statement:
{ label-declaration-seq [opt] statement-seq [opt] }
label-declaration-seq:
label-declaration
label-declaration-seq label-declaration
Returns a tree representing the statement. */
static tree
cp_parser_compound_statement (cp_parser *parser, tree in_statement_expr,
			      bool in_try, bool function_body)
{
  tree stmt;

  /* A compound-statement must begin with `{'.  */
  if (!cp_parser_require (parser, CPP_OPEN_BRACE, RT_OPEN_BRACE))
    return error_mark_node;

  /* A nested compound-statement is not permitted in a constexpr
     function body.  */
  if (DECL_DECLARED_CONSTEXPR_P (current_function_decl)
      && !function_body)
    pedwarn (input_location, OPT_pedantic,
	     "compound-statement in constexpr function");

  /* Begin the compound-statement.  */
  stmt = begin_compound_stmt (in_try ? BCS_TRY_BLOCK : 0);

  /* GNU extension: leading `__label__' declarations.  */
  while (cp_lexer_next_token_is_keyword (parser->lexer, RID_LABEL))
    cp_parser_label_declaration (parser);

  /* Parse the (optional) statement-seq, then close the statement.  */
  cp_parser_statement_seq_opt (parser, in_statement_expr);
  finish_compound_stmt (stmt);

  /* Require the closing `}'.  */
  cp_parser_require (parser, CPP_CLOSE_BRACE, RT_CLOSE_BRACE);

  return stmt;
}
/* Parse an (optional) statement-seq.
statement-seq:
statement
statement-seq [opt] statement */
static void
cp_parser_statement_seq_opt (cp_parser* parser, tree in_statement_expr)
{
  /* Keep reading statements until a terminator is seen.  */
  for (;;)
    {
      cp_token *tok = cp_lexer_peek_token (parser->lexer);

      /* A `}', end of file, end of a pragma, or a stray `@end'
	 terminates the sequence.  */
      if (tok->type == CPP_CLOSE_BRACE
	  || tok->type == CPP_EOF
	  || tok->type == CPP_PRAGMA_EOL
	  || (tok->type == CPP_KEYWORD && tok->keyword == RID_AT_END))
	break;

      /* An `else' here either closes the then-clause of an enclosing
	 if-statement, or is an error.  */
      if (tok->type == CPP_KEYWORD && tok->keyword == RID_ELSE)
	{
	  if (parser->in_statement & IN_IF_STMT)
	    break;
	  tok = cp_lexer_consume_token (parser->lexer);
	  error_at (tok->location, "%<else%> without a previous %<if%>");
	}

      /* Parse the statement.  */
      cp_parser_statement (parser, in_statement_expr, true, NULL);
    }
}
/* Parse a selection-statement.
selection-statement:
if ( condition ) statement
if ( condition ) statement else statement
switch ( condition ) statement
Returns the new IF_STMT or SWITCH_STMT.
If IF_P is not NULL, *IF_P is set to indicate whether the statement
is a (possibly labeled) if statement which is not enclosed in
braces and has an else clause. This is used to implement
-Wparentheses. */
/* Parse an if-statement or switch-statement.  IF_P, when non-NULL, is set
   to true if the parsed statement is an unbraced if-statement with an else
   clause (used for -Wparentheses; see the comment above).  Returns the new
   IF_STMT or SWITCH_STMT, or error_mark_node on a parse error.  */
static tree
cp_parser_selection_statement (cp_parser* parser, bool *if_p)
{
  cp_token *token;
  enum rid keyword;
  if (if_p != NULL)
    *if_p = false;
  /* Peek at the next token.  */
  token = cp_parser_require (parser, CPP_KEYWORD, RT_SELECT);
  /* cp_parser_require returns NULL (after emitting a diagnostic) when the
     next token is not a keyword.  Callers normally reach this function
     only when looking at `if' or `switch', but guard the error path
     instead of dereferencing a null token.  */
  if (token == NULL)
    return error_mark_node;
  /* See what kind of keyword it is.  */
  keyword = token->keyword;
  switch (keyword)
    {
    case RID_IF:
    case RID_SWITCH:
      {
	tree statement;
	tree condition;
	/* Look for the `('.  */
	if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN))
	  {
	    cp_parser_skip_to_end_of_statement (parser);
	    return error_mark_node;
	  }
	/* Begin the selection-statement.  */
	if (keyword == RID_IF)
	  statement = begin_if_stmt ();
	else
	  statement = begin_switch_stmt ();
	/* Parse the condition.  */
	condition = cp_parser_condition (parser);
	/* Look for the `)'.  */
	if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN))
	  cp_parser_skip_to_closing_parenthesis (parser, true, false,
						 /*consume_paren=*/true);
	if (keyword == RID_IF)
	  {
	    bool nested_if;
	    unsigned char in_statement;
	    /* Add the condition.  */
	    finish_if_stmt_cond (condition, statement);
	    /* Parse the then-clause.  */
	    in_statement = parser->in_statement;
	    parser->in_statement |= IN_IF_STMT;
	    if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
	      {
		/* An empty then-clause: `if (cond);'.  Warn unless an
		   else-clause follows immediately.  */
		location_t loc = cp_lexer_peek_token (parser->lexer)->location;
		add_stmt (build_empty_stmt (loc));
		cp_lexer_consume_token (parser->lexer);
		if (!cp_lexer_next_token_is_keyword (parser->lexer, RID_ELSE))
		  warning_at (loc, OPT_Wempty_body, "suggest braces around "
			      "empty body in an %<if%> statement");
		nested_if = false;
	      }
	    else
	      cp_parser_implicitly_scoped_statement (parser, &nested_if);
	    parser->in_statement = in_statement;
	    finish_then_clause (statement);
	    /* If the next token is `else', parse the else-clause.  */
	    if (cp_lexer_next_token_is_keyword (parser->lexer,
						RID_ELSE))
	      {
		/* Consume the `else' keyword.  */
		cp_lexer_consume_token (parser->lexer);
		begin_else_clause (statement);
		/* Parse the else-clause.  */
		if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
		  {
		    location_t loc;
		    loc = cp_lexer_peek_token (parser->lexer)->location;
		    warning_at (loc,
				OPT_Wempty_body, "suggest braces around "
				"empty body in an %<else%> statement");
		    add_stmt (build_empty_stmt (loc));
		    cp_lexer_consume_token (parser->lexer);
		  }
		else
		  cp_parser_implicitly_scoped_statement (parser, NULL);
		finish_else_clause (statement);
		/* If we are currently parsing a then-clause, then
		   IF_P will not be NULL.  We set it to true to
		   indicate that this if statement has an else clause.
		   This may trigger the Wparentheses warning below
		   when we get back up to the parent if statement.  */
		if (if_p != NULL)
		  *if_p = true;
	      }
	    else
	      {
		/* This if statement does not have an else clause.  If
		   NESTED_IF is true, then the then-clause is an if
		   statement which does have an else clause.  We warn
		   about the potential ambiguity.  */
		if (nested_if)
		  warning_at (EXPR_LOCATION (statement), OPT_Wparentheses,
			      "suggest explicit braces to avoid ambiguous"
			      " %<else%>");
	      }
	    /* Now we're all done with the if-statement.  */
	    finish_if_stmt (statement);
	  }
	else
	  {
	    bool in_switch_statement_p;
	    unsigned char in_statement;
	    /* Add the condition.  */
	    finish_switch_cond (condition, statement);
	    /* Parse the body of the switch-statement.  */
	    in_switch_statement_p = parser->in_switch_statement_p;
	    in_statement = parser->in_statement;
	    parser->in_switch_statement_p = true;
	    parser->in_statement |= IN_SWITCH_STMT;
	    cp_parser_implicitly_scoped_statement (parser, NULL);
	    parser->in_switch_statement_p = in_switch_statement_p;
	    parser->in_statement = in_statement;
	    /* Now we're all done with the switch-statement.  */
	    finish_switch_stmt (statement);
	  }
	return statement;
      }
      break;
    default:
      cp_parser_error (parser, "expected selection-statement");
      return error_mark_node;
    }
}
/* Parse a condition.
condition:
expression
type-specifier-seq declarator = initializer-clause
type-specifier-seq declarator braced-init-list
GNU Extension:
condition:
type-specifier-seq declarator asm-specification [opt]
attributes [opt] = assignment-expression
Returns the expression that should be tested. */
static tree
cp_parser_condition (cp_parser* parser)
{
  cp_decl_specifier_seq type_specifiers;
  const char *saved_message;
  int declares_class_or_enum;
  /* Try the declaration first.  */
  cp_parser_parse_tentatively (parser);
  /* New types are not allowed in the type-specifier-seq for a
     condition.  */
  saved_message = parser->type_definition_forbidden_message;
  parser->type_definition_forbidden_message
    = G_("types may not be defined in conditions");
  /* Parse the type-specifier-seq.  */
  cp_parser_decl_specifier_seq (parser,
				CP_PARSER_FLAGS_ONLY_TYPE_OR_CONSTEXPR,
				&type_specifiers,
				&declares_class_or_enum);
  /* Restore the saved message.  */
  parser->type_definition_forbidden_message = saved_message;
  /* If all is well, we might be looking at a declaration.  */
  if (!cp_parser_error_occurred (parser))
    {
      tree decl;
      tree asm_specification;
      tree attributes;
      cp_declarator *declarator;
      tree initializer = NULL_TREE;
      /* Parse the declarator.  */
      declarator = cp_parser_declarator (parser, CP_PARSER_DECLARATOR_NAMED,
					 /*ctor_dtor_or_conv_p=*/NULL,
					 /*parenthesized_p=*/NULL,
					 /*member_p=*/false);
      /* Parse the attributes.  */
      attributes = cp_parser_attributes_opt (parser);
      /* Parse the asm-specification.  */
      asm_specification = cp_parser_asm_specification_opt (parser);
      /* If the next token is not an `=' or '{', then we might still be
	 looking at an expression.  For example:
	   if (A(a).x)
	 looks like a decl-specifier-seq and a declarator -- but then
	 there is no `=', so this is an expression.  */
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_EQ)
	  && cp_lexer_next_token_is_not (parser->lexer, CPP_OPEN_BRACE))
	cp_parser_simulate_error (parser);
      /* If we did see an `=' or '{', then we are looking at a declaration
	 for sure.  */
      if (cp_parser_parse_definitely (parser))
	{
	  tree pushed_scope;
	  bool non_constant_p;
	  bool flags = LOOKUP_ONLYCONVERTING;
	  /* Create the declaration.  */
	  decl = start_decl (declarator, &type_specifiers,
			     /*initialized_p=*/true,
			     attributes, /*prefix_attributes=*/NULL_TREE,
			     &pushed_scope);
	  /* Parse the initializer: either a braced-init-list or
	     `= initializer-clause'.  */
	  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
	    {
	      initializer = cp_parser_braced_list (parser, &non_constant_p);
	      CONSTRUCTOR_IS_DIRECT_INIT (initializer) = 1;
	      flags = 0;
	    }
	  else
	    {
	      /* Consume the `='.  */
	      cp_parser_require (parser, CPP_EQ, RT_EQ);
	      initializer = cp_parser_initializer_clause (parser, &non_constant_p);
	    }
	  if (BRACE_ENCLOSED_INITIALIZER_P (initializer))
	    maybe_warn_cpp0x (CPP0X_INITIALIZER_LISTS);
	  /* Process the initializer.  */
	  cp_finish_decl (decl,
			  initializer, !non_constant_p,
			  asm_specification,
			  flags);
	  if (pushed_scope)
	    pop_scope (pushed_scope);
	  return convert_from_reference (decl);
	}
    }
  /* If we didn't even get past the declarator successfully, we are
     definitely not looking at a declaration.  */
  else
    cp_parser_abort_tentative_parse (parser);
  /* Otherwise, we are looking at an expression.  */
  return cp_parser_expression (parser, /*cast_p=*/false, NULL);
}
/* Parses a for-statement or range-for-statement until the closing ')',
not included. */
static tree
cp_parser_for (cp_parser *parser)
{
  tree init, scope, decl;

  /* Open the scope that will hold the loop variables.  */
  scope = begin_for_scope (&init);

  /* Parsing the for-init-statement tells us which flavor of `for'
     this is: range-based or classic C-style.  */
  if (cp_parser_for_init_statement (parser, &decl))
    return cp_parser_range_for (parser, scope, init, decl);

  return cp_parser_c_for (parser, scope, init);
}
static tree
cp_parser_c_for (cp_parser *parser, tree scope, tree init)
{
  /* Classic C-style for loop.  */
  tree cond = NULL_TREE;
  tree incr = NULL_TREE;
  tree stmt = begin_for_stmt (scope, init);

  /* The for-init-statement has already been consumed by
     cp_parser_for_init_statement, so no work is needed here.  */
  finish_for_init_stmt (stmt);

  /* Parse the optional condition.  */
  if (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
    cond = cp_parser_condition (parser);
  finish_for_cond (cond, stmt);

  /* Require the `;' that separates condition and increment.  */
  cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);

  /* Parse the optional increment expression.  */
  if (cp_lexer_next_token_is_not (parser->lexer, CPP_CLOSE_PAREN))
    incr = cp_parser_expression (parser, /*cast_p=*/false, NULL);
  finish_for_expr (incr, stmt);

  return stmt;
}
/* Tries to parse a range-based for-statement:
range-based-for:
decl-specifier-seq declarator : expression
The decl-specifier-seq declarator and the `:' are already parsed by
cp_parser_for_init_statement. If processing_template_decl it returns a
newly created RANGE_FOR_STMT; if not, it is converted to a
regular FOR_STMT. */
static tree
cp_parser_range_for (cp_parser *parser, tree scope, tree init, tree range_decl)
{
  tree stmt, range_expr;
  /* The range expression is either a braced-init-list or an ordinary
     expression.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
    {
      bool expr_non_constant_p;
      range_expr = cp_parser_braced_list (parser, &expr_non_constant_p);
    }
  else
    range_expr = cp_parser_expression (parser, /*cast_p=*/false, NULL);
  /* If in template, STMT is converted to a normal for-statement
     at instantiation.  If not, it is done just ahead.  */
  if (processing_template_decl)
    {
      if (check_for_bare_parameter_packs (range_expr))
	range_expr = error_mark_node;
      stmt = begin_range_for_stmt (scope, init);
      finish_range_for_decl (stmt, range_decl, range_expr);
      /* Still resolve `auto' in the declaration's type now when the
	 range expression is not type-dependent.  */
      if (!type_dependent_expression_p (range_expr)
	  /* do_auto_deduction doesn't mess with template init-lists.  */
	  && !BRACE_ENCLOSED_INITIALIZER_P (range_expr))
	do_range_for_auto_deduction (range_decl, range_expr);
    }
  else
    {
      /* Outside a template, expand immediately into a regular FOR_STMT.  */
      stmt = begin_for_stmt (scope, init);
      stmt = cp_convert_range_for (stmt, range_decl, range_expr);
    }
  return stmt;
}
/* Subroutine of cp_convert_range_for: given the initializer expression,
builds up the range temporary. */
static tree
build_range_temp (tree range_expr)
{
  tree type, temp;

  /* Find out the type deduced by the declaration
     `auto &&__range = range_expr'.  */
  type = cp_build_reference_type (make_auto (), true);
  type = do_auto_deduction (type, range_expr, type_uses_auto (type));

  /* Build the artificial __for_range variable.  */
  temp = build_decl (input_location, VAR_DECL,
		     get_identifier ("__for_range"), type);
  DECL_ARTIFICIAL (temp) = 1;
  TREE_USED (temp) = 1;

  return temp;
}
/* Used by cp_parser_range_for in template context: we aren't going to
do a full conversion yet, but we still need to resolve auto in the
type of the for-range-declaration if present. This is basically
a shortcut version of cp_convert_range_for. */
static void
do_range_for_auto_deduction (tree decl, tree range_expr)
{
  tree auto_node = type_uses_auto (TREE_TYPE (decl));
  tree begin_dummy, end_dummy, range_temp, iter_type, iter_decl;

  /* Nothing to do unless the declaration's type involves `auto'.  */
  if (!auto_node)
    return;

  /* Build a dummy __range and look up the iterator type.  */
  range_temp = convert_from_reference (build_range_temp (range_expr));
  iter_type = (cp_parser_perform_range_for_lookup
	       (range_temp, &begin_dummy, &end_dummy));

  /* Deduce against `*iter', since the converted loop will initialize
     the declaration from *__begin.  */
  iter_decl = build_decl (input_location, VAR_DECL, NULL_TREE, iter_type);
  iter_decl = build_x_indirect_ref (iter_decl, RO_NULL,
				    tf_warning_or_error);
  TREE_TYPE (decl) = do_auto_deduction (TREE_TYPE (decl),
					iter_decl, auto_node);
}
/* Converts a range-based for-statement into a normal
for-statement, as per the definition.
for (RANGE_DECL : RANGE_EXPR)
BLOCK
should be equivalent to:
{
auto &&__range = RANGE_EXPR;
for (auto __begin = BEGIN_EXPR, end = END_EXPR;
__begin != __end;
++__begin)
{
RANGE_DECL = *__begin;
BLOCK
}
}
If RANGE_EXPR is an array:
BEGIN_EXPR = __range
END_EXPR = __range + ARRAY_SIZE(__range)
Else if RANGE_EXPR has a member 'begin' or 'end':
BEGIN_EXPR = __range.begin()
END_EXPR = __range.end()
Else:
BEGIN_EXPR = begin(__range)
END_EXPR = end(__range);
If __range has a member 'begin' but not 'end', or vice versa, we must
still use the second alternative (it will surely fail, however).
When calling begin()/end() in the third alternative we must use
argument dependent lookup, but always considering 'std' as an associated
namespace. */
/* Helper for cp_convert_range_for: build, push, and initialize one of the
   artificial iteration variables (__for_begin / __for_end).  NAME is the
   identifier to use, TYPE the variable's type, and INIT its initializer.
   Returns the new VAR_DECL.  */
static tree
cp_convert_range_for_decl (const char *name, tree type, tree init)
{
  tree decl = build_decl (input_location, VAR_DECL,
			  get_identifier (name), type);
  TREE_USED (decl) = 1;
  DECL_ARTIFICIAL (decl) = 1;
  pushdecl (decl);
  cp_finish_decl (decl, init,
		  /*is_constant_init*/false, NULL_TREE,
		  LOOKUP_ONLYCONVERTING);
  return decl;
}

tree
cp_convert_range_for (tree statement, tree range_decl, tree range_expr)
{
  tree begin, end;
  tree iter_type, begin_expr, end_expr;
  tree condition, expression;
  if (range_decl == error_mark_node || range_expr == error_mark_node)
    /* If an error happened previously do nothing or else a lot of
       unhelpful errors would be issued.  */
    begin_expr = end_expr = iter_type = error_mark_node;
  else
    {
      /* Build and initialize `auto &&__range = range_expr', then look up
	 the begin/end expressions and the iterator type.  */
      tree range_temp = build_range_temp (range_expr);
      pushdecl (range_temp);
      cp_finish_decl (range_temp, range_expr,
		      /*is_constant_init*/false, NULL_TREE,
		      LOOKUP_ONLYCONVERTING);
      range_temp = convert_from_reference (range_temp);
      iter_type = cp_parser_perform_range_for_lookup (range_temp,
						      &begin_expr, &end_expr);
    }
  /* The new for initialization statement: declare and initialize
     __for_begin and __for_end.  */
  begin = cp_convert_range_for_decl ("__for_begin", iter_type, begin_expr);
  end = cp_convert_range_for_decl ("__for_end", iter_type, end_expr);
  finish_for_init_stmt (statement);
  /* The new for condition: __for_begin != __for_end.  */
  condition = build_x_binary_op (NE_EXPR,
				 begin, ERROR_MARK,
				 end, ERROR_MARK,
				 NULL, tf_warning_or_error);
  finish_for_cond (condition, statement);
  /* The new increment expression: ++__for_begin.  */
  expression = finish_unary_op_expr (PREINCREMENT_EXPR, begin);
  finish_for_expr (expression, statement);
  /* The declaration is initialized with *__begin inside the loop body.  */
  cp_finish_decl (range_decl,
		  build_x_indirect_ref (begin, RO_NULL, tf_warning_or_error),
		  /*is_constant_init*/false, NULL_TREE,
		  LOOKUP_ONLYCONVERTING);
  return statement;
}
/* Solves BEGIN_EXPR and END_EXPR as described in cp_convert_range_for.
We need to solve both at the same time because the method used
depends on the existence of members begin or end.
Returns the type deduced for the iterator expression. */
static tree
cp_parser_perform_range_for_lookup (tree range, tree *begin, tree *end)
{
  /* Propagate an earlier error without piling on diagnostics.  */
  if (error_operand_p (range))
    {
      *begin = *end = error_mark_node;
      return error_mark_node;
    }

  /* The range expression must have complete type: we are about to look
     inside it for members or apply pointer arithmetic to it.  */
  if (!COMPLETE_TYPE_P (complete_type (TREE_TYPE (range))))
    {
      error ("range-based %<for%> expression of type %qT "
	     "has incomplete type", TREE_TYPE (range));
      *begin = *end = error_mark_node;
      return error_mark_node;
    }

  if (TREE_CODE (TREE_TYPE (range)) == ARRAY_TYPE)
    {
      /* If RANGE is an array, we will use pointer arithmetic: __begin
	 is the array itself and __end is one past its last element.
	 The iterator type is a pointer to the element type.  */
      *begin = range;
      *end = build_binary_op (input_location, PLUS_EXPR,
			      range,
			      array_type_nelts_top (TREE_TYPE (range)),
			      0);
      return build_pointer_type (TREE_TYPE (TREE_TYPE (range)));
    }
  else
    {
      /* If it is not an array, we must do a bit of magic.  */
      tree id_begin, id_end;
      tree member_begin, member_end;

      *begin = *end = error_mark_node;

      id_begin = get_identifier ("begin");
      id_end = get_identifier ("end");
      /* Look for members named "begin" and "end" in the range's class.
	 NOTE(review): /*protect=*/2 presumably defers the access-error
	 diagnostic -- confirm against lookup_member's contract.  */
      member_begin = lookup_member (TREE_TYPE (range), id_begin,
				    /*protect=*/2, /*want_type=*/false,
				    tf_warning_or_error);
      member_end = lookup_member (TREE_TYPE (range), id_end,
				  /*protect=*/2, /*want_type=*/false,
				  tf_warning_or_error);

      if (member_begin != NULL_TREE || member_end != NULL_TREE)
	{
	  /* If either member exists, member lookup wins; having only
	     one of the two is then an error.  */
	  if (member_begin != NULL_TREE)
	    *begin = cp_parser_range_for_member_function (range, id_begin);
	  else
	    error ("range-based %<for%> expression of type %qT has an "
		   "%<end%> member but not a %<begin%>", TREE_TYPE (range));

	  if (member_end != NULL_TREE)
	    *end = cp_parser_range_for_member_function (range, id_end);
	  else
	    error ("range-based %<for%> expression of type %qT has a "
		   "%<begin%> member but not an %<end%>", TREE_TYPE (range));
	}
      else
	{
	  /* Neither member exists: fall back to free functions
	     begin(RANGE) / end(RANGE) found via argument-dependent
	     lookup (include_std also searches namespace std).  The same
	     one-element argument vector is reused for both calls.  */
	  VEC(tree,gc) *vec;

	  vec = make_tree_vector ();
	  VEC_safe_push (tree, gc, vec, range);

	  member_begin = perform_koenig_lookup (id_begin, vec,
						/*include_std=*/true,
						tf_warning_or_error);
	  *begin = finish_call_expr (member_begin, &vec, false, true,
				     tf_warning_or_error);
	  member_end = perform_koenig_lookup (id_end, vec,
					      /*include_std=*/true,
					      tf_warning_or_error);
	  *end = finish_call_expr (member_end, &vec, false, true,
				   tf_warning_or_error);

	  release_tree_vector (vec);
	}

      /* Last common checks.  */
      if (*begin == error_mark_node || *end == error_mark_node)
	{
	  /* If one of the expressions is an error do no more checks.  */
	  *begin = *end = error_mark_node;
	  return error_mark_node;
	}
      else
	{
	  tree iter_type = cv_unqualified (TREE_TYPE (*begin));
	  /* The unqualified type of the __begin and __end temporaries should
	     be the same, as required by the multiple auto declaration.  */
	  if (!same_type_p (iter_type, cv_unqualified (TREE_TYPE (*end))))
	    error ("inconsistent begin/end types in range-based %<for%> "
		   "statement: %qT and %qT",
		   TREE_TYPE (*begin), TREE_TYPE (*end));
	  return iter_type;
	}
    }
}
/* Helper function for cp_parser_perform_range_for_lookup.
Builds a tree for RANGE.IDENTIFIER(). */
static tree
cp_parser_range_for_member_function (tree range, tree identifier)
{
  VEC(tree,gc) *args;
  tree fn, call;

  /* Build the member access RANGE.IDENTIFIER; bail out if that alone
     already failed.  */
  fn = finish_class_member_access_expr (range, identifier,
					false, tf_warning_or_error);
  if (fn == error_mark_node)
    return error_mark_node;

  /* Invoke the member with an empty argument list.  */
  args = make_tree_vector ();
  call = finish_call_expr (fn, &args,
			   /*disallow_virtual=*/false,
			   /*koenig_p=*/false,
			   tf_warning_or_error);
  release_tree_vector (args);

  return call;
}
/* Parse an iteration-statement.

   iteration-statement:
     while ( condition ) statement
     do statement while ( expression ) ;
     for ( for-init-statement condition [opt] ; expression [opt] )
       statement
     for ( for-range-declaration : for-range-initializer ) statement

   Returns the new WHILE_STMT, DO_STMT, FOR_STMT or RANGE_FOR_STMT.  */
static tree
cp_parser_iteration_statement (cp_parser* parser)
{
  cp_token *token;
  enum rid keyword;
  tree statement;
  unsigned char in_statement;

  /* Require (and consume) the iteration keyword itself.
     (RT_INTERATION is the spelling used project-wide; do not "fix" it
     here.)  */
  token = cp_parser_require (parser, CPP_KEYWORD, RT_INTERATION);
  if (!token)
    return error_mark_node;

  /* Remember whether or not we are already within an iteration
     statement, so the flag can be restored after the body.  */
  in_statement = parser->in_statement;

  /* See what kind of keyword it is.  */
  keyword = token->keyword;
  switch (keyword)
    {
    case RID_WHILE:
      {
	tree condition;

	/* Begin the while-statement.  */
	statement = begin_while_stmt ();
	/* Look for the `('.  */
	cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);
	/* Parse the condition.  */
	condition = cp_parser_condition (parser);
	finish_while_stmt_cond (condition, statement);
	/* Look for the `)'.  */
	cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
	/* Parse the dependent statement; `break'/`continue' inside it
	   now refer to this loop.  */
	parser->in_statement = IN_ITERATION_STMT;
	cp_parser_already_scoped_statement (parser);
	parser->in_statement = in_statement;
	/* We're done with the while-statement.  */
	finish_while_stmt (statement);
      }
      break;

    case RID_DO:
      {
	tree expression;

	/* Begin the do-statement.  */
	statement = begin_do_stmt ();
	/* Parse the body of the do-statement.  */
	parser->in_statement = IN_ITERATION_STMT;
	cp_parser_implicitly_scoped_statement (parser, NULL);
	parser->in_statement = in_statement;
	finish_do_body (statement);
	/* Look for the `while' keyword.  */
	cp_parser_require_keyword (parser, RID_WHILE, RT_WHILE);
	/* Look for the `('.  */
	cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);
	/* Parse the expression.  */
	expression = cp_parser_expression (parser, /*cast_p=*/false, NULL);
	/* We're done with the do-statement.  */
	finish_do_stmt (expression, statement);
	/* Look for the `)'.  */
	cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
	/* Look for the `;'.  */
	cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);
      }
      break;

    case RID_FOR:
      {
	/* Look for the `('.  */
	cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);
	/* cp_parser_for parses everything between the parens (and, per
	   the grammar comment above, yields either a FOR_STMT or a
	   RANGE_FOR_STMT).  */
	statement = cp_parser_for (parser);
	/* Look for the `)'.  */
	cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
	/* Parse the body of the for-statement.  */
	parser->in_statement = IN_ITERATION_STMT;
	cp_parser_already_scoped_statement (parser);
	parser->in_statement = in_statement;
	/* We're done with the for-statement.  */
	finish_for_stmt (statement);
      }
      break;

    default:
      cp_parser_error (parser, "expected iteration-statement");
      statement = error_mark_node;
      break;
    }

  return statement;
}
/* Parse a for-init-statement or the declarator of a range-based-for.
Returns true if a range-based-for declaration is seen.
for-init-statement:
expression-statement
simple-declaration */
static bool
cp_parser_for_init_statement (cp_parser* parser, tree *decl)
{
  /* If the next token is a `;', then we have an empty
     expression-statement.  Grammatically, this is also a
     simple-declaration, but an invalid one, because it does not
     declare anything.  Therefore, if we did not handle this case
     specially, we would issue an error message about an invalid
     declaration.  */
  if (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
    {
      bool is_range_for = false;
      bool saved_colon_corrects_to_scope_p = parser->colon_corrects_to_scope_p;

      /* A `:' here would be the range-for separator, not a typo for
	 `::'; suppress the scope-correction heuristic while we look.  */
      parser->colon_corrects_to_scope_p = false;

      /* We're going to speculatively look for a declaration, falling back
	 to an expression, if necessary.  */
      cp_parser_parse_tentatively (parser);
      /* Parse the declaration.  */
      cp_parser_simple_declaration (parser,
				    /*function_definition_allowed_p=*/false,
				    decl);
      parser->colon_corrects_to_scope_p = saved_colon_corrects_to_scope_p;
      if (cp_lexer_next_token_is (parser->lexer, CPP_COLON))
	{
	  /* It is a range-for, consume the ':'.  */
	  cp_lexer_consume_token (parser->lexer);
	  is_range_for = true;
	  /* Range-for is a C++11 feature; diagnose it in C++98 mode but
	     keep parsing so follow-on errors make sense.  */
	  if (cxx_dialect < cxx0x)
	    {
	      error_at (cp_lexer_peek_token (parser->lexer)->location,
			"range-based %<for%> loops are not allowed "
			"in C++98 mode");
	      *decl = error_mark_node;
	    }
	}
      else
	/* The ';' is not consumed yet because we told
	   cp_parser_simple_declaration not to.  */
	cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);
      if (cp_parser_parse_definitely (parser))
	return is_range_for;
      /* If the tentative parse failed, then we shall need to look for an
	 expression-statement.  */
    }
  /* If we are here, it is an expression-statement.  */
  cp_parser_expression_statement (parser, NULL_TREE);
  return false;
}
/* Parse a jump-statement.
jump-statement:
break ;
continue ;
return expression [opt] ;
return braced-init-list ;
goto identifier ;
GNU extension:
jump-statement:
goto * expression ;
Returns the new BREAK_STMT, CONTINUE_STMT, RETURN_EXPR, or GOTO_EXPR. */
static tree
cp_parser_jump_statement (cp_parser* parser)
{
  tree statement = error_mark_node;
  cp_token *token;
  enum rid keyword;
  unsigned char in_statement;

  /* Require (and consume) the jump keyword.  */
  token = cp_parser_require (parser, CPP_KEYWORD, RT_JUMP);
  if (!token)
    return error_mark_node;

  /* See what kind of keyword it is.  */
  keyword = token->keyword;
  switch (keyword)
    {
    case RID_BREAK:
      /* `break' may escape a switch or a loop but never an `if'; mask
	 that bit out before classifying the context.  */
      in_statement = parser->in_statement & ~IN_IF_STMT;
      switch (in_statement)
	{
	case 0:
	  error_at (token->location, "break statement not within loop or switch");
	  break;
	/* The `default' label deliberately sits between the explicit
	   cases: any context other than 0, IN_OMP_BLOCK or IN_OMP_FOR
	   lands here and must be a loop or switch.  */
	default:
	  gcc_assert ((in_statement & IN_SWITCH_STMT)
		      || in_statement == IN_ITERATION_STMT);
	  statement = finish_break_stmt ();
	  break;
	case IN_OMP_BLOCK:
	  error_at (token->location, "invalid exit from OpenMP structured block");
	  break;
	case IN_OMP_FOR:
	  error_at (token->location, "break statement used with OpenMP for loop");
	  break;
	}
      cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);
      break;

    case RID_CONTINUE:
      /* `continue' passes through enclosing switches and ifs; it binds
	 only to the innermost loop-like context.  */
      switch (parser->in_statement & ~(IN_SWITCH_STMT | IN_IF_STMT))
	{
	case 0:
	  error_at (token->location, "continue statement not within a loop");
	  break;
	case IN_ITERATION_STMT:
	case IN_OMP_FOR:
	  statement = finish_continue_stmt ();
	  break;
	case IN_OMP_BLOCK:
	  error_at (token->location, "invalid exit from OpenMP structured block");
	  break;
	default:
	  gcc_unreachable ();
	}
      cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);
      break;

    case RID_RETURN:
      {
	tree expr;
	bool expr_non_constant_p;

	if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
	  {
	    /* C++11 `return { ... };' -- a braced-init-list.  */
	    maybe_warn_cpp0x (CPP0X_INITIALIZER_LISTS);
	    expr = cp_parser_braced_list (parser, &expr_non_constant_p);
	  }
	else if (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
	  expr = cp_parser_expression (parser, /*cast_p=*/false, NULL);
	else
	  /* If the next token is a `;', then there is no
	     expression.  */
	  expr = NULL_TREE;
	/* Build the return-statement.  */
	statement = finish_return_stmt (expr);
	/* Look for the final `;'.  */
	cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);
      }
      break;

    case RID_GOTO:
      /* Create the goto-statement.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_MULT))
	{
	  /* Issue a warning about this use of a GNU extension.  */
	  pedwarn (token->location, OPT_pedantic, "ISO C++ forbids computed gotos");
	  /* Consume the '*' token.  */
	  cp_lexer_consume_token (parser->lexer);
	  /* Parse the dependent expression.  */
	  finish_goto_stmt (cp_parser_expression (parser, /*cast_p=*/false, NULL));
	}
      else
	finish_goto_stmt (cp_parser_identifier (parser));
      /* Look for the final `;'.  */
      cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);
      break;

    default:
      cp_parser_error (parser, "expected jump-statement");
      break;
    }

  return statement;
}
/* Parse a declaration-statement.
declaration-statement:
block-declaration */
static void
cp_parser_declaration_statement (cp_parser* parser)
{
  void *obstack_mark;

  /* Remember where the declarator obstack stands so that everything
     this declaration allocates on it can be released afterwards.  */
  obstack_mark = obstack_alloc (&declarator_obstack, 0);

  /* A declaration-statement is just a block-declaration in statement
     context.  */
  cp_parser_block_declaration (parser, /*statement_p=*/true);

  /* Release any declarators allocated above.  */
  obstack_free (&declarator_obstack, obstack_mark);

  /* Wrap up the statement.  */
  finish_stmt ();
}
/* Some dependent statements (like `if (cond) statement'), are
implicitly in their own scope. In other words, if the statement is
a single statement (as opposed to a compound-statement), it is
none-the-less treated as if it were enclosed in braces. Any
declarations appearing in the dependent statement are out of scope
after control passes that point. This function parses a statement,
but ensures that is in its own scope, even if it is not a
compound-statement.
If IF_P is not NULL, *IF_P is set to indicate whether the statement
is a (possibly labeled) if statement which is not enclosed in
braces and has an else clause. This is used to implement
-Wparentheses.
Returns the new statement. */
static tree
cp_parser_implicitly_scoped_statement (cp_parser* parser, bool *if_p)
{
  tree stmt;

  if (if_p != NULL)
    *if_p = false;

  if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
    {
      /* An empty body: mark `if (cond);' with a special NOP_EXPR so
	 later passes can recognize it.  */
      location_t loc = cp_lexer_peek_token (parser->lexer)->location;
      cp_lexer_consume_token (parser->lexer);
      stmt = add_stmt (build_empty_stmt (loc));
    }
  else if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
    /* A braced body is already a compound-statement; parse it as-is.  */
    stmt = cp_parser_compound_statement (parser, NULL, false, false);
  else
    {
      /* A lone dependent statement: wrap it in a dummy compound so any
	 declarations it makes go out of scope afterwards.  */
      stmt = begin_compound_stmt (0);
      cp_parser_statement (parser, NULL_TREE, false, if_p);
      finish_compound_stmt (stmt);
    }

  return stmt;
}
/* For some dependent statements (like `while (cond) statement'), we
have already created a scope. Therefore, even if the dependent
statement is a compound-statement, we do not want to create another
scope. */
static void
cp_parser_already_scoped_statement (cp_parser* parser)
{
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
    {
      /* Parse the braces by hand rather than going through
	 cp_parser_compound_statement, which would push a new scope.  */
      cp_parser_require (parser, CPP_OPEN_BRACE, RT_OPEN_BRACE);
      /* GNU extension: `__label__' declarations at the head of the
	 block.  */
      while (cp_lexer_next_token_is_keyword (parser->lexer, RID_LABEL))
	cp_parser_label_declaration (parser);
      /* Parse an (optional) statement-seq.  */
      cp_parser_statement_seq_opt (parser, NULL_TREE);
      cp_parser_require (parser, CPP_CLOSE_BRACE, RT_CLOSE_BRACE);
    }
  else
    /* A single statement needs no extra scope bookkeeping.  */
    cp_parser_statement (parser, NULL_TREE, false, NULL);
}
/* Declarations [gram.dcl.dcl] */
/* Parse an optional declaration-sequence.
declaration-seq:
declaration
declaration-seq declaration */
static void
cp_parser_declaration_seq_opt (cp_parser* parser)
{
  while (true)
    {
      cp_token *token;

      token = cp_lexer_peek_token (parser->lexer);

      /* Stop at anything that terminates a declaration-seq.  */
      if (token->type == CPP_CLOSE_BRACE
	  || token->type == CPP_EOF
	  || token->type == CPP_PRAGMA_EOL)
	break;

      if (token->type == CPP_SEMICOLON)
	{
	  /* A declaration consisting of a single semicolon is
	     invalid.  Allow it unless we're being pedantic.  */
	  cp_lexer_consume_token (parser->lexer);
	  if (!in_system_header)
	    pedwarn (input_location, OPT_pedantic, "extra %<;%>");
	  continue;
	}

      /* If we're entering or exiting a region that's implicitly
	 extern "C", modify the lang context appropriately.  (This must
	 happen before the declaration itself is parsed.)  */
      if (!parser->implicit_extern_c && token->implicit_extern_c)
	{
	  push_lang_context (lang_name_c);
	  parser->implicit_extern_c = true;
	}
      else if (parser->implicit_extern_c && !token->implicit_extern_c)
	{
	  pop_lang_context ();
	  parser->implicit_extern_c = false;
	}

      if (token->type == CPP_PRAGMA)
	{
	  /* A top-level declaration can consist solely of a #pragma.
	     A nested declaration cannot, so this is done here and not
	     in cp_parser_declaration.  (A #pragma at block scope is
	     handled in cp_parser_statement.)  */
	  cp_parser_pragma (parser, pragma_external);
	  continue;
	}

      /* Parse the declaration itself.  */
      cp_parser_declaration (parser);
    }
}
/* Parse a declaration.
declaration:
block-declaration
function-definition
template-declaration
explicit-instantiation
explicit-specialization
linkage-specification
namespace-definition
GNU extension:
declaration:
__extension__ declaration */
static void
cp_parser_declaration (cp_parser* parser)
{
  cp_token token1;
  cp_token token2;
  int saved_pedantic;
  void *p;
  tree attributes = NULL_TREE;

  /* Check for the `__extension__' keyword; if present, parse the
     qualified declaration recursively with pedantic suppressed.  */
  if (cp_parser_extension_opt (parser, &saved_pedantic))
    {
      /* Parse the qualified declaration.  */
      cp_parser_declaration (parser);
      /* Restore the PEDANTIC flag.  */
      pedantic = saved_pedantic;

      return;
    }

  /* Try to figure out what kind of declaration is present by peeking
     at the first two tokens (copied by value, since parsing below will
     advance the lexer).  */
  token1 = *cp_lexer_peek_token (parser->lexer);

  if (token1.type != CPP_EOF)
    token2 = *cp_lexer_peek_nth_token (parser->lexer, 2);
  else
    {
      /* Synthesize a harmless second token at end of input.  */
      token2.type = CPP_EOF;
      token2.keyword = RID_MAX;
    }

  /* Get the high-water mark for the DECLARATOR_OBSTACK.  */
  p = obstack_alloc (&declarator_obstack, 0);

  /* If the next token is `extern' and the following token is a string
     literal, then we have a linkage specification.  */
  if (token1.keyword == RID_EXTERN
      && cp_parser_is_pure_string_literal (&token2))
    cp_parser_linkage_specification (parser);
  /* If the next token is `template', then we have either a template
     declaration, an explicit instantiation, or an explicit
     specialization.  */
  else if (token1.keyword == RID_TEMPLATE)
    {
      /* `template <>' indicates a template specialization.  */
      if (token2.type == CPP_LESS
	  && cp_lexer_peek_nth_token (parser->lexer, 3)->type == CPP_GREATER)
	cp_parser_explicit_specialization (parser);
      /* `template <' indicates a template declaration.  */
      else if (token2.type == CPP_LESS)
	cp_parser_template_declaration (parser, /*member_p=*/false);
      /* Anything else must be an explicit instantiation.  */
      else
	cp_parser_explicit_instantiation (parser);
    }
  /* If the next token is `export', then we have a template
     declaration.  */
  else if (token1.keyword == RID_EXPORT)
    cp_parser_template_declaration (parser, /*member_p=*/false);
  /* If the next token is `extern', 'static' or 'inline' and the one
     after that is `template', we have a GNU extended explicit
     instantiation directive.  */
  else if (cp_parser_allow_gnu_extensions_p (parser)
	   && (token1.keyword == RID_EXTERN
	       || token1.keyword == RID_STATIC
	       || token1.keyword == RID_INLINE)
	   && token2.keyword == RID_TEMPLATE)
    cp_parser_explicit_instantiation (parser);
  /* If the next token is `namespace', check for a named or unnamed
     namespace definition.  (`namespace N =' is a namespace-alias and
     falls through to block-declaration below.)  */
  else if (token1.keyword == RID_NAMESPACE
	   && (/* A named namespace definition.  */
	       (token2.type == CPP_NAME
		&& (cp_lexer_peek_nth_token (parser->lexer, 3)->type
		    != CPP_EQ))
	       /* An unnamed namespace definition.  */
	       || token2.type == CPP_OPEN_BRACE
	       || token2.keyword == RID_ATTRIBUTE))
    cp_parser_namespace_definition (parser);
  /* An inline (associated) namespace definition.  */
  else if (token1.keyword == RID_INLINE
	   && token2.keyword == RID_NAMESPACE)
    cp_parser_namespace_definition (parser);
  /* Objective-C++ declaration/definition.  */
  else if (c_dialect_objc () && OBJC_IS_AT_KEYWORD (token1.keyword))
    cp_parser_objc_declaration (parser, NULL_TREE);
  else if (c_dialect_objc ()
	   && token1.keyword == RID_ATTRIBUTE
	   && cp_parser_objc_valid_prefix_attributes (parser, &attributes))
    cp_parser_objc_declaration (parser, attributes);
  /* We must have either a block declaration or a function
     definition.  */
  else
    /* Try to parse a block-declaration, or a function-definition.  */
    cp_parser_block_declaration (parser, /*statement_p=*/false);

  /* Free any declarators allocated.  */
  obstack_free (&declarator_obstack, p);
}
/* Parse a block-declaration.
block-declaration:
simple-declaration
asm-definition
namespace-alias-definition
using-declaration
using-directive
GNU Extension:
block-declaration:
__extension__ block-declaration
C++0x Extension:
block-declaration:
static_assert-declaration
If STATEMENT_P is TRUE, then this block-declaration is occurring as
part of a declaration-statement. */
static void
cp_parser_block_declaration (cp_parser *parser,
			     bool statement_p)
{
  cp_token *token1;
  int saved_pedantic;

  /* Check for the `__extension__' keyword.  */
  if (cp_parser_extension_opt (parser, &saved_pedantic))
    {
      /* Parse the qualified declaration.  */
      cp_parser_block_declaration (parser, statement_p);
      /* Restore the PEDANTIC flag.  */
      pedantic = saved_pedantic;

      return;
    }

  /* Peek at the next token to figure out which kind of declaration is
     present.  */
  token1 = cp_lexer_peek_token (parser->lexer);

  /* If the next keyword is `asm', we have an asm-definition.  */
  if (token1->keyword == RID_ASM)
    {
      /* In statement context the keyword disambiguates fully, so stop
	 parsing tentatively.  */
      if (statement_p)
	cp_parser_commit_to_tentative_parse (parser);
      cp_parser_asm_definition (parser);
    }
  /* If the next keyword is `namespace', we have a
     namespace-alias-definition.  */
  else if (token1->keyword == RID_NAMESPACE)
    cp_parser_namespace_alias_definition (parser);
  /* If the next keyword is `using', we have a
     using-declaration, a using-directive, or an alias-declaration.  */
  else if (token1->keyword == RID_USING)
    {
      cp_token *token2;

      if (statement_p)
	cp_parser_commit_to_tentative_parse (parser);
      /* If the token after `using' is `namespace', then we have a
	 using-directive.  */
      token2 = cp_lexer_peek_nth_token (parser->lexer, 2);
      if (token2->keyword == RID_NAMESPACE)
	cp_parser_using_directive (parser);
      /* If the second token after 'using' is '=' (or an attribute
	 preceding it), then we have a C++11 alias-declaration.  */
      else if (cxx_dialect >= cxx0x
	       && token2->type == CPP_NAME
	       && ((cp_lexer_peek_nth_token (parser->lexer, 3)->type == CPP_EQ)
		   || (cp_lexer_peek_nth_token (parser->lexer, 3)->keyword
		       == RID_ATTRIBUTE)))
	cp_parser_alias_declaration (parser);
      /* Otherwise, it's a using-declaration.  */
      else
	cp_parser_using_declaration (parser,
				     /*access_declaration_p=*/false);
    }
  /* If the next keyword is `__label__' we have a misplaced label
     declaration; diagnose it and skip to the end of the statement.  */
  else if (token1->keyword == RID_LABEL)
    {
      cp_lexer_consume_token (parser->lexer);
      error_at (token1->location, "%<__label__%> not at the beginning of a block");
      cp_parser_skip_to_end_of_statement (parser);
      /* If the next token is now a `;', consume it.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
	cp_lexer_consume_token (parser->lexer);
    }
  /* If the next token is `static_assert' we have a static assertion.  */
  else if (token1->keyword == RID_STATIC_ASSERT)
    cp_parser_static_assert (parser, /*member_p=*/false);
  /* Anything else must be a simple-declaration.  */
  else
    cp_parser_simple_declaration (parser, !statement_p,
				  /*maybe_range_for_decl*/NULL);
}
/* Parse a simple-declaration.
simple-declaration:
decl-specifier-seq [opt] init-declarator-list [opt] ;
init-declarator-list:
init-declarator
init-declarator-list , init-declarator
If FUNCTION_DEFINITION_ALLOWED_P is TRUE, then we also recognize a
function-definition as a simple-declaration.
If MAYBE_RANGE_FOR_DECL is not NULL, the pointed tree will be set to the
parsed declaration if it is an uninitialized single declarator not followed
by a `;', or to error_mark_node otherwise. Either way, the trailing `;',
if present, will not be consumed. */
static void
cp_parser_simple_declaration (cp_parser* parser,
			      bool function_definition_allowed_p,
			      tree *maybe_range_for_decl)
{
  cp_decl_specifier_seq decl_specifiers;
  int declares_class_or_enum;
  bool saw_declarator;

  if (maybe_range_for_decl)
    *maybe_range_for_decl = NULL_TREE;

  /* Defer access checks until we know what is being declared; the
     checks for names appearing in the decl-specifier-seq should be
     done as if we were in the scope of the thing being declared.  */
  push_deferring_access_checks (dk_deferred);

  /* Parse the decl-specifier-seq.  We have to keep track of whether
     or not the decl-specifier-seq declares a named class or
     enumeration type, since that is the only case in which the
     init-declarator-list is allowed to be empty.

     [dcl.dcl]

     In a simple-declaration, the optional init-declarator-list can be
     omitted only when declaring a class or enumeration, that is when
     the decl-specifier-seq contains either a class-specifier, an
     elaborated-type-specifier, or an enum-specifier.  */
  cp_parser_decl_specifier_seq (parser,
				CP_PARSER_FLAGS_OPTIONAL,
				&decl_specifiers,
				&declares_class_or_enum);
  /* We no longer need to defer access checks.  */
  stop_deferring_access_checks ();

  /* In a block scope, a valid declaration must always have a
     decl-specifier-seq.  By not trying to parse declarators, we can
     resolve the declaration/expression ambiguity more quickly.  */
  if (!function_definition_allowed_p
      && !decl_specifiers.any_specifiers_p)
    {
      cp_parser_error (parser, "expected declaration");
      goto done;
    }

  /* If the next two tokens are both identifiers, the code is
     erroneous.  The usual cause of this situation is code like:

       T t;

     where "T" should name a type -- but does not.  */
  if (!decl_specifiers.any_type_specifiers_p
      && cp_parser_parse_and_diagnose_invalid_type_name (parser))
    {
      /* If parsing tentatively, we should commit; we really are
	 looking at a declaration.  */
      cp_parser_commit_to_tentative_parse (parser);
      /* Give up.  */
      goto done;
    }

  /* If we have seen at least one decl-specifier, and the next token
     is not a parenthesis, then we must be looking at a declaration.
     (After "int (" we might be looking at a functional cast.)  */
  if (decl_specifiers.any_specifiers_p
      && cp_lexer_next_token_is_not (parser->lexer, CPP_OPEN_PAREN)
      && cp_lexer_next_token_is_not (parser->lexer, CPP_OPEN_BRACE)
      && !cp_parser_error_occurred (parser))
    cp_parser_commit_to_tentative_parse (parser);

  /* Keep going until we hit the `;' at the end of the simple
     declaration.  */
  saw_declarator = false;
  while (cp_lexer_next_token_is_not (parser->lexer,
				     CPP_SEMICOLON))
    {
      cp_token *token;
      bool function_definition_p;
      tree decl;

      if (saw_declarator)
	{
	  /* If we are processing the next declarator, a comma is
	     expected between declarators.  */
	  token = cp_lexer_peek_token (parser->lexer);
	  gcc_assert (token->type == CPP_COMMA);
	  cp_lexer_consume_token (parser->lexer);
	  if (maybe_range_for_decl)
	    /* A declaration with multiple declarators can never be a
	       range-for declaration.  */
	    *maybe_range_for_decl = error_mark_node;
	}
      else
	saw_declarator = true;

      /* Parse the init-declarator.  */
      decl = cp_parser_init_declarator (parser, &decl_specifiers,
					/*checks=*/NULL,
					function_definition_allowed_p,
					/*member_p=*/false,
					declares_class_or_enum,
					&function_definition_p,
					maybe_range_for_decl);
      /* If an error occurred while parsing tentatively, exit quickly.
	 (That usually happens when in the body of a function; each
	 statement is treated as a declaration-statement until proven
	 otherwise.)  */
      if (cp_parser_error_occurred (parser))
	goto done;
      /* Handle function definitions specially.  */
      if (function_definition_p)
	{
	  /* If the next token is a `,', then we are probably
	     processing something like:

	       void f() {}, *p;

	     which is erroneous.  */
	  if (cp_lexer_next_token_is (parser->lexer, CPP_COMMA))
	    {
	      cp_token *token = cp_lexer_peek_token (parser->lexer);
	      error_at (token->location,
			"mixing"
			" declarations and function-definitions is forbidden");
	    }
	  /* Otherwise, we're done with the list of declarators.  */
	  else
	    {
	      pop_deferring_access_checks ();
	      return;
	    }
	}
      /* Record the first declarator for a possible range-for.  */
      if (maybe_range_for_decl && *maybe_range_for_decl == NULL_TREE)
	*maybe_range_for_decl = decl;
      /* The next token should be either a `,' or a `;'.  */
      token = cp_lexer_peek_token (parser->lexer);
      /* If it's a `,', there are more declarators to come.  */
      if (token->type == CPP_COMMA)
	/* will be consumed next time around */;
      /* If it's a `;', we are done.  (For a possible range-for, stop
	 without consuming -- see the function comment above.)  */
      else if (token->type == CPP_SEMICOLON || maybe_range_for_decl)
	break;
      /* Anything else is an error.  */
      else
	{
	  /* If we have already issued an error message we don't need
	     to issue another one.  */
	  if (decl != error_mark_node
	      || cp_parser_uncommitted_to_tentative_parse_p (parser))
	    cp_parser_error (parser, "expected %<,%> or %<;%>");
	  /* Skip tokens until we reach the end of the statement.  */
	  cp_parser_skip_to_end_of_statement (parser);
	  /* If the next token is now a `;', consume it.  */
	  if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
	    cp_lexer_consume_token (parser->lexer);
	  goto done;
	}
      /* After the first time around, a function-definition is not
	 allowed -- even if it was OK at first.  For example:

	   int i, f() {}

	 is not valid.  */
      function_definition_allowed_p = false;
    }

  /* Issue an error message if no declarators are present, and the
     decl-specifier-seq does not itself declare a class or
     enumeration.  */
  if (!saw_declarator)
    {
      if (cp_parser_declares_only_class_p (parser))
	shadow_tag (&decl_specifiers);
      /* Perform any deferred access checks.  */
      perform_deferred_access_checks ();
    }

  /* Consume the `;'.  */
  if (!maybe_range_for_decl)
    cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);

 done:
  pop_deferring_access_checks ();
}
/* Parse a decl-specifier-seq.
decl-specifier-seq:
decl-specifier-seq [opt] decl-specifier
decl-specifier:
storage-class-specifier
type-specifier
function-specifier
friend
typedef
GNU Extension:
decl-specifier:
attributes
Set *DECL_SPECS to a representation of the decl-specifier-seq.
The parser flags FLAGS is used to control type-specifier parsing.
*DECLARES_CLASS_OR_ENUM is set to the bitwise or of the following
flags:
1: one of the decl-specifiers is an elaborated-type-specifier
(i.e., a type declaration)
2: one of the decl-specifiers is an enum-specifier or a
class-specifier (i.e., a type definition)
*/
static void
cp_parser_decl_specifier_seq (cp_parser* parser,
cp_parser_flags flags,
cp_decl_specifier_seq *decl_specs,
int* declares_class_or_enum)
{
bool constructor_possible_p = !parser->in_declarator_p;
cp_token *start_token = NULL;
/* Clear DECL_SPECS. */
clear_decl_specs (decl_specs);
/* Assume no class or enumeration type is declared. */
*declares_class_or_enum = 0;
/* Keep reading specifiers until there are no more to read. */
while (true)
{
bool constructor_p;
bool found_decl_spec;
cp_token *token;
/* Peek at the next token. */
token = cp_lexer_peek_token (parser->lexer);
/* Save the first token of the decl spec list for error
reporting. */
if (!start_token)
start_token = token;
/* Handle attributes. */
if (token->keyword == RID_ATTRIBUTE)
{
/* Parse the attributes. */
decl_specs->attributes
= chainon (decl_specs->attributes,
cp_parser_attributes_opt (parser));
continue;
}
/* Assume we will find a decl-specifier keyword. */
found_decl_spec = true;
/* If the next token is an appropriate keyword, we can simply
add it to the list. */
switch (token->keyword)
{
/* decl-specifier:
friend
constexpr */
case RID_FRIEND:
if (!at_class_scope_p ())
{
error_at (token->location, "%<friend%> used outside of class");
cp_lexer_purge_token (parser->lexer);
}
else
{
++decl_specs->specs[(int) ds_friend];
/* Consume the token. */
cp_lexer_consume_token (parser->lexer);
}
break;
case RID_CONSTEXPR:
++decl_specs->specs[(int) ds_constexpr];
cp_lexer_consume_token (parser->lexer);
break;
/* function-specifier:
inline
virtual
explicit */
case RID_INLINE:
case RID_VIRTUAL:
case RID_EXPLICIT:
cp_parser_function_specifier_opt (parser, decl_specs);
break;
/* decl-specifier:
typedef */
case RID_TYPEDEF:
++decl_specs->specs[(int) ds_typedef];
/* Consume the token. */
cp_lexer_consume_token (parser->lexer);
/* A constructor declarator cannot appear in a typedef. */
constructor_possible_p = false;
/* The "typedef" keyword can only occur in a declaration; we
may as well commit at this point. */
cp_parser_commit_to_tentative_parse (parser);
if (decl_specs->storage_class != sc_none)
decl_specs->conflicting_specifiers_p = true;
break;
/* storage-class-specifier:
auto
register
static
extern
mutable
GNU Extension:
thread */
case RID_AUTO:
if (cxx_dialect == cxx98)
{
/* Consume the token. */
cp_lexer_consume_token (parser->lexer);
/* Complain about `auto' as a storage specifier, if
we're complaining about C++0x compatibility. */
warning_at (token->location, OPT_Wc__0x_compat, "%<auto%>"
" changes meaning in C++11; please remove it");
/* Set the storage class anyway. */
cp_parser_set_storage_class (parser, decl_specs, RID_AUTO,
token->location);
}
else
/* C++0x auto type-specifier. */
found_decl_spec = false;
break;
case RID_REGISTER:
case RID_STATIC:
case RID_EXTERN:
case RID_MUTABLE:
/* Consume the token. */
cp_lexer_consume_token (parser->lexer);
cp_parser_set_storage_class (parser, decl_specs, token->keyword,
token->location);
break;
case RID_THREAD:
/* Consume the token. */
cp_lexer_consume_token (parser->lexer);
++decl_specs->specs[(int) ds_thread];
break;
default:
/* We did not yet find a decl-specifier yet. */
found_decl_spec = false;
break;
}
if (found_decl_spec
&& (flags & CP_PARSER_FLAGS_ONLY_TYPE_OR_CONSTEXPR)
&& token->keyword != RID_CONSTEXPR)
error ("decl-specifier invalid in condition");
/* Constructors are a special case. The `S' in `S()' is not a
decl-specifier; it is the beginning of the declarator. */
constructor_p
= (!found_decl_spec
&& constructor_possible_p
&& (cp_parser_constructor_declarator_p
(parser, decl_specs->specs[(int) ds_friend] != 0)));
/* If we don't have a DECL_SPEC yet, then we must be looking at
a type-specifier. */
if (!found_decl_spec && !constructor_p)
{
int decl_spec_declares_class_or_enum;
bool is_cv_qualifier;
tree type_spec;
type_spec
= cp_parser_type_specifier (parser, flags,
decl_specs,
/*is_declaration=*/true,
&decl_spec_declares_class_or_enum,
&is_cv_qualifier);
*declares_class_or_enum |= decl_spec_declares_class_or_enum;
/* If this type-specifier referenced a user-defined type
(a typedef, class-name, etc.), then we can't allow any
more such type-specifiers henceforth.
[dcl.spec]
The longest sequence of decl-specifiers that could
possibly be a type name is taken as the
decl-specifier-seq of a declaration. The sequence shall
be self-consistent as described below.
[dcl.type]
As a general rule, at most one type-specifier is allowed
in the complete decl-specifier-seq of a declaration. The
only exceptions are the following:
-- const or volatile can be combined with any other
type-specifier.
-- signed or unsigned can be combined with char, long,
short, or int.
-- ..
Example:
typedef char* Pc;
void g (const int Pc);
Here, Pc is *not* part of the decl-specifier seq; it's
the declarator. Therefore, once we see a type-specifier
(other than a cv-qualifier), we forbid any additional
user-defined types. We *do* still allow things like `int
int' to be considered a decl-specifier-seq, and issue the
error message later. */
if (type_spec && !is_cv_qualifier)
flags |= CP_PARSER_FLAGS_NO_USER_DEFINED_TYPES;
/* A constructor declarator cannot follow a type-specifier. */
if (type_spec)
{
constructor_possible_p = false;
found_decl_spec = true;
if (!is_cv_qualifier)
decl_specs->any_type_specifiers_p = true;
}
}
/* If we still do not have a DECL_SPEC, then there are no more
decl-specifiers. */
if (!found_decl_spec)
break;
decl_specs->any_specifiers_p = true;
/* After we see one decl-specifier, further decl-specifiers are
always optional. */
flags |= CP_PARSER_FLAGS_OPTIONAL;
}
cp_parser_check_decl_spec (decl_specs, start_token->location);
/* Don't allow a friend specifier with a class definition. */
if (decl_specs->specs[(int) ds_friend] != 0
&& (*declares_class_or_enum & 2))
error_at (start_token->location,
"class definition may not be declared a friend");
}
/* Parse an (optional) storage-class-specifier.
storage-class-specifier:
auto
register
static
extern
mutable
GNU Extension:
storage-class-specifier:
thread
   Returns an IDENTIFIER_NODE corresponding to the keyword used, or
   NULL_TREE if no storage-class-specifier is present.  */
static tree
cp_parser_storage_class_specifier_opt (cp_parser* parser)
{
  enum rid kw = cp_lexer_peek_token (parser->lexer)->keyword;
  bool storage_class_p;

  /* In C++11 and later, `auto' is a simple-type-specifier rather than
     a storage class, so only treat it as one when parsing C++98.  */
  if (kw == RID_AUTO)
    storage_class_p = (cxx_dialect == cxx98);
  else
    storage_class_p = (kw == RID_REGISTER
		       || kw == RID_STATIC
		       || kw == RID_EXTERN
		       || kw == RID_MUTABLE
		       /* GNU extension.  */
		       || kw == RID_THREAD);

  if (!storage_class_p)
    return NULL_TREE;

  /* Consume the storage-class keyword and hand back its identifier.  */
  return cp_lexer_consume_token (parser->lexer)->u.value;
}
/* Parse an (optional) function-specifier.
function-specifier:
inline
virtual
explicit
   Returns an IDENTIFIER_NODE corresponding to the keyword used, or
   NULL_TREE if the next token is not a function-specifier.
   Updates DECL_SPECS, if it is non-NULL.  */
static tree
cp_parser_function_specifier_opt (cp_parser* parser,
				  cp_decl_specifier_seq *decl_specs)
{
  cp_token *token = cp_lexer_peek_token (parser->lexer);

  if (token->keyword == RID_INLINE)
    {
      if (decl_specs)
	++decl_specs->specs[(int) ds_inline];
    }
  else if (token->keyword == RID_VIRTUAL)
    {
      /* 14.5.2.3 [temp.mem]: a member function template shall not be
	 virtual.  Diagnose, but still consume the keyword below so
	 parsing can continue.  */
      if (PROCESSING_REAL_TEMPLATE_DECL_P ())
	error_at (token->location, "templates may not be %<virtual%>");
      else if (decl_specs)
	++decl_specs->specs[(int) ds_virtual];
    }
  else if (token->keyword == RID_EXPLICIT)
    {
      if (decl_specs)
	++decl_specs->specs[(int) ds_explicit];
    }
  else
    /* Not a function-specifier at all.  */
    return NULL_TREE;

  /* Consume the function-specifier token and return its identifier.  */
  return cp_lexer_consume_token (parser->lexer)->u.value;
}
/* Parse a linkage-specification.
linkage-specification:
extern string-literal { declaration-seq [opt] }
extern string-literal declaration */
static void
cp_parser_linkage_specification (cp_parser* parser)
{
  tree linkage;
  /* Look for the `extern' keyword.  */
  cp_parser_require_keyword (parser, RID_EXTERN, RT_EXTERN);
  /* Look for the string-literal naming the linkage, e.g. "C".  */
  linkage = cp_parser_string_literal (parser, false, false);
  /* Transform the literal into an identifier.  If the literal is a
     wide-character string, or contains embedded NULs, then we can't
     handle it as the user wants; either case makes strlen disagree
     with the recorded string length.  */
  if (strlen (TREE_STRING_POINTER (linkage))
      != (size_t) (TREE_STRING_LENGTH (linkage) - 1))
    {
      cp_parser_error (parser, "invalid linkage-specification");
      /* Assume C++ linkage.  */
      linkage = lang_name_cplusplus;
    }
  else
    linkage = get_identifier (TREE_STRING_POINTER (linkage));
  /* We're now using the new linkage; this is balanced by the
     pop_lang_context call at the end of the function.  */
  push_lang_context (linkage);
  /* If the next token is a `{', then we're using the first
     production: extern string-literal { declaration-seq [opt] }.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
    {
      /* Consume the `{' token.  */
      cp_lexer_consume_token (parser->lexer);
      /* Parse the declarations.  */
      cp_parser_declaration_seq_opt (parser);
      /* Look for the closing `}'.  */
      cp_parser_require (parser, CPP_CLOSE_BRACE, RT_CLOSE_BRACE);
    }
  /* Otherwise, there's just one declaration.  */
  else
    {
      bool saved_in_unbraced_linkage_specification_p;
      /* Record that we are inside an unbraced linkage specification
	 while parsing the single declaration, saving and restoring
	 the flag since these constructs can nest.  */
      saved_in_unbraced_linkage_specification_p
	= parser->in_unbraced_linkage_specification_p;
      parser->in_unbraced_linkage_specification_p = true;
      cp_parser_declaration (parser);
      parser->in_unbraced_linkage_specification_p
	= saved_in_unbraced_linkage_specification_p;
    }
  /* We're done with the linkage-specification.  */
  pop_lang_context ();
}
/* Parse a static_assert-declaration.
static_assert-declaration:
static_assert ( constant-expression , string-literal ) ;
If MEMBER_P, this static_assert is a class member. */
static void
cp_parser_static_assert(cp_parser *parser, bool member_p)
{
  tree condition;
  tree message;
  cp_token *token;
  location_t saved_loc;
  /* Receives cp_parser_constant_expression's "is non-constant" flag;
     deliberately unused, since finish_static_assert gives the better
     diagnostic for non-constant conditions.  */
  bool dummy;
  /* Peek at the `static_assert' token so we can keep track of exactly
     where the static assertion started.  */
  token = cp_lexer_peek_token (parser->lexer);
  saved_loc = token->location;
  /* Look for the `static_assert' keyword.  */
  if (!cp_parser_require_keyword (parser, RID_STATIC_ASSERT,
                                  RT_STATIC_ASSERT))
    return;
  /* We know we are in a static assertion; commit to any tentative
     parse.  */
  if (cp_parser_parsing_tentatively (parser))
    cp_parser_commit_to_tentative_parse (parser);
  /* Parse the `(' starting the static assertion condition.  */
  cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);
  /* Parse the constant-expression.  Allow a non-constant expression
     here in order to give better diagnostics in finish_static_assert.  */
  condition =
    cp_parser_constant_expression (parser,
                                   /*allow_non_constant_p=*/true,
                                   /*non_constant_p=*/&dummy);
  /* Parse the separating `,'.  */
  cp_parser_require (parser, CPP_COMMA, RT_COMMA);
  /* Parse the string-literal message.  */
  message = cp_parser_string_literal (parser,
                                      /*translate=*/false,
                                      /*wide_ok=*/true);
  /* A `)' completes the static assertion.  If it is missing, skip to
     the closing paren to recover.  */
  if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN))
    cp_parser_skip_to_closing_parenthesis (parser,
                                           /*recovering=*/true,
                                           /*or_comma=*/false,
                                           /*consume_paren=*/true);
  /* A semicolon terminates the declaration.  */
  cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);
  /* Complete the static assertion, which may mean either processing
     the static assert now or saving it for template instantiation.  */
  finish_static_assert (condition, message, saved_loc, member_p);
}
/* Parse a `decltype' type. Returns the type.
simple-type-specifier:
decltype ( expression ) */
static tree
cp_parser_decltype (cp_parser *parser)
{
  tree expr;
  bool id_expression_or_member_access_p = false;
  const char *saved_message;
  bool saved_integral_constant_expression_p;
  bool saved_non_integral_constant_expression_p;
  cp_token *id_expr_start_token;
  cp_token *start_token = cp_lexer_peek_token (parser->lexer);
  if (start_token->type == CPP_DECLTYPE)
    {
      /* Already parsed on a previous pass; just reuse the result.  */
      cp_lexer_consume_token (parser->lexer);
      return start_token->u.value;
    }
  /* Look for the `decltype' token.  */
  if (!cp_parser_require_keyword (parser, RID_DECLTYPE, RT_DECLTYPE))
    return error_mark_node;
  /* Types cannot be defined in a `decltype' expression.  Save away the
     old message.  */
  saved_message = parser->type_definition_forbidden_message;
  /* And create the new one.  */
  parser->type_definition_forbidden_message
    = G_("types may not be defined in %<decltype%> expressions");
  /* The restrictions on constant-expressions do not apply inside
     decltype expressions.  */
  saved_integral_constant_expression_p
    = parser->integral_constant_expression_p;
  saved_non_integral_constant_expression_p
    = parser->non_integral_constant_expression_p;
  parser->integral_constant_expression_p = false;
  /* Do not actually evaluate the expression.  */
  ++cp_unevaluated_operand;
  /* Do not warn about problems with the expression.  */
  ++c_inhibit_evaluation_warnings;
  /* Parse the opening `('.  */
  if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN))
    {
      /* Undo the state changes made above before bailing out;
	 returning without doing so would leave the unevaluated-operand
	 and warning-inhibition counters permanently incremented and
	 the overridden parser flags would leak into later parsing.  */
      --cp_unevaluated_operand;
      --c_inhibit_evaluation_warnings;
      parser->type_definition_forbidden_message = saved_message;
      parser->integral_constant_expression_p
	= saved_integral_constant_expression_p;
      parser->non_integral_constant_expression_p
	= saved_non_integral_constant_expression_p;
      return error_mark_node;
    }
  /* First, try parsing an id-expression.  */
  id_expr_start_token = cp_lexer_peek_token (parser->lexer);
  cp_parser_parse_tentatively (parser);
  expr = cp_parser_id_expression (parser,
				  /*template_keyword_p=*/false,
				  /*check_dependency_p=*/true,
				  /*template_p=*/NULL,
				  /*declarator_p=*/false,
				  /*optional_p=*/false);
  if (!cp_parser_error_occurred (parser) && expr != error_mark_node)
    {
      bool non_integral_constant_expression_p = false;
      tree id_expression = expr;
      cp_id_kind idk;
      const char *error_msg;
      if (TREE_CODE (expr) == IDENTIFIER_NODE)
	/* Lookup the name we got back from the id-expression.  */
	expr = cp_parser_lookup_name (parser, expr,
				      none_type,
				      /*is_template=*/false,
				      /*is_namespace=*/false,
				      /*check_dependency=*/true,
				      /*ambiguous_decls=*/NULL,
				      id_expr_start_token->location);
      if (expr
	  && expr != error_mark_node
	  && TREE_CODE (expr) != TEMPLATE_ID_EXPR
	  && TREE_CODE (expr) != TYPE_DECL
	  && (TREE_CODE (expr) != BIT_NOT_EXPR
	      || !TYPE_P (TREE_OPERAND (expr, 0)))
	  && cp_lexer_peek_token (parser->lexer)->type == CPP_CLOSE_PAREN)
	{
	  /* Complete lookup of the id-expression.  */
	  expr = (finish_id_expression
		  (id_expression, expr, parser->scope, &idk,
		   /*integral_constant_expression_p=*/false,
		   /*allow_non_integral_constant_expression_p=*/true,
		   &non_integral_constant_expression_p,
		   /*template_p=*/false,
		   /*done=*/true,
		   /*address_p=*/false,
		   /*template_arg_p=*/false,
		   &error_msg,
		   id_expr_start_token->location));
	  if (expr == error_mark_node)
	    /* We found an id-expression, but it was something that we
	       should not have found.  This is an error, not something
	       we can recover from, so note that we found an
	       id-expression and we'll recover as gracefully as
	       possible.  */
	    id_expression_or_member_access_p = true;
	}
      if (expr
	  && expr != error_mark_node
	  && cp_lexer_peek_token (parser->lexer)->type == CPP_CLOSE_PAREN)
	/* We have an id-expression.  */
	id_expression_or_member_access_p = true;
    }
  if (!id_expression_or_member_access_p)
    {
      /* Abort the id-expression parse.  */
      cp_parser_abort_tentative_parse (parser);
      /* Parsing tentatively, again.  */
      cp_parser_parse_tentatively (parser);
      /* Parse a class member access.  */
      expr = cp_parser_postfix_expression (parser, /*address_p=*/false,
                                           /*cast_p=*/false,
                                           /*member_access_only_p=*/true, NULL);
      if (expr
	  && expr != error_mark_node
	  && cp_lexer_peek_token (parser->lexer)->type == CPP_CLOSE_PAREN)
	/* We have an id-expression.  */
	id_expression_or_member_access_p = true;
    }
  if (id_expression_or_member_access_p)
    /* We have parsed the complete id-expression or member access.  */
    cp_parser_parse_definitely (parser);
  else
    {
      bool saved_greater_than_is_operator_p;
      /* Abort our attempt to parse an id-expression or member access
	 expression.  */
      cp_parser_abort_tentative_parse (parser);
      /* Within a parenthesized expression, a `>' token is always
	 the greater-than operator.  */
      saved_greater_than_is_operator_p
	= parser->greater_than_is_operator_p;
      parser->greater_than_is_operator_p = true;
      /* Parse a full expression.  */
      expr = cp_parser_expression (parser, /*cast_p=*/false, NULL);
      /* The `>' token might be the end of a template-id or
	 template-parameter-list now.  */
      parser->greater_than_is_operator_p
	= saved_greater_than_is_operator_p;
    }
  /* Go back to evaluating expressions.  */
  --cp_unevaluated_operand;
  --c_inhibit_evaluation_warnings;
  /* Restore the old message and the integral constant expression
     flags.  */
  parser->type_definition_forbidden_message = saved_message;
  parser->integral_constant_expression_p
    = saved_integral_constant_expression_p;
  parser->non_integral_constant_expression_p
    = saved_non_integral_constant_expression_p;
  /* Parse to the closing `)'.  */
  if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN))
    {
      cp_parser_skip_to_closing_parenthesis (parser, true, false,
					     /*consume_paren=*/true);
      return error_mark_node;
    }
  expr = finish_decltype_type (expr, id_expression_or_member_access_p,
			       tf_warning_or_error);
  /* Replace the decltype with a CPP_DECLTYPE so we don't need to parse
     it again.  */
  start_token->type = CPP_DECLTYPE;
  start_token->u.value = expr;
  start_token->keyword = RID_MAX;
  cp_lexer_purge_tokens_after (parser->lexer, start_token);
  return expr;
}
/* Special member functions [gram.special] */
/* Parse a conversion-function-id.
conversion-function-id:
operator conversion-type-id
Returns an IDENTIFIER_NODE representing the operator. */
static tree
cp_parser_conversion_function_id (cp_parser* parser)
{
  tree type;
  tree saved_scope;
  tree saved_qualifying_scope;
  tree saved_object_scope;
  tree pushed_scope = NULL_TREE;
  /* Look for the `operator' token.  */
  if (!cp_parser_require_keyword (parser, RID_OPERATOR, RT_OPERATOR))
    return error_mark_node;
  /* When we parse the conversion-type-id, the current scope will be
     reset.  However, we need that information in order to be able to
     look up the conversion function later, so we save it here.  */
  saved_scope = parser->scope;
  saved_qualifying_scope = parser->qualifying_scope;
  saved_object_scope = parser->object_scope;
  /* We must enter the scope of the class so that the names of
     entities declared within the class are available in the
     conversion-type-id.  For example, consider:
       struct S {
	 typedef int I;
	 operator I();
       };
       S::operator I() { ... }
     In order to see that `I' is a type-name in the definition, we
     must be in the scope of `S'.  */
  if (saved_scope)
    pushed_scope = push_scope (saved_scope);
  /* Parse the conversion-type-id.  */
  type = cp_parser_conversion_type_id (parser);
  /* Leave the scope of the class, if any.  */
  if (pushed_scope)
    pop_scope (pushed_scope);
  /* Restore the saved scope.  */
  parser->scope = saved_scope;
  parser->qualifying_scope = saved_qualifying_scope;
  parser->object_scope = saved_object_scope;
  /* If the TYPE is invalid, indicate failure.  */
  if (type == error_mark_node)
    return error_mark_node;
  /* Produce the identifier naming `operator TYPE'.  */
  return mangle_conv_op_name_for_type (type);
}
/* Parse a conversion-type-id:
conversion-type-id:
type-specifier-seq conversion-declarator [opt]
Returns the TYPE specified. */
static tree
cp_parser_conversion_type_id (cp_parser* parser)
{
  tree attributes;
  cp_decl_specifier_seq type_specifiers;
  cp_declarator *declarator;
  tree type_specified;
  /* Parse the attributes.  */
  attributes = cp_parser_attributes_opt (parser);
  /* Parse the type-specifiers.  */
  cp_parser_type_specifier_seq (parser, /*is_declaration=*/false,
				/*is_trailing_return=*/false,
				&type_specifiers);
  /* If that didn't work, stop.  */
  if (type_specifiers.type == error_mark_node)
    return error_mark_node;
  /* Parse the (optional) conversion-declarator.  */
  declarator = cp_parser_conversion_declarator_opt (parser);
  /* Build the type; TYPENAME tells grokdeclarator this is a type-id
     context rather than an object declaration.  */
  type_specified = grokdeclarator (declarator, &type_specifiers, TYPENAME,
				   /*initialized=*/0, &attributes);
  if (attributes)
    cplus_decl_attributes (&type_specified, attributes, /*flags=*/0);
  /* Don't give this error when parsing tentatively.  This happens to
     work because we always parse this definitively once.  */
  if (! cp_parser_uncommitted_to_tentative_parse_p (parser)
      && type_uses_auto (type_specified))
    {
      error ("invalid use of %<auto%> in conversion operator");
      return error_mark_node;
    }
  return type_specified;
}
/* Parse an (optional) conversion-declarator.
conversion-declarator:
ptr-operator conversion-declarator [opt]
*/
static cp_declarator *
cp_parser_conversion_declarator_opt (cp_parser* parser)
{
  enum tree_code op;
  tree class_type;
  cp_cv_quals quals;
  cp_declarator *inner;

  /* A ptr-operator may or may not follow; probe for one tentatively.  */
  cp_parser_parse_tentatively (parser);
  op = cp_parser_ptr_operator (parser, &class_type, &quals);
  if (!cp_parser_parse_definitely (parser))
    /* No ptr-operator here: the conversion-declarator is absent.  */
    return NULL;

  /* A ptr-operator was present; recurse for any further ones, then
     wrap the result in the corresponding indirect declarator.  */
  inner = cp_parser_conversion_declarator_opt (parser);
  return cp_parser_make_indirect_declarator (op, class_type, quals, inner);
}
/* Parse an (optional) ctor-initializer.
ctor-initializer:
: mem-initializer-list
Returns TRUE iff the ctor-initializer was actually present. */
static bool
cp_parser_ctor_initializer_opt (cp_parser* parser)
{
  if (cp_lexer_next_token_is (parser->lexer, CPP_COLON))
    {
      /* Eat the `:' and parse the mem-initializer-list it introduces.  */
      cp_lexer_consume_token (parser->lexer);
      cp_parser_mem_initializer_list (parser);
      return true;
    }

  /* No ctor-initializer.  A constructor still needs default
     initialization of its bases and members.  */
  if (DECL_CONSTRUCTOR_P (current_function_decl))
    finish_mem_initializers (NULL_TREE);
  return false;
}
/* Parse a mem-initializer-list.
mem-initializer-list:
mem-initializer ... [opt]
mem-initializer ... [opt] , mem-initializer-list */
static void
cp_parser_mem_initializer_list (cp_parser* parser)
{
  tree mem_initializer_list = NULL_TREE;
  /* ERROR_MARK_NODE doubles as "no delegating target constructor seen
     yet".  */
  tree target_ctor = error_mark_node;
  cp_token *token = cp_lexer_peek_token (parser->lexer);
  /* Let the semantic analysis code know that we are starting the
     mem-initializer-list.  */
  if (!DECL_CONSTRUCTOR_P (current_function_decl))
    error_at (token->location,
	      "only constructors take member initializers");
  /* Loop through the list.  */
  while (true)
    {
      tree mem_initializer;
      token = cp_lexer_peek_token (parser->lexer);
      /* Parse the mem-initializer.  */
      mem_initializer = cp_parser_mem_initializer (parser);
      /* If the next token is a `...', we're expanding member initializers. */
      if (cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS))
	{
	  /* Consume the `...'. */
	  cp_lexer_consume_token (parser->lexer);
	  /* The TREE_PURPOSE must be a _TYPE, because base-specifiers
	     can be expanded but members cannot. */
	  if (mem_initializer != error_mark_node
	      && !TYPE_P (TREE_PURPOSE (mem_initializer)))
	    {
	      /* %qD for consistency with the other diagnostics in this
		 function (this message previously spelled the quoting
		 by hand as %<%D%>).  */
	      error_at (token->location,
			"cannot expand initializer for member %qD",
			TREE_PURPOSE (mem_initializer));
	      mem_initializer = error_mark_node;
	    }
	  /* Construct the pack expansion type. */
	  if (mem_initializer != error_mark_node)
	    mem_initializer = make_pack_expansion (mem_initializer);
	}
      /* Once a delegating target constructor has been seen, no other
	 mem-initializer may follow it.  */
      if (target_ctor != error_mark_node
	  && mem_initializer != error_mark_node)
	{
	  error ("mem-initializer for %qD follows constructor delegation",
		 TREE_PURPOSE (mem_initializer));
	  mem_initializer = error_mark_node;
	}
      /* Look for a target constructor. */
      if (mem_initializer != error_mark_node
	  && TYPE_P (TREE_PURPOSE (mem_initializer))
	  && same_type_p (TREE_PURPOSE (mem_initializer), current_class_type))
	{
	  maybe_warn_cpp0x (CPP0X_DELEGATING_CTORS);
	  if (mem_initializer_list)
	    {
	      error ("constructor delegation follows mem-initializer for %qD",
		     TREE_PURPOSE (mem_initializer_list));
	      mem_initializer = error_mark_node;
	    }
	  target_ctor = mem_initializer;
	}
      /* Add it to the list, unless it was erroneous.  */
      if (mem_initializer != error_mark_node)
	{
	  TREE_CHAIN (mem_initializer) = mem_initializer_list;
	  mem_initializer_list = mem_initializer;
	}
      /* If the next token is not a `,', we're done.  */
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA))
	break;
      /* Consume the `,' token.  */
      cp_lexer_consume_token (parser->lexer);
    }
  /* Perform semantic analysis.  */
  if (DECL_CONSTRUCTOR_P (current_function_decl))
    finish_mem_initializers (mem_initializer_list);
}
/* Parse a mem-initializer.
mem-initializer:
mem-initializer-id ( expression-list [opt] )
mem-initializer-id braced-init-list
GNU extension:
mem-initializer:
( expression-list [opt] )
Returns a TREE_LIST. The TREE_PURPOSE is the TYPE (for a base
class) or FIELD_DECL (for a non-static data member) to initialize;
the TREE_VALUE is the expression-list. An empty initialization
list is represented by void_list_node. */
static tree
cp_parser_mem_initializer (cp_parser* parser)
{
  tree mem_initializer_id;
  tree expression_list;
  tree member;
  cp_token *token = cp_lexer_peek_token (parser->lexer);
  /* Find out what is being initialized.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN))
    {
      permerror (token->location,
		 "anachronistic old-style base class initializer");
      mem_initializer_id = NULL_TREE;
    }
  else
    {
      mem_initializer_id = cp_parser_mem_initializer_id (parser);
      if (mem_initializer_id == error_mark_node)
	return mem_initializer_id;
    }
  member = expand_member_init (mem_initializer_id);
  /* A non-DECL result means a base class is being initialized; record
     that for the expression parser via IN_BASE_INITIALIZER.  It is
     cleared again on every exit path below, including the error
     returns (the original code left it set to 1 on those, which
     would misparse subsequent expressions).  */
  if (member && !DECL_P (member))
    in_base_initializer = 1;
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
    {
      bool expr_non_constant_p;
      maybe_warn_cpp0x (CPP0X_INITIALIZER_LISTS);
      expression_list = cp_parser_braced_list (parser, &expr_non_constant_p);
      CONSTRUCTOR_IS_DIRECT_INIT (expression_list) = 1;
      expression_list = build_tree_list (NULL_TREE, expression_list);
    }
  else
    {
      VEC(tree,gc)* vec;
      vec = cp_parser_parenthesized_expression_list (parser, non_attr,
						     /*cast_p=*/false,
						     /*allow_expansion_p=*/true,
						     /*non_constant_p=*/NULL);
      if (vec == NULL)
	{
	  in_base_initializer = 0;
	  return error_mark_node;
	}
      expression_list = build_tree_list_vec (vec);
      release_tree_vector (vec);
    }
  if (expression_list == error_mark_node)
    {
      in_base_initializer = 0;
      return error_mark_node;
    }
  if (!expression_list)
    expression_list = void_type_node;
  in_base_initializer = 0;
  return member ? build_tree_list (member, expression_list) : error_mark_node;
}
/* Parse a mem-initializer-id.
mem-initializer-id:
:: [opt] nested-name-specifier [opt] class-name
identifier
   Returns a TYPE indicating the class to be initialized for the first
production. Returns an IDENTIFIER_NODE indicating the data member
to be initialized for the second production. */
static tree
cp_parser_mem_initializer_id (cp_parser* parser)
{
  bool global_scope_p;
  bool nested_name_specifier_p;
  bool template_p = false;
  tree id;
  cp_token *token = cp_lexer_peek_token (parser->lexer);
  /* `typename' is not allowed in this context ([temp.res]).  Diagnose
     it, consume it, and carry on parsing what follows.  */
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_TYPENAME))
    {
      error_at (token->location,
		"keyword %<typename%> not allowed in this context (a qualified "
		"member initializer is implicitly a type)");
      cp_lexer_consume_token (parser->lexer);
    }
  /* Look for the optional `::' operator.  */
  global_scope_p
    = (cp_parser_global_scope_opt (parser,
				   /*current_scope_valid_p=*/false)
       != NULL_TREE);
  /* Look for the optional nested-name-specifier.  The simplest way to
     implement:
       [temp.res]
       The keyword `typename' is not permitted in a base-specifier or
       mem-initializer; in these contexts a qualified name that
       depends on a template-parameter is implicitly assumed to be a
       type name.
     is to assume that we have seen the `typename' keyword at this
     point.  */
  nested_name_specifier_p
    = (cp_parser_nested_name_specifier_opt (parser,
					    /*typename_keyword_p=*/true,
					    /*check_dependency_p=*/true,
					    /*type_p=*/true,
					    /*is_declaration=*/true)
       != NULL_TREE);
  /* An optional `template' keyword may follow the
     nested-name-specifier.  */
  if (nested_name_specifier_p)
    template_p = cp_parser_optional_template_keyword (parser);
  /* If there is a `::' operator or a nested-name-specifier, then we
     are definitely looking for a class-name.  */
  if (global_scope_p || nested_name_specifier_p)
    return cp_parser_class_name (parser,
				 /*typename_keyword_p=*/true,
				 /*template_keyword_p=*/template_p,
				 typename_type,
				 /*check_dependency_p=*/true,
				 /*class_head_p=*/false,
				 /*is_declaration=*/true);
  /* Otherwise, we could also be looking for an ordinary identifier.  */
  cp_parser_parse_tentatively (parser);
  /* Try a class-name.  */
  id = cp_parser_class_name (parser,
			     /*typename_keyword_p=*/true,
			     /*template_keyword_p=*/false,
			     none_type,
			     /*check_dependency_p=*/true,
			     /*class_head_p=*/false,
			     /*is_declaration=*/true);
  /* If we found one, we're done.  */
  if (cp_parser_parse_definitely (parser))
    return id;
  /* Otherwise, look for an ordinary identifier (a non-static data
     member).  */
  return cp_parser_identifier (parser);
}
/* Overloading [gram.over] */
/* Parse an operator-function-id.
operator-function-id:
operator operator
Returns an IDENTIFIER_NODE for the operator which is a
human-readable spelling of the identifier, e.g., `operator +'. */
static tree
cp_parser_operator_function_id (cp_parser* parser)
{
  /* The id must begin with the `operator' keyword; if it does, the
     operator itself follows immediately.  */
  if (cp_parser_require_keyword (parser, RID_OPERATOR, RT_OPERATOR))
    return cp_parser_operator (parser);
  return error_mark_node;
}
/* Return an identifier node for a user-defined literal operator.
The suffix identifier is chained to the operator name identifier. */
/* Build the identifier for a user-defined literal operator from its
   suffix NAME.  The scratch buffer is freed once get_identifier has
   interned a copy; the original code leaked it on every call.  */
static tree
cp_literal_operator_id (const char* name)
{
  tree identifier;
  char *buffer = XNEWVEC (char, strlen (UDLIT_OP_ANSI_PREFIX)
			  + strlen (name) + 10);
  sprintf (buffer, UDLIT_OP_ANSI_FORMAT, name);
  /* get_identifier copies BUFFER into the identifier hash table, so
     the temporary can be released afterwards.  */
  identifier = get_identifier (buffer);
  XDELETEVEC (buffer);
  /*IDENTIFIER_UDLIT_OPNAME_P (identifier) = 1;  If we get a flag someday. */
  return identifier;
}
/* Parse an operator.
operator:
new delete new[] delete[] + - * / % ^ & | ~ ! = < >
+= -= *= /= %= ^= &= |= << >> >>= <<= == != <= >= &&
|| ++ -- , ->* -> () []
GNU Extensions:
operator:
<? >? <?= >?=
Returns an IDENTIFIER_NODE for the operator which is a
human-readable spelling of the identifier, e.g., `operator +'. */
static tree
cp_parser_operator (cp_parser* parser)
{
  tree id = NULL_TREE;
  cp_token *token;
  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);
  /* Figure out which operator we have.  */
  switch (token->type)
    {
    case CPP_KEYWORD:
      {
	enum tree_code op;
	/* The keyword should be either `new' or `delete'.  */
	if (token->keyword == RID_NEW)
	  op = NEW_EXPR;
	else if (token->keyword == RID_DELETE)
	  op = DELETE_EXPR;
	else
	  break;
	/* Consume the `new' or `delete' token.  */
	cp_lexer_consume_token (parser->lexer);
	/* Peek at the next token.  */
	token = cp_lexer_peek_token (parser->lexer);
	/* If it's a `[' token then this is the array variant of the
	   operator.  */
	if (token->type == CPP_OPEN_SQUARE)
	  {
	    /* Consume the `[' token.  */
	    cp_lexer_consume_token (parser->lexer);
	    /* Look for the `]' token.  */
	    cp_parser_require (parser, CPP_CLOSE_SQUARE, RT_CLOSE_SQUARE);
	    id = ansi_opname (op == NEW_EXPR
			      ? VEC_NEW_EXPR : VEC_DELETE_EXPR);
	  }
	/* Otherwise, we have the non-array variant.  */
	else
	  id = ansi_opname (op);
	return id;
      }
    case CPP_PLUS:
      id = ansi_opname (PLUS_EXPR);
      break;
    case CPP_MINUS:
      id = ansi_opname (MINUS_EXPR);
      break;
    case CPP_MULT:
      id = ansi_opname (MULT_EXPR);
      break;
    case CPP_DIV:
      id = ansi_opname (TRUNC_DIV_EXPR);
      break;
    case CPP_MOD:
      id = ansi_opname (TRUNC_MOD_EXPR);
      break;
    case CPP_XOR:
      id = ansi_opname (BIT_XOR_EXPR);
      break;
    case CPP_AND:
      id = ansi_opname (BIT_AND_EXPR);
      break;
    case CPP_OR:
      id = ansi_opname (BIT_IOR_EXPR);
      break;
    case CPP_COMPL:
      id = ansi_opname (BIT_NOT_EXPR);
      break;
    case CPP_NOT:
      id = ansi_opname (TRUTH_NOT_EXPR);
      break;
    case CPP_EQ:
      id = ansi_assopname (NOP_EXPR);
      break;
    case CPP_LESS:
      id = ansi_opname (LT_EXPR);
      break;
    case CPP_GREATER:
      id = ansi_opname (GT_EXPR);
      break;
    case CPP_PLUS_EQ:
      id = ansi_assopname (PLUS_EXPR);
      break;
    case CPP_MINUS_EQ:
      id = ansi_assopname (MINUS_EXPR);
      break;
    case CPP_MULT_EQ:
      id = ansi_assopname (MULT_EXPR);
      break;
    case CPP_DIV_EQ:
      id = ansi_assopname (TRUNC_DIV_EXPR);
      break;
    case CPP_MOD_EQ:
      id = ansi_assopname (TRUNC_MOD_EXPR);
      break;
    case CPP_XOR_EQ:
      id = ansi_assopname (BIT_XOR_EXPR);
      break;
    case CPP_AND_EQ:
      id = ansi_assopname (BIT_AND_EXPR);
      break;
    case CPP_OR_EQ:
      id = ansi_assopname (BIT_IOR_EXPR);
      break;
    case CPP_LSHIFT:
      id = ansi_opname (LSHIFT_EXPR);
      break;
    case CPP_RSHIFT:
      id = ansi_opname (RSHIFT_EXPR);
      break;
    case CPP_LSHIFT_EQ:
      id = ansi_assopname (LSHIFT_EXPR);
      break;
    case CPP_RSHIFT_EQ:
      id = ansi_assopname (RSHIFT_EXPR);
      break;
    case CPP_EQ_EQ:
      id = ansi_opname (EQ_EXPR);
      break;
    case CPP_NOT_EQ:
      id = ansi_opname (NE_EXPR);
      break;
    case CPP_LESS_EQ:
      id = ansi_opname (LE_EXPR);
      break;
    case CPP_GREATER_EQ:
      id = ansi_opname (GE_EXPR);
      break;
    case CPP_AND_AND:
      id = ansi_opname (TRUTH_ANDIF_EXPR);
      break;
    case CPP_OR_OR:
      id = ansi_opname (TRUTH_ORIF_EXPR);
      break;
    case CPP_PLUS_PLUS:
      id = ansi_opname (POSTINCREMENT_EXPR);
      break;
    case CPP_MINUS_MINUS:
      id = ansi_opname (PREDECREMENT_EXPR);
      break;
    case CPP_COMMA:
      id = ansi_opname (COMPOUND_EXPR);
      break;
    case CPP_DEREF_STAR:
      id = ansi_opname (MEMBER_REF);
      break;
    case CPP_DEREF:
      id = ansi_opname (COMPONENT_REF);
      break;
    case CPP_OPEN_PAREN:
      /* Consume the `('.  */
      cp_lexer_consume_token (parser->lexer);
      /* Look for the matching `)'.  */
      cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
      return ansi_opname (CALL_EXPR);
    case CPP_OPEN_SQUARE:
      /* Consume the `['.  */
      cp_lexer_consume_token (parser->lexer);
      /* Look for the matching `]'.  */
      cp_parser_require (parser, CPP_CLOSE_SQUARE, RT_CLOSE_SQUARE);
      return ansi_opname (ARRAY_REF);
    case CPP_STRING:
      /* A user-defined literal operator: `operator "" suffix'.  */
      if (cxx_dialect == cxx98)
	maybe_warn_cpp0x (CPP0X_USER_DEFINED_LITERALS);
      if (TREE_STRING_LENGTH (token->u.value) > 2)
	{
	  error ("expected empty string after %<operator%> keyword");
	  return error_mark_node;
	}
      /* Consume the string.  */
      cp_lexer_consume_token (parser->lexer);
      /* Look for the suffix identifier.  */
      token = cp_lexer_peek_token (parser->lexer);
      if (token->type == CPP_NAME)
	{
	  id = cp_parser_identifier (parser);
	  if (id != error_mark_node)
	    {
	      const char *name = IDENTIFIER_POINTER (id);
	      return cp_literal_operator_id (name);
	    }
	}
      else
	{
	  error ("expected suffix identifier");
	  return error_mark_node;
	}
      /* The suffix identifier failed to parse (and has already been
	 diagnosed).  Return instead of falling through into the
	 CPP_STRING_USERDEF case, which would emit a bogus "missing
	 space" error.  */
      return error_mark_node;
    case CPP_STRING_USERDEF:
      error ("missing space between %<\"\"%> and suffix identifier");
      return error_mark_node;
    default:
      /* Anything else is an error.  */
      break;
    }
  /* If we have selected an identifier, we need to consume the
     operator token.  */
  if (id)
    cp_lexer_consume_token (parser->lexer);
  /* Otherwise, no valid operator name was present.  */
  else
    {
      cp_parser_error (parser, "expected operator");
      id = error_mark_node;
    }
  return id;
}
/* Parse a template-declaration.
template-declaration:
export [opt] template < template-parameter-list > declaration
If MEMBER_P is TRUE, this template-declaration occurs within a
class-specifier.
The grammar rule given by the standard isn't correct. What
is really meant is:
template-declaration:
export [opt] template-parameter-list-seq
decl-specifier-seq [opt] init-declarator [opt] ;
export [opt] template-parameter-list-seq
function-definition
template-parameter-list-seq:
template-parameter-list-seq [opt]
template < template-parameter-list > */
static void
cp_parser_template_declaration (cp_parser* parser, bool member_p)
{
  bool saw_export
    = cp_lexer_next_token_is_keyword (parser->lexer, RID_EXPORT);

  if (saw_export)
    {
      /* Eat the `export' keyword; exported templates are not
	 supported, so tell the user it has no effect.  */
      cp_lexer_consume_token (parser->lexer);
      warning (0, "keyword %<export%> not implemented, and will be ignored");
    }

  /* Parse the template proper.  */
  cp_parser_template_declaration_after_export (parser, member_p);
}
/* Parse a template-parameter-list.
template-parameter-list:
template-parameter
template-parameter-list , template-parameter
Returns a TREE_LIST. Each node represents a template parameter.
The nodes are connected via their TREE_CHAINs. */
static tree
cp_parser_template_parameter_list (cp_parser* parser)
{
  tree parm_list = NULL_TREE;

  begin_template_parm_list ();
  /* The loop below parses the template parms.  We first need to know
     the total number of template parms to be able to compute proper
     canonical types of each dependent type.  So after the loop, when
     we know the total number of template parms,
     end_template_parm_list computes the proper canonical types and
     fixes up the dependent types accordingly.  */
  for (;;)
    {
      tree parm;
      bool non_type_p;
      bool parm_pack_p;
      location_t loc = cp_lexer_peek_token (parser->lexer)->location;

      /* Parse one template-parameter.  */
      parm = cp_parser_template_parameter (parser, &non_type_p,
					   &parm_pack_p);
      if (parm == error_mark_node)
	{
	  /* Keep an erroneous placeholder on the list so later
	     parameters still land at the right positions.  */
	  tree err_parm = build_tree_list (parm, parm);
	  parm_list = chainon (parm_list, err_parm);
	}
      else
	parm_list = process_template_parm (parm_list, loc, parm,
					   non_type_p, parm_pack_p, 0);

      /* A `,' introduces another parameter; anything else ends the
	 list.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_COMMA))
	cp_lexer_consume_token (parser->lexer);
      else
	break;
    }
  return end_template_parm_list (parm_list);
}
/* Parse a template-parameter.
template-parameter:
type-parameter
parameter-declaration
If all goes well, returns a TREE_LIST. The TREE_VALUE represents
the parameter. The TREE_PURPOSE is the default value, if any.
Returns ERROR_MARK_NODE on failure. *IS_NON_TYPE is set to true
iff this parameter is a non-type parameter. *IS_PARAMETER_PACK is
set to true iff this parameter is a parameter pack. */
/* Parse one template-parameter and classify it for the caller; see the
   block comment above for the returned TREE_LIST's layout.  The only
   code change here is the second argument to grokdeclarator, which had
   been corrupted to `¶meter_declarator' (mojibake for
   `&parameter_declarator') and would not compile.  */
static tree
cp_parser_template_parameter (cp_parser* parser, bool *is_non_type,
			      bool *is_parameter_pack)
{
  cp_token *token;
  cp_parameter_declarator *parameter_declarator;
  cp_declarator *id_declarator;
  tree parm;
  /* Assume it is a type parameter or a template parameter.  */
  *is_non_type = false;
  /* Assume it not a parameter pack. */
  *is_parameter_pack = false;
  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);
  /* If it is `class' or `template', we have a type-parameter.  */
  if (token->keyword == RID_TEMPLATE)
    return cp_parser_type_parameter (parser, is_parameter_pack);
  /* If it is `class' or `typename' we do not know yet whether it is a
     type parameter or a non-type parameter.  Consider:
       template <typename T, typename T::X X> ...
     or:
       template <class C, class D*> ...
     Here, the first parameter is a type parameter, and the second is
     a non-type parameter.  We can tell by looking at the token after
     the identifier -- if it is a `,', `=', or `>' then we have a type
     parameter.  */
  if (token->keyword == RID_TYPENAME || token->keyword == RID_CLASS)
    {
      /* Peek at the token after `class' or `typename'.  */
      token = cp_lexer_peek_nth_token (parser->lexer, 2);
      /* If it's an ellipsis, we have a template type parameter
	 pack.  */
      if (token->type == CPP_ELLIPSIS)
	return cp_parser_type_parameter (parser, is_parameter_pack);
      /* If it's an identifier, skip it.  */
      if (token->type == CPP_NAME)
	token = cp_lexer_peek_nth_token (parser->lexer, 3);
      /* Now, see if the token looks like the end of a template
	 parameter.  */
      if (token->type == CPP_COMMA
	  || token->type == CPP_EQ
	  || token->type == CPP_GREATER)
	return cp_parser_type_parameter (parser, is_parameter_pack);
    }
  /* Otherwise, it is a non-type parameter.
     [temp.param]
     When parsing a default template-argument for a non-type
     template-parameter, the first non-nested `>' is taken as the end
     of the template parameter-list rather than a greater-than
     operator.  */
  *is_non_type = true;
  parameter_declarator
     = cp_parser_parameter_declaration (parser, /*template_parm_p=*/true,
					/*parenthesized_p=*/NULL);
  /* If the parameter declaration is marked as a parameter pack, set
     *IS_PARAMETER_PACK to notify the caller.  Also, unmark the
     declarator's PACK_EXPANSION_P, otherwise we'll get errors from
     grokdeclarator. */
  if (parameter_declarator
      && parameter_declarator->declarator
      && parameter_declarator->declarator->parameter_pack_p)
    {
      *is_parameter_pack = true;
      parameter_declarator->declarator->parameter_pack_p = false;
    }
  /* If the next token is an ellipsis, and we don't already have it
     marked as a parameter pack, then we have a parameter pack (that
     has no declarator).  */
  if (!*is_parameter_pack
      && cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS)
      && declarator_can_be_parameter_pack (parameter_declarator->declarator))
    {
      /* Consume the `...'. */
      cp_lexer_consume_token (parser->lexer);
      maybe_warn_variadic_templates ();
      *is_parameter_pack = true;
    }
  /* We might end up with a pack expansion as the type of the non-type
     template parameter, in which case this is a non-type template
     parameter pack.  */
  else if (parameter_declarator
	   && parameter_declarator->decl_specifiers.type
	   && PACK_EXPANSION_P (parameter_declarator->decl_specifiers.type))
    {
      *is_parameter_pack = true;
      parameter_declarator->decl_specifiers.type =
	PACK_EXPANSION_PATTERN (parameter_declarator->decl_specifiers.type);
    }
  if (*is_parameter_pack && cp_lexer_next_token_is (parser->lexer, CPP_EQ))
    {
      /* Parameter packs cannot have default arguments.  However, a
	 user may try to do so, so we'll parse them and give an
	 appropriate diagnostic here.  */
      cp_token *start_token = cp_lexer_peek_token (parser->lexer);
      /* Find the name of the parameter pack.  */
      id_declarator = parameter_declarator->declarator;
      while (id_declarator && id_declarator->kind != cdk_id)
	id_declarator = id_declarator->declarator;
      if (id_declarator && id_declarator->kind == cdk_id)
	error_at (start_token->location,
		  "template parameter pack %qD cannot have a default argument",
		  id_declarator->u.id.unqualified_name);
      else
	error_at (start_token->location,
		  "template parameter pack cannot have a default argument");
      /* Parse the default argument, but throw away the result.  */
      cp_parser_default_argument (parser, /*template_parm_p=*/true);
    }
  /* Build the parameter's declaration.  The second argument must be
     the address of the decl-specifier-seq (&parameter_declarator->...);
     this had been garbled by an HTML-entity corruption.  */
  parm = grokdeclarator (parameter_declarator->declarator,
			 &parameter_declarator->decl_specifiers,
			 TPARM, /*initialized=*/0,
			 /*attrlist=*/NULL);
  if (parm == error_mark_node)
    return error_mark_node;
  return build_tree_list (parameter_declarator->default_argument, parm);
}
/* Parse a type-parameter.
type-parameter:
class identifier [opt]
class identifier [opt] = type-id
typename identifier [opt]
typename identifier [opt] = type-id
template < template-parameter-list > class identifier [opt]
template < template-parameter-list > class identifier [opt]
= id-expression
GNU Extension (variadic templates):
type-parameter:
class ... identifier [opt]
typename ... identifier [opt]
Returns a TREE_LIST. The TREE_VALUE is itself a TREE_LIST. The
TREE_PURPOSE is the default-argument, if any. The TREE_VALUE is
the declaration of the parameter.
Sets *IS_PARAMETER_PACK if this is a template parameter pack. */
static tree
cp_parser_type_parameter (cp_parser* parser, bool *is_parameter_pack)
{
  cp_token *token;
  tree parameter;
  /* Look for a keyword to tell us what kind of parameter this is.  */
  token = cp_parser_require (parser, CPP_KEYWORD, RT_CLASS_TYPENAME_TEMPLATE);
  if (!token)
    return error_mark_node;
  /* Dispatch on which keyword introduced the parameter: `class' and
     `typename' are handled identically; `template' introduces a
     template template-parameter.  */
  switch (token->keyword)
    {
    case RID_CLASS:
    case RID_TYPENAME:
      {
	tree identifier;
	tree default_argument;
	/* If the next token is an ellipsis, we have a template
	   argument pack. */
	if (cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS))
	  {
	    /* Consume the `...' token. */
	    cp_lexer_consume_token (parser->lexer);
	    maybe_warn_variadic_templates ();
	    *is_parameter_pack = true;
	  }
	/* If the next token is an identifier, then it names the
	   parameter.  */
	if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
	  identifier = cp_parser_identifier (parser);
	else
	  identifier = NULL_TREE;  /* Nameless type-parameter.  */
	/* Create the parameter.  */
	parameter = finish_template_type_parm (class_type_node, identifier);
	/* If the next token is an `=', we have a default argument.  */
	if (cp_lexer_next_token_is (parser->lexer, CPP_EQ))
	  {
	    /* Consume the `=' token.  */
	    cp_lexer_consume_token (parser->lexer);
	    /* Parse the default-argument with deferred access checking
	       disabled.  */
	    push_deferring_access_checks (dk_no_deferred);
	    default_argument = cp_parser_type_id (parser);
	    /* Template parameter packs cannot have default
	       arguments. */
	    if (*is_parameter_pack)
	      {
		if (identifier)
		  error_at (token->location,
			    "template parameter pack %qD cannot have a "
			    "default argument", identifier);
		else
		  error_at (token->location,
			    "template parameter packs cannot have "
			    "default arguments");
		/* Discard the offending default argument.  */
		default_argument = NULL_TREE;
	      }
	    pop_deferring_access_checks ();
	  }
	else
	  default_argument = NULL_TREE;
	/* Create the combined representation of the parameter and the
	   default argument.  */
	parameter = build_tree_list (default_argument, parameter);
      }
      break;
    case RID_TEMPLATE:
      {
	tree identifier;
	tree default_argument;
	/* Look for the `<'.  */
	cp_parser_require (parser, CPP_LESS, RT_LESS);
	/* Parse the template-parameter-list.  */
	cp_parser_template_parameter_list (parser);
	/* Look for the `>'.  */
	cp_parser_require (parser, CPP_GREATER, RT_GREATER);
	/* Look for the `class' keyword.  */
	cp_parser_require_keyword (parser, RID_CLASS, RT_CLASS);
	/* If the next token is an ellipsis, we have a template
	   argument pack. */
	if (cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS))
	  {
	    /* Consume the `...' token. */
	    cp_lexer_consume_token (parser->lexer);
	    maybe_warn_variadic_templates ();
	    *is_parameter_pack = true;
	  }
	/* If the next token is an `=', then there is a
	   default-argument.  If the next token is a `>', we are at
	   the end of the parameter-list.  If the next token is a `,',
	   then we are at the end of this parameter.  */
	if (cp_lexer_next_token_is_not (parser->lexer, CPP_EQ)
	    && cp_lexer_next_token_is_not (parser->lexer, CPP_GREATER)
	    && cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA))
	  {
	    identifier = cp_parser_identifier (parser);
	    /* Treat invalid names as if the parameter were nameless.  */
	    if (identifier == error_mark_node)
	      identifier = NULL_TREE;
	  }
	else
	  identifier = NULL_TREE;
	/* Create the template parameter.  */
	parameter = finish_template_template_parm (class_type_node,
						   identifier);
	/* If the next token is an `=', then there is a
	   default-argument.  */
	if (cp_lexer_next_token_is (parser->lexer, CPP_EQ))
	  {
	    bool is_template;
	    /* Consume the `='.  */
	    cp_lexer_consume_token (parser->lexer);
	    /* Parse the id-expression with deferred access checking
	       disabled.  */
	    push_deferring_access_checks (dk_no_deferred);
	    /* save token before parsing the id-expression, for error
	       reporting */
	    token = cp_lexer_peek_token (parser->lexer);
	    default_argument
	      = cp_parser_id_expression (parser,
					 /*template_keyword_p=*/false,
					 /*check_dependency_p=*/true,
					 /*template_p=*/&is_template,
					 /*declarator_p=*/false,
					 /*optional_p=*/false);
	    if (TREE_CODE (default_argument) == TYPE_DECL)
	      /* If the id-expression was a template-id that refers to
		 a template-class, we already have the declaration here,
		 so no further lookup is needed.  */
		 ;
	    else
	      /* Look up the name.  */
	      default_argument
		= cp_parser_lookup_name (parser, default_argument,
					 none_type,
					 /*is_template=*/is_template,
					 /*is_namespace=*/false,
					 /*check_dependency=*/true,
					 /*ambiguous_decls=*/NULL,
					 token->location);
	    /* See if the default argument is valid.  */
	    default_argument
	      = check_template_template_default_arg (default_argument);
	    /* Template parameter packs cannot have default
	       arguments. */
	    if (*is_parameter_pack)
	      {
		if (identifier)
		  error_at (token->location,
			    "template parameter pack %qD cannot "
			    "have a default argument",
			    identifier);
		else
		  error_at (token->location, "template parameter packs cannot "
			    "have default arguments");
		/* Discard the offending default argument.  */
		default_argument = NULL_TREE;
	      }
	    pop_deferring_access_checks ();
	  }
	else
	  default_argument = NULL_TREE;
	/* Create the combined representation of the parameter and the
	   default argument.  */
	parameter = build_tree_list (default_argument, parameter);
      }
      break;
    default:
      /* cp_parser_require above only accepts class/typename/template,
	 so no other keyword can reach here.  */
      gcc_unreachable ();
      break;
    }
  return parameter;
}
/* Parse a template-id.
template-id:
template-name < template-argument-list [opt] >
If TEMPLATE_KEYWORD_P is TRUE, then we have just seen the
`template' keyword. In this case, a TEMPLATE_ID_EXPR will be
returned. Otherwise, if the template-name names a function, or set
of functions, returns a TEMPLATE_ID_EXPR. If the template-name
names a class, returns a TYPE_DECL for the specialization.
If CHECK_DEPENDENCY_P is FALSE, names are looked up in
uninstantiated templates. */
static tree
cp_parser_template_id (cp_parser *parser,
		       bool template_keyword_p,
		       bool check_dependency_p,
		       bool is_declaration)
{
  int i;
  tree templ;
  tree arguments;
  tree template_id;
  /* Position of the first token of the template-id, recorded only
     when parsing tentatively; 0 means "not recorded".  */
  cp_token_position start_of_id = 0;
  deferred_access_check *chk;
  VEC (deferred_access_check,gc) *access_check;
  cp_token *next_token = NULL, *next_token_2 = NULL;
  bool is_identifier;
  /* If the next token corresponds to a template-id, there is no need
     to reparse it.  */
  next_token = cp_lexer_peek_token (parser->lexer);
  if (next_token->type == CPP_TEMPLATE_ID)
    {
      struct tree_check *check_value;
      /* Get the stored value.  */
      check_value = cp_lexer_consume_token (parser->lexer)->u.tree_check_value;
      /* Perform any access checks that were deferred.  */
      access_check = check_value->checks;
      if (access_check)
	{
	  FOR_EACH_VEC_ELT (deferred_access_check, access_check, i, chk)
	    perform_or_defer_access_check (chk->binfo,
					   chk->decl,
					   chk->diag_decl);
	}
      /* Return the stored value.  */
      return check_value->value;
    }
  /* Avoid performing name lookup if there is no possibility of
     finding a template-id: the next token must be a name or
     `operator', and a name must be followed by something that can
     start a template-argument list.  */
  if ((next_token->type != CPP_NAME && next_token->keyword != RID_OPERATOR)
      || (next_token->type == CPP_NAME
	  && !cp_parser_nth_token_starts_template_argument_list_p
	       (parser, 2)))
    {
      cp_parser_error (parser, "expected template-id");
      return error_mark_node;
    }
  /* Remember where the template-id starts.  */
  if (cp_parser_uncommitted_to_tentative_parse_p (parser))
    start_of_id = cp_lexer_token_position (parser->lexer, false);
  push_deferring_access_checks (dk_deferred);
  /* Parse the template-name.  */
  is_identifier = false;
  templ = cp_parser_template_name (parser, template_keyword_p,
				   check_dependency_p,
				   is_declaration,
				   &is_identifier);
  /* Bail out early if the name was not actually a template-name.  */
  if (templ == error_mark_node || is_identifier)
    {
      pop_deferring_access_checks ();
      return templ;
    }
  /* If we find the sequence `[:' after a template-name, it's probably
     a digraph-typo for `< ::'. Substitute the tokens and check if we can
     parse correctly the argument list.  */
  next_token = cp_lexer_peek_token (parser->lexer);
  next_token_2 = cp_lexer_peek_nth_token (parser->lexer, 2);
  if (next_token->type == CPP_OPEN_SQUARE
      && next_token->flags & DIGRAPH
      && next_token_2->type == CPP_COLON
      && !(next_token_2->flags & PREV_WHITE))
    {
      cp_parser_parse_tentatively (parser);
      /* Change `:' into `::'.  */
      next_token_2->type = CPP_SCOPE;
      /* Consume the first token (CPP_OPEN_SQUARE - which we pretend it is
	 CPP_LESS.  */
      cp_lexer_consume_token (parser->lexer);
      /* Parse the arguments.  */
      arguments = cp_parser_enclosed_template_argument_list (parser);
      if (!cp_parser_parse_definitely (parser))
	{
	  /* If we couldn't parse an argument list, then we revert our changes
	     and return simply an error. Maybe this is not a template-id
	     after all. Note that the token substitution above is undone.  */
	  next_token_2->type = CPP_COLON;
	  cp_parser_error (parser, "expected %<<%>");
	  pop_deferring_access_checks ();
	  return error_mark_node;
	}
      /* Otherwise, emit an error about the invalid digraph, but continue
	 parsing because we got our argument list.  */
      if (permerror (next_token->location,
		     "%<<::%> cannot begin a template-argument list"))
	{
	  static bool hint = false;
	  inform (next_token->location,
		  "%<<:%> is an alternate spelling for %<[%>."
		  " Insert whitespace between %<<%> and %<::%>");
	  /* The -fpermissive hint is only shown once per compilation.  */
	  if (!hint && !flag_permissive)
	    {
	      inform (next_token->location, "(if you use %<-fpermissive%>"
		      " G++ will accept your code)");
	      hint = true;
	    }
	}
    }
  else
    {
      /* Look for the `<' that starts the template-argument-list.  */
      if (!cp_parser_require (parser, CPP_LESS, RT_LESS))
	{
	  pop_deferring_access_checks ();
	  return error_mark_node;
	}
      /* Parse the arguments.  */
      arguments = cp_parser_enclosed_template_argument_list (parser);
    }
  /* Build a representation of the specialization.  */
  if (TREE_CODE (templ) == IDENTIFIER_NODE)
    template_id = build_min_nt (TEMPLATE_ID_EXPR, templ, arguments);
  else if (DECL_TYPE_TEMPLATE_P (templ)
	   || DECL_TEMPLATE_TEMPLATE_PARM_P (templ))
    {
      bool entering_scope;
      /* In "template <typename T> ... A<T>::", A<T> is the abstract A
	 template (rather than some instantiation thereof) only if
	 is not nested within some other construct.  For example, in
	 "template <typename T> void f(T) { A<T>::", A<T> is just an
	 instantiation of A.  */
      entering_scope = (template_parm_scope_p ()
			&& cp_lexer_next_token_is (parser->lexer,
						   CPP_SCOPE));
      template_id
	= finish_template_type (templ, arguments, entering_scope);
    }
  else
    {
      /* If it's not a class-template or a template-template, it should be
	 a function-template.  */
      gcc_assert ((DECL_FUNCTION_TEMPLATE_P (templ)
		   || TREE_CODE (templ) == OVERLOAD
		   || BASELINK_P (templ)));
      template_id = lookup_template_function (templ, arguments);
    }
  /* If parsing tentatively, replace the sequence of tokens that makes
     up the template-id with a CPP_TEMPLATE_ID token.  That way,
     should we re-parse the token stream, we will not have to repeat
     the effort required to do the parse, nor will we issue duplicate
     error messages about problems during instantiation of the
     template.  */
  if (start_of_id)
    {
      cp_token *token = cp_lexer_token_at (parser->lexer, start_of_id);
      /* Reset the contents of the START_OF_ID token.  */
      token->type = CPP_TEMPLATE_ID;
      /* Retrieve any deferred checks.  Do not pop this access checks yet
	 so the memory will not be reclaimed during token replacing below.  */
      token->u.tree_check_value = ggc_alloc_cleared_tree_check ();
      token->u.tree_check_value->value = template_id;
      token->u.tree_check_value->checks = get_deferred_access_checks ();
      token->keyword = RID_MAX;
      /* Purge all subsequent tokens.  */
      cp_lexer_purge_tokens_after (parser->lexer, start_of_id);
      /* ??? Can we actually assume that, if template_id ==
	 error_mark_node, we will have issued a diagnostic to the
	 user, as opposed to simply marking the tentative parse as
	 failed?  */
      if (cp_parser_error_occurred (parser) && template_id != error_mark_node)
	error_at (token->location, "parse error in template argument list");
    }
  pop_deferring_access_checks ();
  return template_id;
}
/* Parse a template-name.
template-name:
identifier
The standard should actually say:
template-name:
identifier
operator-function-id
A defect report has been filed about this issue.
A conversion-function-id cannot be a template name because they cannot
be part of a template-id. In fact, looking at this code:
a.operator K<int>()
the conversion-function-id is "operator K<int>", and K<int> is a type-id.
It is impossible to call a templated conversion-function-id with an
explicit argument list, since the only allowed template parameter is
the type to which it is converting.
If TEMPLATE_KEYWORD_P is true, then we have just seen the
`template' keyword, in a construction like:
T::template f<3>()
In that case `f' is taken to be a template-name, even though there
is no way of knowing for sure.
Returns the TEMPLATE_DECL for the template, or an OVERLOAD if the
name refers to a set of overloaded functions, at least one of which
is a template, or an IDENTIFIER_NODE with the name of the template,
if TEMPLATE_KEYWORD_P is true. If CHECK_DEPENDENCY_P is FALSE,
names are looked up inside uninstantiated templates. */
static tree
cp_parser_template_name (cp_parser* parser,
			 bool template_keyword_p,
			 bool check_dependency_p,
			 bool is_declaration,
			 bool *is_identifier)
{
  tree identifier;
  tree decl;
  tree fns;
  /* Remember the first token for diagnostics issued below.  */
  cp_token *token = cp_lexer_peek_token (parser->lexer);
  /* If the next token is `operator', then we have either an
     operator-function-id or a conversion-function-id.  */
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_OPERATOR))
    {
      /* We don't know whether we're looking at an
	 operator-function-id or a conversion-function-id.  */
      cp_parser_parse_tentatively (parser);
      /* Try an operator-function-id.  */
      identifier = cp_parser_operator_function_id (parser);
      /* If that didn't work, try a conversion-function-id.  */
      if (!cp_parser_parse_definitely (parser))
	{
	  /* Conversion-function-ids cannot be template-names (see the
	     block comment above), so this is an error.  */
	  cp_parser_error (parser, "expected template-name");
	  return error_mark_node;
	}
    }
  /* Look for the identifier.  */
  else
    identifier = cp_parser_identifier (parser);
  /* If we didn't find an identifier, we don't have a template-id.  */
  if (identifier == error_mark_node)
    return error_mark_node;
  /* If the name immediately followed the `template' keyword, then it
     is a template-name.  However, if the next token is not `<', then
     we do not treat it as a template-name, since it is not being used
     as part of a template-id.  This enables us to handle constructs
     like:
       template <typename T> struct S { S(); };
       template <typename T> S<T>::S();
     correctly.  We would treat `S' as a template -- if it were `S<T>'
     -- but we do not if there is no `<'.  */
  if (processing_template_decl
      && cp_parser_nth_token_starts_template_argument_list_p (parser, 1))
    {
      /* In a declaration, in a dependent context, we pretend that the
	 "template" keyword was present in order to improve error
	 recovery.  For example, given:
	   template <typename T> void f(T::X<int>);
	 we want to treat "X<int>" as a template-id.  */
      if (is_declaration
	  && !template_keyword_p
	  && parser->scope && TYPE_P (parser->scope)
	  && check_dependency_p
	  && dependent_scope_p (parser->scope)
	  /* Do not do this for dtors (or ctors), since they never
	     need the template keyword before their name.  */
	  && !constructor_name_p (identifier, parser->scope))
	{
	  cp_token_position start = 0;
	  /* Explain what went wrong.  */
	  error_at (token->location, "non-template %qD used as template",
		    identifier);
	  inform (token->location, "use %<%T::template %D%> to indicate that it is a template",
		  parser->scope, identifier);
	  /* If parsing tentatively, find the location of the "<" token.  */
	  if (cp_parser_simulate_error (parser))
	    start = cp_lexer_token_position (parser->lexer, true);
	  /* Parse the template arguments so that we can issue error
	     messages about them.  */
	  cp_lexer_consume_token (parser->lexer);
	  cp_parser_enclosed_template_argument_list (parser);
	  /* Skip tokens until we find a good place from which to
	     continue parsing.  */
	  cp_parser_skip_to_closing_parenthesis (parser,
						 /*recovering=*/true,
						 /*or_comma=*/true,
						 /*consume_paren=*/false);
	  /* If parsing tentatively, permanently remove the
	     template argument list.  That will prevent duplicate
	     error messages from being issued about the missing
	     "template" keyword.  */
	  if (start)
	    cp_lexer_purge_tokens_after (parser->lexer, start);
	  if (is_identifier)
	    *is_identifier = true;
	  return identifier;
	}
      /* If the "template" keyword is present, then there is generally
	 no point in doing name-lookup, so we just return IDENTIFIER.
	 But, if the qualifying scope is non-dependent then we can
	 (and must) do name-lookup normally.  */
      if (template_keyword_p
	  && (!parser->scope
	      || (TYPE_P (parser->scope)
		  && dependent_type_p (parser->scope))))
	return identifier;
    }
  /* Look up the name.  */
  decl = cp_parser_lookup_name (parser, identifier,
				none_type,
				/*is_template=*/true,
				/*is_namespace=*/false,
				check_dependency_p,
				/*ambiguous_decls=*/NULL,
				token->location);
  /* If DECL is a template, then the name was a template-name.  */
  if (TREE_CODE (decl) == TEMPLATE_DECL)
    ;
  else
    {
      tree fn = NULL_TREE;
      /* The standard does not explicitly indicate whether a name that
	 names a set of overloaded declarations, some of which are
	 templates, is a template-name.  However, such a name should
	 be a template-name; otherwise, there is no way to form a
	 template-id for the overloaded templates.  */
      fns = BASELINK_P (decl) ? BASELINK_FUNCTIONS (decl) : decl;
      if (TREE_CODE (fns) == OVERLOAD)
	/* Scan the overload set for at least one template.  */
	for (fn = fns; fn; fn = OVL_NEXT (fn))
	  if (TREE_CODE (OVL_CURRENT (fn)) == TEMPLATE_DECL)
	    break;
      if (!fn)
	{
	  /* The name does not name a template.  */
	  cp_parser_error (parser, "expected template-name");
	  return error_mark_node;
	}
    }
  /* If DECL is dependent, and refers to a function, then just return
     its name; we will look it up again during template instantiation.  */
  if (DECL_FUNCTION_TEMPLATE_P (decl) || !DECL_P (decl))
    {
      tree scope = ovl_scope (decl);
      if (TYPE_P (scope) && dependent_type_p (scope))
	return identifier;
    }
  return decl;
}
/* Parse a template-argument-list.
template-argument-list:
template-argument ... [opt]
template-argument-list , template-argument ... [opt]
Returns a TREE_VEC containing the arguments. */
static tree
cp_parser_template_argument_list (cp_parser* parser)
{
  /* Arguments are collected into a small on-stack array first; the
     array spills to the heap only for unusually long lists.  */
  tree stack_args[10];
  tree *args = stack_args;
  unsigned count = 0;
  unsigned capacity = 10;
  tree vec;
  unsigned ix;
  bool saved_in_tal_p;
  bool saved_ice_p;
  bool saved_non_ice_p;

  saved_in_tal_p = parser->in_template_argument_list_p;
  parser->in_template_argument_list_p = true;
  /* Even if the template-id appears in an integral
     constant-expression, the contents of the argument list do
     not.  */
  saved_ice_p = parser->integral_constant_expression_p;
  parser->integral_constant_expression_p = false;
  saved_non_ice_p = parser->non_integral_constant_expression_p;
  parser->non_integral_constant_expression_p = false;

  /* Parse one argument per iteration; a comma continues the list.  */
  do
    {
      tree arg;

      if (count > 0)
	/* Consume the comma separating this argument from the
	   previous one.  */
	cp_lexer_consume_token (parser->lexer);
      /* Parse the template-argument.  */
      arg = cp_parser_template_argument (parser);
      /* A trailing ellipsis turns the argument into a pack
	 expansion.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS))
	{
	  if (arg == error_mark_node)
	    {
	      cp_token *token = cp_lexer_peek_token (parser->lexer);
	      error_at (token->location,
			"expected parameter pack before %<...%>");
	    }
	  /* Consume the `...' token.  */
	  cp_lexer_consume_token (parser->lexer);
	  /* Make the argument into a TYPE_PACK_EXPANSION or
	     EXPR_PACK_EXPANSION.  */
	  arg = make_pack_expansion (arg);
	}
      /* Grow the array when it is full, doubling the capacity and
	 moving off the stack buffer on the first spill.  */
      if (count == capacity)
	{
	  capacity *= 2;
	  if (args == stack_args)
	    {
	      args = XNEWVEC (tree, capacity);
	      memcpy (args, stack_args, sizeof (tree) * count);
	    }
	  else
	    args = XRESIZEVEC (tree, args, capacity);
	}
      args[count++] = arg;
    }
  while (cp_lexer_next_token_is (parser->lexer, CPP_COMMA));

  /* Copy the collected arguments into a TREE_VEC of exact size.  */
  vec = make_tree_vec (count);
  for (ix = 0; ix < count; ix++)
    TREE_VEC_ELT (vec, ix) = args[ix];
  if (args != stack_args)
    free (args);

  /* Restore the parser state saved on entry.  */
  parser->non_integral_constant_expression_p = saved_non_ice_p;
  parser->integral_constant_expression_p = saved_ice_p;
  parser->in_template_argument_list_p = saved_in_tal_p;
#ifdef ENABLE_CHECKING
  SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT (vec, TREE_VEC_LENGTH (vec));
#endif
  return vec;
}
/* Parse a template-argument.
template-argument:
assignment-expression
type-id
id-expression
The representation is that of an assignment-expression, type-id, or
id-expression -- except that the qualified id-expression is
evaluated, so that the value returned is either a DECL or an
OVERLOAD.
Although the standard says "assignment-expression", it forbids
throw-expressions or assignments in the template argument.
Therefore, we use "conditional-expression" instead. */
static tree
cp_parser_template_argument (cp_parser* parser)
{
  tree argument;
  bool template_p;
  bool address_p;
  /* Set when a valid type-id was followed by `>>' in C++98 mode, in
     which case we retry the expression alternatives first and fall
     back to the type-id at the end.  */
  bool maybe_type_id = false;
  cp_token *token = NULL, *argument_start_token = NULL;
  cp_id_kind idk;
  /* There's really no way to know what we're looking at, so we just
     try each alternative in order.
       [temp.arg]
       In a template-argument, an ambiguity between a type-id and an
       expression is resolved to a type-id, regardless of the form of
       the corresponding template-parameter.
     Therefore, we try a type-id first.  */
  cp_parser_parse_tentatively (parser);
  argument = cp_parser_template_type_arg (parser);
  /* If there was no error parsing the type-id but the next token is a
     '>>', our behavior depends on which dialect of C++ we're
     parsing. In C++98, we probably found a typo for '> >'. But there
     are type-id which are also valid expressions. For instance:
       struct X { int operator >> (int); };
       template <int V> struct Foo {};
       Foo<X () >> 5> r;
     Here 'X()' is a valid type-id of a function type, but the user just
     wanted to write the expression "X() >> 5". Thus, we remember that we
     found a valid type-id, but we still try to parse the argument as an
     expression to see what happens.
     In C++0x, the '>>' will be considered two separate '>'
     tokens.  */
  if (!cp_parser_error_occurred (parser)
      && cxx_dialect == cxx98
      && cp_lexer_next_token_is (parser->lexer, CPP_RSHIFT))
    {
      maybe_type_id = true;
      cp_parser_abort_tentative_parse (parser);
    }
  else
    {
      /* If the next token isn't a `,' or a `>', then this argument wasn't
	 really finished. This means that the argument is not a valid
	 type-id.  */
      if (!cp_parser_next_token_ends_template_argument_p (parser))
	cp_parser_error (parser, "expected template-argument");
      /* If that worked, we're done.  */
      if (cp_parser_parse_definitely (parser))
	return argument;
    }
  /* We're still not sure what the argument will be.  */
  cp_parser_parse_tentatively (parser);
  /* Try a template.  */
  argument_start_token = cp_lexer_peek_token (parser->lexer);
  argument = cp_parser_id_expression (parser,
				      /*template_keyword_p=*/false,
				      /*check_dependency_p=*/true,
				      &template_p,
				      /*declarator_p=*/false,
				      /*optional_p=*/false);
  /* If the next token isn't a `,' or a `>', then this argument wasn't
     really finished.  */
  if (!cp_parser_next_token_ends_template_argument_p (parser))
    cp_parser_error (parser, "expected template-argument");
  if (!cp_parser_error_occurred (parser))
    {
      /* Figure out what is being referred to.  If the id-expression
	 was for a class template specialization, then we will have a
	 TYPE_DECL at this point.  There is no need to do name lookup
	 at this point in that case.  */
      if (TREE_CODE (argument) != TYPE_DECL)
	argument = cp_parser_lookup_name (parser, argument,
					  none_type,
					  /*is_template=*/template_p,
					  /*is_namespace=*/false,
					  /*check_dependency=*/true,
					  /*ambiguous_decls=*/NULL,
					  argument_start_token->location);
      /* Only a template (or an unbound class template) is a valid
	 result for this alternative.  */
      if (TREE_CODE (argument) != TEMPLATE_DECL
	  && TREE_CODE (argument) != UNBOUND_CLASS_TEMPLATE)
	cp_parser_error (parser, "expected template-name");
    }
  if (cp_parser_parse_definitely (parser))
    return argument;
  /* It must be a non-type argument.  There permitted cases are given
     in [temp.arg.nontype]:
     -- an integral constant-expression of integral or enumeration
	type; or
     -- the name of a non-type template-parameter; or
     -- the name of an object or function with external linkage...
     -- the address of an object or function with external linkage...
     -- a pointer to member...  */
  /* Look for a non-type template parameter.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
    {
      cp_parser_parse_tentatively (parser);
      argument = cp_parser_primary_expression (parser,
					       /*address_p=*/false,
					       /*cast_p=*/false,
					       /*template_arg_p=*/true,
					       &idk);
      /* Accept only a bare template parameter followed by the end of
	 the argument.  */
      if (TREE_CODE (argument) != TEMPLATE_PARM_INDEX
	  || !cp_parser_next_token_ends_template_argument_p (parser))
	cp_parser_simulate_error (parser);
      if (cp_parser_parse_definitely (parser))
	return argument;
    }
  /* If the next token is "&", the argument must be the address of an
     object or function with external linkage.  */
  address_p = cp_lexer_next_token_is (parser->lexer, CPP_AND);
  if (address_p)
    cp_lexer_consume_token (parser->lexer);
  /* See if we might have an id-expression.  */
  token = cp_lexer_peek_token (parser->lexer);
  if (token->type == CPP_NAME
      || token->keyword == RID_OPERATOR
      || token->type == CPP_SCOPE
      || token->type == CPP_TEMPLATE_ID
      || token->type == CPP_NESTED_NAME_SPECIFIER)
    {
      cp_parser_parse_tentatively (parser);
      argument = cp_parser_primary_expression (parser,
					       address_p,
					       /*cast_p=*/false,
					       /*template_arg_p=*/true,
					       &idk);
      if (cp_parser_error_occurred (parser)
	  || !cp_parser_next_token_ends_template_argument_p (parser))
	cp_parser_abort_tentative_parse (parser);
      else
	{
	  tree probe;
	  /* Strip an implicit dereference of a reference.  */
	  if (TREE_CODE (argument) == INDIRECT_REF)
	    {
	      gcc_assert (REFERENCE_REF_P (argument));
	      argument = TREE_OPERAND (argument, 0);
	    }
	  /* If we're in a template, we represent a qualified-id referring
	     to a static data member as a SCOPE_REF even if the scope isn't
	     dependent so that we can check access control later.  */
	  probe = argument;
	  if (TREE_CODE (probe) == SCOPE_REF)
	    probe = TREE_OPERAND (probe, 1);
	  if (TREE_CODE (probe) == VAR_DECL)
	    {
	      /* A variable without external linkage might still be a
		 valid constant-expression, so no error is issued here
		 if the external-linkage check fails.  */
	      if (!address_p && !DECL_EXTERNAL_LINKAGE_P (probe))
		cp_parser_simulate_error (parser);
	    }
	  else if (is_overloaded_fn (argument))
	    /* All overloaded functions are allowed; if the external
	       linkage test does not pass, an error will be issued
	       later.  */
	    ;
	  else if (address_p
		   && (TREE_CODE (argument) == OFFSET_REF
		       || TREE_CODE (argument) == SCOPE_REF))
	    /* A pointer-to-member.  */
	    ;
	  else if (TREE_CODE (argument) == TEMPLATE_PARM_INDEX)
	    ;
	  else
	    cp_parser_simulate_error (parser);
	  if (cp_parser_parse_definitely (parser))
	    {
	      /* Re-apply the `&' we consumed above.  */
	      if (address_p)
		argument = build_x_unary_op (ADDR_EXPR, argument,
					     tf_warning_or_error);
	      return argument;
	    }
	}
    }
  /* If the argument started with "&", there are no other valid
     alternatives at this point.  */
  if (address_p)
    {
      cp_parser_error (parser, "invalid non-type template argument");
      return error_mark_node;
    }
  /* If the argument wasn't successfully parsed as a type-id followed
     by '>>', the argument can only be a constant expression now.
     Otherwise, we try parsing the constant-expression tentatively,
     because the argument could really be a type-id.  */
  if (maybe_type_id)
    cp_parser_parse_tentatively (parser);
  argument = cp_parser_constant_expression (parser,
					    /*allow_non_constant_p=*/false,
					    /*non_constant_p=*/NULL);
  argument = fold_non_dependent_expr (argument);
  if (!maybe_type_id)
    return argument;
  if (!cp_parser_next_token_ends_template_argument_p (parser))
    cp_parser_error (parser, "expected template-argument");
  if (cp_parser_parse_definitely (parser))
    return argument;
  /* We did our best to parse the argument as a non type-id, but that
     was the only alternative that matched (albeit with a '>' after
     it). We can assume it's just a typo from the user, and a
     diagnostic will then be issued.  */
  return cp_parser_template_type_arg (parser);
}
/* Parse an explicit-instantiation.
explicit-instantiation:
template declaration
Although the standard says `declaration', what it really means is:
explicit-instantiation:
template decl-specifier-seq [opt] declarator [opt] ;
Things like `template int S<int>::i = 5, int S<double>::j;' are not
supposed to be allowed. A defect report has been filed about this
issue.
GNU Extension:
explicit-instantiation:
storage-class-specifier template
decl-specifier-seq [opt] declarator [opt] ;
function-specifier template
decl-specifier-seq [opt] declarator [opt] ; */
static void
cp_parser_explicit_instantiation (cp_parser* parser)
{
int declares_class_or_enum;
cp_decl_specifier_seq decl_specifiers;
tree extension_specifier = NULL_TREE;
/* Attribute the time spent here to template instantiation.  */
timevar_push (TV_TEMPLATE_INST);
/* Look for an (optional) storage-class-specifier or
function-specifier. */
if (cp_parser_allow_gnu_extensions_p (parser))
{
extension_specifier
= cp_parser_storage_class_specifier_opt (parser);
if (!extension_specifier)
extension_specifier
= cp_parser_function_specifier_opt (parser,
/*decl_specs=*/NULL);
}
/* Look for the `template' keyword. */
cp_parser_require_keyword (parser, RID_TEMPLATE, RT_TEMPLATE);
/* Let the front end know that we are processing an explicit
instantiation. */
begin_explicit_instantiation ();
/* [temp.explicit] says that we are supposed to ignore access
control while processing explicit instantiation directives.
Every path below must balance this with pop_deferring_access_checks. */
push_deferring_access_checks (dk_no_check);
/* Parse a decl-specifier-seq. */
cp_parser_decl_specifier_seq (parser,
CP_PARSER_FLAGS_OPTIONAL,
&decl_specifiers,
&declares_class_or_enum);
/* If there was exactly one decl-specifier, and it declared a class,
and there's no declarator, then we have an explicit type
instantiation. */
if (declares_class_or_enum && cp_parser_declares_only_class_p (parser))
{
tree type;
type = check_tag_decl (&decl_specifiers);
/* Turn access control back on for names used during
template instantiation. */
pop_deferring_access_checks ();
if (type)
do_type_instantiation (type, extension_specifier,
/*complain=*/tf_error);
}
else
{
cp_declarator *declarator;
tree decl;
/* Parse the declarator. */
declarator
= cp_parser_declarator (parser, CP_PARSER_DECLARATOR_NAMED,
/*ctor_dtor_or_conv_p=*/NULL,
/*parenthesized_p=*/NULL,
/*member_p=*/false);
/* Bit 2 of DECLARES_CLASS_OR_ENUM means the type was *defined*
(not merely declared) in the decl-specifier-seq.  */
if (declares_class_or_enum & 2)
cp_parser_check_for_definition_in_return_type (declarator,
decl_specifiers.type,
decl_specifiers.type_location);
if (declarator != cp_error_declarator)
{
/* [temp.explicit] forbids `inline' and `constexpr' here;
permerror allows -fpermissive to downgrade to a warning.  */
if (decl_specifiers.specs[(int)ds_inline])
permerror (input_location, "explicit instantiation shall not use"
" %<inline%> specifier");
if (decl_specifiers.specs[(int)ds_constexpr])
permerror (input_location, "explicit instantiation shall not use"
" %<constexpr%> specifier");
decl = grokdeclarator (declarator, &decl_specifiers,
NORMAL, 0, &decl_specifiers.attributes);
/* Turn access control back on for names used during
template instantiation. */
pop_deferring_access_checks ();
/* Do the explicit instantiation. */
do_decl_instantiation (decl, extension_specifier);
}
else
{
pop_deferring_access_checks ();
/* Skip the body of the explicit instantiation. */
cp_parser_skip_to_end_of_statement (parser);
}
}
/* We're done with the instantiation. */
end_explicit_instantiation ();
cp_parser_consume_semicolon_at_end_of_statement (parser);
timevar_pop (TV_TEMPLATE_INST);
}
/* Parse an explicit-specialization.
explicit-specialization:
template < > declaration
Although the standard says `declaration', what it really means is:
explicit-specialization:
template <> decl-specifier [opt] init-declarator [opt] ;
template <> function-definition
template <> explicit-specialization
template <> template-declaration */
static void
cp_parser_explicit_specialization (cp_parser* parser)
{
bool need_lang_pop;
cp_token *token = cp_lexer_peek_token (parser->lexer);
/* Look for the `template' keyword. */
cp_parser_require_keyword (parser, RID_TEMPLATE, RT_TEMPLATE);
/* Look for the `<'. */
cp_parser_require (parser, CPP_LESS, RT_LESS);
/* Look for the `>'. */
cp_parser_require (parser, CPP_GREATER, RT_GREATER);
/* We have processed another parameter list.  Balanced by the
decrement at the end of this function. */
++parser->num_template_parameter_lists;
/* [temp]
A template ... explicit specialization ... shall not have C
linkage. */
if (current_lang_name == lang_name_c)
{
error_at (token->location, "template specialization with C linkage");
/* Give it C++ linkage to avoid confusing other parts of the
front end. */
push_lang_context (lang_name_cplusplus);
need_lang_pop = true;
}
else
need_lang_pop = false;
/* Let the front end know that we are beginning a specialization. */
if (!begin_specialization ())
{
end_specialization ();
return;
}
/* If the next keyword is `template', we need to figure out whether
or not we're looking a template-declaration. */
if (cp_lexer_next_token_is_keyword (parser->lexer, RID_TEMPLATE))
{
/* `template < X' (where X is not `>') starts a template-declaration;
`template <>' is another explicit-specialization, handled by
recursing so nested `template <> template <> ...' chains work. */
if (cp_lexer_peek_nth_token (parser->lexer, 2)->type == CPP_LESS
&& cp_lexer_peek_nth_token (parser->lexer, 3)->type != CPP_GREATER)
cp_parser_template_declaration_after_export (parser,
/*member_p=*/false);
else
cp_parser_explicit_specialization (parser);
}
else
/* Parse the dependent declaration. */
cp_parser_single_declaration (parser,
/*checks=*/NULL,
/*member_p=*/false,
/*explicit_specialization_p=*/true,
/*friend_p=*/NULL);
/* We're done with the specialization. */
end_specialization ();
/* For the erroneous case of a template with C linkage, we pushed an
implicit C++ linkage scope; exit that scope now. */
if (need_lang_pop)
pop_lang_context ();
/* We're done with this parameter list. */
--parser->num_template_parameter_lists;
}
/* Parse a type-specifier.
type-specifier:
simple-type-specifier
class-specifier
enum-specifier
elaborated-type-specifier
cv-qualifier
GNU Extension:
type-specifier:
__complex__
Returns a representation of the type-specifier. For a
class-specifier, enum-specifier, or elaborated-type-specifier, a
TREE_TYPE is returned; otherwise, a TYPE_DECL is returned.
The parser flags FLAGS is used to control type-specifier parsing.
If IS_DECLARATION is TRUE, then this type-specifier is appearing
in a decl-specifier-seq.
If DECLARES_CLASS_OR_ENUM is non-NULL, and the type-specifier is a
class-specifier, enum-specifier, or elaborated-type-specifier, then
*DECLARES_CLASS_OR_ENUM is set to a nonzero value. The value is 1
if a type is declared; 2 if it is defined. Otherwise, it is set to
zero.
If IS_CV_QUALIFIER is non-NULL, and the type-specifier is a
cv-qualifier, then IS_CV_QUALIFIER is set to TRUE. Otherwise, it
is set to FALSE. */
static tree
cp_parser_type_specifier (cp_parser* parser,
cp_parser_flags flags,
cp_decl_specifier_seq *decl_specs,
bool is_declaration,
int* declares_class_or_enum,
bool* is_cv_qualifier)
{
tree type_spec = NULL_TREE;
cp_token *token;
enum rid keyword;
/* DS stays ds_last unless a cv-qualifier or `__complex__' keyword
is recognized below; ds_last therefore means "no simple keyword". */
cp_decl_spec ds = ds_last;
/* Assume this type-specifier does not declare a new type. */
if (declares_class_or_enum)
*declares_class_or_enum = 0;
/* And that it does not specify a cv-qualifier. */
if (is_cv_qualifier)
*is_cv_qualifier = false;
/* Peek at the next token. */
token = cp_lexer_peek_token (parser->lexer);
/* If we're looking at a keyword, we can use that to guide the
production we choose. */
keyword = token->keyword;
switch (keyword)
{
case RID_ENUM:
if ((flags & CP_PARSER_FLAGS_NO_TYPE_DEFINITIONS))
goto elaborated_type_specifier;
/* Look for the enum-specifier. */
type_spec = cp_parser_enum_specifier (parser);
/* If that worked, we're done. */
if (type_spec)
{
if (declares_class_or_enum)
*declares_class_or_enum = 2;
if (decl_specs)
cp_parser_set_decl_spec_type (decl_specs,
type_spec,
token->location,
/*type_definition_p=*/true);
return type_spec;
}
else
goto elaborated_type_specifier;
/* Any of these indicate either a class-specifier, or an
elaborated-type-specifier. */
case RID_CLASS:
case RID_STRUCT:
case RID_UNION:
if ((flags & CP_PARSER_FLAGS_NO_TYPE_DEFINITIONS))
goto elaborated_type_specifier;
/* Parse tentatively so that we can back up if we don't find a
class-specifier. */
cp_parser_parse_tentatively (parser);
/* Look for the class-specifier. */
type_spec = cp_parser_class_specifier (parser);
invoke_plugin_callbacks (PLUGIN_FINISH_TYPE, type_spec);
/* If that worked, we're done. */
if (cp_parser_parse_definitely (parser))
{
if (declares_class_or_enum)
*declares_class_or_enum = 2;
if (decl_specs)
cp_parser_set_decl_spec_type (decl_specs,
type_spec,
token->location,
/*type_definition_p=*/true);
return type_spec;
}
/* Fall through. */
elaborated_type_specifier:
/* We're declaring (not defining) a class or enum. */
if (declares_class_or_enum)
*declares_class_or_enum = 1;
/* Fall through. */
case RID_TYPENAME:
/* Look for an elaborated-type-specifier. */
type_spec
= (cp_parser_elaborated_type_specifier
(parser,
decl_specs && decl_specs->specs[(int) ds_friend],
is_declaration));
if (decl_specs)
cp_parser_set_decl_spec_type (decl_specs,
type_spec,
token->location,
/*type_definition_p=*/false);
return type_spec;
case RID_CONST:
ds = ds_const;
if (is_cv_qualifier)
*is_cv_qualifier = true;
break;
case RID_VOLATILE:
ds = ds_volatile;
if (is_cv_qualifier)
*is_cv_qualifier = true;
break;
case RID_RESTRICT:
ds = ds_restrict;
if (is_cv_qualifier)
*is_cv_qualifier = true;
break;
case RID_COMPLEX:
/* The `__complex__' keyword is a GNU extension. */
ds = ds_complex;
break;
default:
break;
}
/* Handle simple keywords (cv-qualifiers and `__complex__'). */
if (ds != ds_last)
{
if (decl_specs)
{
/* Counting occurrences lets later code diagnose duplicates. */
++decl_specs->specs[(int)ds];
decl_specs->any_specifiers_p = true;
}
/* Consume the keyword token and return its tree value. */
return cp_lexer_consume_token (parser->lexer)->u.value;
}
/* If we do not already have a type-specifier, assume we are looking
at a simple-type-specifier. */
type_spec = cp_parser_simple_type_specifier (parser,
decl_specs,
flags);
/* If we didn't find a type-specifier, and a type-specifier was not
optional in this context, issue an error message. */
if (!type_spec && !(flags & CP_PARSER_FLAGS_OPTIONAL))
{
cp_parser_error (parser, "expected type specifier");
return error_mark_node;
}
return type_spec;
}
/* Parse a simple-type-specifier.
simple-type-specifier:
:: [opt] nested-name-specifier [opt] type-name
:: [opt] nested-name-specifier template template-id
char
wchar_t
bool
short
int
long
signed
unsigned
float
double
void
C++0x Extension:
simple-type-specifier:
auto
decltype ( expression )
char16_t
char32_t
__underlying_type ( type-id )
GNU Extension:
simple-type-specifier:
__int128
__typeof__ unary-expression
__typeof__ ( type-id )
Returns the indicated TYPE_DECL. If DECL_SPECS is not NULL, it is
appropriately updated. */
static tree
cp_parser_simple_type_specifier (cp_parser* parser,
cp_decl_specifier_seq *decl_specs,
cp_parser_flags flags)
{
tree type = NULL_TREE;
cp_token *token;
/* Peek at the next token. */
token = cp_lexer_peek_token (parser->lexer);
/* If we're looking at a keyword, things are easy.  Cases that set
TYPE and break are handled by the common built-in-type code after
the switch; cases that return do their own bookkeeping. */
switch (token->keyword)
{
case RID_CHAR:
if (decl_specs)
decl_specs->explicit_char_p = true;
type = char_type_node;
break;
case RID_CHAR16:
type = char16_type_node;
break;
case RID_CHAR32:
type = char32_type_node;
break;
case RID_WCHAR:
type = wchar_type_node;
break;
case RID_BOOL:
type = boolean_type_node;
break;
case RID_SHORT:
if (decl_specs)
++decl_specs->specs[(int) ds_short];
type = short_integer_type_node;
break;
case RID_INT:
if (decl_specs)
decl_specs->explicit_int_p = true;
type = integer_type_node;
break;
case RID_INT128:
/* If the target does not support __int128, leave TYPE NULL so
the identifier-lookup path below produces the diagnostic. */
if (!int128_integer_type_node)
break;
if (decl_specs)
decl_specs->explicit_int128_p = true;
type = int128_integer_type_node;
break;
case RID_LONG:
if (decl_specs)
++decl_specs->specs[(int) ds_long];
type = long_integer_type_node;
break;
case RID_SIGNED:
if (decl_specs)
++decl_specs->specs[(int) ds_signed];
type = integer_type_node;
break;
case RID_UNSIGNED:
if (decl_specs)
++decl_specs->specs[(int) ds_unsigned];
type = unsigned_type_node;
break;
case RID_FLOAT:
type = float_type_node;
break;
case RID_DOUBLE:
type = double_type_node;
break;
case RID_VOID:
type = void_type_node;
break;
case RID_AUTO:
maybe_warn_cpp0x (CPP0X_AUTO);
type = make_auto ();
break;
case RID_DECLTYPE:
/* Since DR 743, decltype can either be a simple-type-specifier by
itself or begin a nested-name-specifier. Parsing it will replace
it with a CPP_DECLTYPE, so just rewind and let the CPP_DECLTYPE
handling below decide what to do. */
cp_parser_decltype (parser);
cp_lexer_set_token_position (parser->lexer, token);
break;
case RID_TYPEOF:
/* Consume the `typeof' token. */
cp_lexer_consume_token (parser->lexer);
/* Parse the operand to `typeof'. */
type = cp_parser_sizeof_operand (parser, RID_TYPEOF);
/* If it is not already a TYPE, take its type. */
if (!TYPE_P (type))
type = finish_typeof (type);
if (decl_specs)
cp_parser_set_decl_spec_type (decl_specs, type,
token->location,
/*type_definition_p=*/false);
return type;
case RID_UNDERLYING_TYPE:
type = cp_parser_trait_expr (parser, RID_UNDERLYING_TYPE);
if (decl_specs)
cp_parser_set_decl_spec_type (decl_specs, type,
token->location,
/*type_definition_p=*/false);
return type;
case RID_BASES:
case RID_DIRECT_BASES:
type = cp_parser_trait_expr (parser, token->keyword);
if (decl_specs)
cp_parser_set_decl_spec_type (decl_specs, type,
token->location,
/*type_definition_p=*/false);
return type;
default:
break;
}
/* If token is an already-parsed decltype not followed by ::,
it's a simple-type-specifier. */
if (token->type == CPP_DECLTYPE
&& cp_lexer_peek_nth_token (parser->lexer, 2)->type != CPP_SCOPE)
{
type = token->u.value;
if (decl_specs)
cp_parser_set_decl_spec_type (decl_specs, type,
token->location,
/*type_definition_p=*/false);
cp_lexer_consume_token (parser->lexer);
return type;
}
/* If the type-specifier was for a built-in type, we're done. */
if (type)
{
/* Record the type.  `signed', `unsigned', `short' and `long' were
already recorded as counters above, so don't set the type for
them (they may modify another specifier). */
if (decl_specs
&& (token->keyword != RID_SIGNED
&& token->keyword != RID_UNSIGNED
&& token->keyword != RID_SHORT
&& token->keyword != RID_LONG))
cp_parser_set_decl_spec_type (decl_specs,
type,
token->location,
/*type_definition_p=*/false);
if (decl_specs)
decl_specs->any_specifiers_p = true;
/* Consume the token. */
cp_lexer_consume_token (parser->lexer);
/* There is no valid C++ program where a non-template type is
followed by a "<". That usually indicates that the user thought
that the type was a template. */
cp_parser_check_for_invalid_template_id (parser, type, token->location);
/* Return the TYPE_DECL corresponding to the built-in type node. */
return TYPE_NAME (type);
}
/* The type-specifier must be a user-defined type. */
if (!(flags & CP_PARSER_FLAGS_NO_USER_DEFINED_TYPES))
{
bool qualified_p;
bool global_p;
/* Don't gobble tokens or issue error messages if this is an
optional type-specifier. */
if (flags & CP_PARSER_FLAGS_OPTIONAL)
cp_parser_parse_tentatively (parser);
/* Look for the optional `::' operator. */
global_p
= (cp_parser_global_scope_opt (parser,
/*current_scope_valid_p=*/false)
!= NULL_TREE);
/* Look for the nested-name specifier. */
qualified_p
= (cp_parser_nested_name_specifier_opt (parser,
/*typename_keyword_p=*/false,
/*check_dependency_p=*/true,
/*type_p=*/false,
/*is_declaration=*/false)
!= NULL_TREE);
token = cp_lexer_peek_token (parser->lexer);
/* If we have seen a nested-name-specifier, and the next token
is `template', then we are using the template-id production. */
if (parser->scope
&& cp_parser_optional_template_keyword (parser))
{
/* Look for the template-id. */
type = cp_parser_template_id (parser,
/*template_keyword_p=*/true,
/*check_dependency_p=*/true,
/*is_declaration=*/false);
/* If the template-id did not name a type, we are out of
luck. */
if (TREE_CODE (type) != TYPE_DECL)
{
cp_parser_error (parser, "expected template-id for type");
type = NULL_TREE;
}
}
/* Otherwise, look for a type-name. */
else
type = cp_parser_type_name (parser);
/* Keep track of all name-lookups performed in class scopes. */
if (type
&& !global_p
&& !qualified_p
&& TREE_CODE (type) == TYPE_DECL
&& TREE_CODE (DECL_NAME (type)) == IDENTIFIER_NODE)
maybe_note_name_used_in_class (DECL_NAME (type), type);
/* If it didn't work out, we don't have a TYPE. */
if ((flags & CP_PARSER_FLAGS_OPTIONAL)
&& !cp_parser_parse_definitely (parser))
type = NULL_TREE;
if (type && decl_specs)
cp_parser_set_decl_spec_type (decl_specs, type,
token->location,
/*type_definition_p=*/false);
}
/* If we didn't get a type-name, issue an error message. */
if (!type && !(flags & CP_PARSER_FLAGS_OPTIONAL))
{
cp_parser_error (parser, "expected type-name");
return error_mark_node;
}
if (type && type != error_mark_node)
{
/* See if TYPE is an Objective-C type, and if so, parse and
accept any protocol references following it. Do this before
the cp_parser_check_for_invalid_template_id() call, because
Objective-C types can be followed by '<...>' which would
enclose protocol names rather than template arguments, and so
everything is fine. */
if (c_dialect_objc () && !parser->scope
&& (objc_is_id (type) || objc_is_class_name (type)))
{
tree protos = cp_parser_objc_protocol_refs_opt (parser);
tree qual_type = objc_get_protocol_qualified_type (type, protos);
/* Clobber the "unqualified" type previously entered into
DECL_SPECS with the new, improved protocol-qualified version. */
if (decl_specs)
decl_specs->type = qual_type;
return qual_type;
}
/* There is no valid C++ program where a non-template type is
followed by a "<". That usually indicates that the user
thought that the type was a template. */
cp_parser_check_for_invalid_template_id (parser, TREE_TYPE (type),
token->location);
}
return type;
}
/* Parse a type-name.
type-name:
class-name
enum-name
typedef-name
simple-template-id [in c++0x]
enum-name:
identifier
typedef-name:
identifier
Returns a TYPE_DECL for the type. */
static tree
cp_parser_type_name (cp_parser* parser)
{
tree type_decl;
/* We can't know yet whether it is a class-name or not. */
cp_parser_parse_tentatively (parser);
/* Try a class-name. */
type_decl = cp_parser_class_name (parser,
/*typename_keyword_p=*/false,
/*template_keyword_p=*/false,
none_type,
/*check_dependency_p=*/true,
/*class_head_p=*/false,
/*is_declaration=*/false);
/* If it's not a class-name, keep looking. */
if (!cp_parser_parse_definitely (parser))
{
if (cxx_dialect < cxx0x)
/* It must be a typedef-name or an enum-name. */
return cp_parser_nonclass_name (parser);
/* In C++11 a template-id may also name an alias-template
specialization, so try that tentatively first. */
cp_parser_parse_tentatively (parser);
/* It is either a simple-template-id representing an
instantiation of an alias template... */
type_decl = cp_parser_template_id (parser,
/*template_keyword_p=*/false,
/*check_dependency_p=*/false,
/*is_declaration=*/false);
/* Note that this must be an instantiation of an alias template
because [temp.names]/6 says:
A template-id that names an alias template specialization
is a type-name.
Whereas [temp.names]/7 says:
A simple-template-id that names a class template
specialization is a class-name. */
if (type_decl != NULL_TREE
&& TREE_CODE (type_decl) == TYPE_DECL
&& TYPE_DECL_ALIAS_P (type_decl))
gcc_assert (DECL_TEMPLATE_INSTANTIATION (type_decl));
else
cp_parser_simulate_error (parser);
if (!cp_parser_parse_definitely (parser))
/* ... Or a typedef-name or an enum-name. */
return cp_parser_nonclass_name (parser);
}
return type_decl;
}
/* Parse a non-class type-name, that is, either an enum-name or a typedef-name.
enum-name:
identifier
typedef-name:
identifier
Returns a TYPE_DECL for the type. */
static tree
cp_parser_nonclass_name (cp_parser* parser)
{
tree type_decl;
tree identifier;
cp_token *token = cp_lexer_peek_token (parser->lexer);
identifier = cp_parser_identifier (parser);
if (identifier == error_mark_node)
return error_mark_node;
/* Look up the type-name. */
type_decl = cp_parser_lookup_name_simple (parser, identifier, token->location);
if (TREE_CODE (type_decl) == USING_DECL)
{
if (!DECL_DEPENDENT_P (type_decl))
type_decl = strip_using_decl (type_decl);
else if (USING_DECL_TYPENAME_P (type_decl))
{
/* We have found a type introduced by a using
declaration at class scope that refers to a dependent
type.
using typename :: [opt] nested-name-specifier unqualified-id ;
*/
type_decl = make_typename_type (TREE_TYPE (type_decl),
DECL_NAME (type_decl),
typename_type, tf_error);
if (type_decl != error_mark_node)
type_decl = TYPE_NAME (type_decl);
}
}
if (TREE_CODE (type_decl) != TYPE_DECL
&& (objc_is_id (identifier) || objc_is_class_name (identifier)))
{
/* See if this is an Objective-C type. */
tree protos = cp_parser_objc_protocol_refs_opt (parser);
tree type = objc_get_protocol_qualified_type (identifier, protos);
if (type)
type_decl = TYPE_NAME (type);
}
/* Issue an error if we did not find a type-name. */
if (TREE_CODE (type_decl) != TYPE_DECL
/* In Objective-C, we have the complication that class names are
normally type names and start declarations (eg, the
"NSObject" in "NSObject *object;"), but can be used in an
Objective-C 2.0 dot-syntax (as in "NSObject.version") which
is an expression. So, a classname followed by a dot is not a
valid type-name. */
|| (objc_is_class_name (TREE_TYPE (type_decl))
&& cp_lexer_peek_token (parser->lexer)->type == CPP_DOT))
{
/* Only report the lookup failure when not parsing tentatively;
cp_parser_simulate_error returns true in tentative contexts. */
if (!cp_parser_simulate_error (parser))
cp_parser_name_lookup_error (parser, identifier, type_decl,
NLE_TYPE, token->location);
return error_mark_node;
}
/* Remember that the name was used in the definition of the
current class so that we can check later to see if the
meaning would have been different after the class was
entirely defined. */
else if (type_decl != error_mark_node
&& !parser->scope)
maybe_note_name_used_in_class (identifier, type_decl);
return type_decl;
}
/* Parse an elaborated-type-specifier. Note that the grammar given
here incorporates the resolution to DR68.
elaborated-type-specifier:
class-key :: [opt] nested-name-specifier [opt] identifier
class-key :: [opt] nested-name-specifier [opt] template [opt] template-id
enum-key :: [opt] nested-name-specifier [opt] identifier
typename :: [opt] nested-name-specifier identifier
typename :: [opt] nested-name-specifier template [opt]
template-id
GNU extension:
elaborated-type-specifier:
class-key attributes :: [opt] nested-name-specifier [opt] identifier
class-key attributes :: [opt] nested-name-specifier [opt]
template [opt] template-id
enum attributes :: [opt] nested-name-specifier [opt] identifier
If IS_FRIEND is TRUE, then this elaborated-type-specifier is being
declared `friend'. If IS_DECLARATION is TRUE, then this
elaborated-type-specifier appears in a decl-specifiers-seq, i.e.,
something is being declared.
Returns the TYPE specified. */
static tree
cp_parser_elaborated_type_specifier (cp_parser* parser,
bool is_friend,
bool is_declaration)
{
enum tag_types tag_type;
tree identifier;
tree type = NULL_TREE;
tree attributes = NULL_TREE;
tree globalscope;
cp_token *token = NULL;
/* See if we're looking at the `enum' keyword. */
if (cp_lexer_next_token_is_keyword (parser->lexer, RID_ENUM))
{
/* Consume the `enum' token. */
cp_lexer_consume_token (parser->lexer);
/* Remember that it's an enumeration type. */
tag_type = enum_type;
/* Issue a warning if the `struct' or `class' key (for C++0x scoped
enums) is used here. */
if (cp_lexer_next_token_is_keyword (parser->lexer, RID_CLASS)
|| cp_lexer_next_token_is_keyword (parser->lexer, RID_STRUCT))
{
pedwarn (input_location, 0, "elaborated-type-specifier "
"for a scoped enum must not use the %<%D%> keyword",
cp_lexer_peek_token (parser->lexer)->u.value);
/* Consume the `struct' or `class' and parse it anyway. */
cp_lexer_consume_token (parser->lexer);
}
/* Parse the attributes. */
attributes = cp_parser_attributes_opt (parser);
}
/* Or, it might be `typename'. */
else if (cp_lexer_next_token_is_keyword (parser->lexer,
RID_TYPENAME))
{
/* Consume the `typename' token. */
cp_lexer_consume_token (parser->lexer);
/* Remember that it's a `typename' type. */
tag_type = typename_type;
}
/* Otherwise it must be a class-key. */
else
{
tag_type = cp_parser_class_key (parser);
if (tag_type == none_type)
return error_mark_node;
/* Parse the attributes. */
attributes = cp_parser_attributes_opt (parser);
}
/* Look for the `::' operator. */
globalscope = cp_parser_global_scope_opt (parser,
/*current_scope_valid_p=*/false);
/* Look for the nested-name-specifier.  After `typename' (without a
leading `::') the nested-name-specifier is mandatory. */
if (tag_type == typename_type && !globalscope)
{
if (!cp_parser_nested_name_specifier (parser,
/*typename_keyword_p=*/true,
/*check_dependency_p=*/true,
/*type_p=*/true,
is_declaration))
return error_mark_node;
}
else
/* Even though `typename' is not present, the proposed resolution
to Core Issue 180 says that in `class A<T>::B', `B' should be
considered a type-name, even if `A<T>' is dependent. */
cp_parser_nested_name_specifier_opt (parser,
/*typename_keyword_p=*/true,
/*check_dependency_p=*/true,
/*type_p=*/true,
is_declaration);
/* For everything but enumeration types, consider a template-id.
For an enumeration type, consider only a plain identifier. */
if (tag_type != enum_type)
{
bool template_p = false;
tree decl;
/* Allow the `template' keyword. */
template_p = cp_parser_optional_template_keyword (parser);
/* If we didn't see `template', we don't know if there's a
template-id or not. */
if (!template_p)
cp_parser_parse_tentatively (parser);
/* Parse the template-id. */
token = cp_lexer_peek_token (parser->lexer);
decl = cp_parser_template_id (parser, template_p,
/*check_dependency_p=*/true,
is_declaration);
/* If we didn't find a template-id, look for an ordinary
identifier. */
if (!template_p && !cp_parser_parse_definitely (parser))
;
/* If DECL is a TEMPLATE_ID_EXPR, and the `typename' keyword is
in effect, then we must assume that, upon instantiation, the
template will correspond to a class. */
else if (TREE_CODE (decl) == TEMPLATE_ID_EXPR
&& tag_type == typename_type)
type = make_typename_type (parser->scope, decl,
typename_type,
/*complain=*/tf_error);
/* If the `typename' keyword is in effect and DECL is not a type
decl, then TYPE is nonexistent. */
else if (tag_type == typename_type && TREE_CODE (decl) != TYPE_DECL)
type = NULL_TREE;
else
type = check_elaborated_type_specifier (tag_type, decl,
/*allow_template_p=*/true);
}
/* TYPE is still NULL here either for an enum or when the
template-id path above did not yield a type; fall back to a
plain identifier. */
if (!type)
{
token = cp_lexer_peek_token (parser->lexer);
identifier = cp_parser_identifier (parser);
if (identifier == error_mark_node)
{
parser->scope = NULL_TREE;
return error_mark_node;
}
/* For a `typename', we needn't call xref_tag. */
if (tag_type == typename_type
&& TREE_CODE (parser->scope) != NAMESPACE_DECL)
return cp_parser_make_typename_type (parser, parser->scope,
identifier,
token->location);
/* Look up a qualified name in the usual way. */
if (parser->scope)
{
tree decl;
tree ambiguous_decls;
decl = cp_parser_lookup_name (parser, identifier,
tag_type,
/*is_template=*/false,
/*is_namespace=*/false,
/*check_dependency=*/true,
&ambiguous_decls,
token->location);
/* If the lookup was ambiguous, an error will already have been
issued. */
if (ambiguous_decls)
return error_mark_node;
/* If we are parsing friend declaration, DECL may be a
TEMPLATE_DECL tree node here. However, we need to check
whether this TEMPLATE_DECL results in valid code. Consider
the following example:
namespace N {
template <class T> class C {};
}
class X {
template <class T> friend class N::C; // #1, valid code
};
template <class T> class Y {
friend class N::C; // #2, invalid code
};
For both case #1 and #2, we arrive at a TEMPLATE_DECL after
name lookup of `N::C'. We see that friend declaration must
be template for the code to be valid. Note that
processing_template_decl does not work here since it is
always 1 for the above two cases. */
decl = (cp_parser_maybe_treat_template_as_class
(decl, /*tag_name_p=*/is_friend
&& parser->num_template_parameter_lists));
if (TREE_CODE (decl) != TYPE_DECL)
{
cp_parser_diagnose_invalid_type_name (parser,
parser->scope,
identifier,
token->location);
return error_mark_node;
}
if (TREE_CODE (TREE_TYPE (decl)) != TYPENAME_TYPE)
{
bool allow_template = (parser->num_template_parameter_lists
|| DECL_SELF_REFERENCE_P (decl));
type = check_elaborated_type_specifier (tag_type, decl,
allow_template);
if (type == error_mark_node)
return error_mark_node;
}
/* Forward declarations of nested types, such as
class C1::C2;
class C1::C2::C3;
are invalid unless all components preceding the final '::'
are complete. If all enclosing types are complete, these
declarations become merely pointless.
Invalid forward declarations of nested types are errors
caught elsewhere in parsing. Those that are pointless arrive
here. */
if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON)
&& !is_friend && !processing_explicit_instantiation)
warning (0, "declaration %qD does not declare anything", decl);
type = TREE_TYPE (decl);
}
else
{
/* An elaborated-type-specifier sometimes introduces a new type and
sometimes names an existing type. Normally, the rule is that it
introduces a new type only if there is not an existing type of
the same name already in scope. For example, given:
struct S {};
void f() { struct S s; }
the `struct S' in the body of `f' is the same `struct S' as in
the global scope; the existing definition is used. However, if
there were no global declaration, this would introduce a new
local class named `S'.
An exception to this rule applies to the following code:
namespace N { struct S; }
Here, the elaborated-type-specifier names a new type
unconditionally; even if there is already an `S' in the
containing scope this declaration names a new type.
This exception only applies if the elaborated-type-specifier
forms the complete declaration:
[class.name]
A declaration consisting solely of `class-key identifier ;' is
either a redeclaration of the name in the current scope or a
forward declaration of the identifier as a class name. It
introduces the name into the current scope.
We are in this situation precisely when the next token is a `;'.
An exception to the exception is that a `friend' declaration does
*not* name a new type; i.e., given:
struct S { friend struct T; };
`T' is not a new type in the scope of `S'.
Also, `new struct S' or `sizeof (struct S)' never results in the
definition of a new type; a new type can only be declared in a
declaration context. */
tag_scope ts;
bool template_p;
if (is_friend)
/* Friends have special name lookup rules. */
ts = ts_within_enclosing_non_class;
else if (is_declaration
&& cp_lexer_next_token_is (parser->lexer,
CPP_SEMICOLON))
/* This is a `class-key identifier ;' */
ts = ts_current;
else
ts = ts_global;
template_p =
(parser->num_template_parameter_lists
&& (cp_parser_next_token_starts_class_definition_p (parser)
|| cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON)));
/* An unqualified name was used to reference this type, so
there were no qualifying templates. */
if (!cp_parser_check_template_parameters (parser,
/*num_templates=*/0,
token->location,
/*declarator=*/NULL))
return error_mark_node;
type = xref_tag (tag_type, identifier, ts, template_p);
}
}
if (type == error_mark_node)
return error_mark_node;
/* Allow attributes on forward declarations of classes. */
if (attributes)
{
if (TREE_CODE (type) == TYPENAME_TYPE)
warning (OPT_Wattributes,
"attributes ignored on uninstantiated type");
else if (tag_type != enum_type && CLASSTYPE_TEMPLATE_INSTANTIATION (type)
&& ! processing_explicit_instantiation)
warning (OPT_Wattributes,
"attributes ignored on template instantiation");
else if (is_declaration && cp_parser_declares_only_class_p (parser))
cplus_decl_attributes (&type, attributes, (int) ATTR_FLAG_TYPE_IN_PLACE);
else
warning (OPT_Wattributes,
"attributes ignored on elaborated-type-specifier that is not a forward declaration");
}
if (tag_type != enum_type)
{
/* Indicate whether this class was declared as a `class' or as a
`struct'. */
if (TREE_CODE (type) == RECORD_TYPE)
CLASSTYPE_DECLARED_CLASS (type) = (tag_type == class_type);
/* Diagnose `class'/`struct' key mismatches against the type's
original declaration. */
cp_parser_check_class_key (tag_type, type);
}
/* A "<" cannot follow an elaborated type specifier. If that
happens, the user was probably trying to form a template-id. */
cp_parser_check_for_invalid_template_id (parser, type, token->location);
return type;
}
/* Parse an enum-specifier.
enum-specifier:
enum-head { enumerator-list [opt] }
enum-head { enumerator-list , } [C++0x]
enum-head:
enum-key identifier [opt] enum-base [opt]
enum-key nested-name-specifier identifier enum-base [opt]
enum-key:
enum
enum class [C++0x]
enum struct [C++0x]
enum-base: [C++0x]
: type-specifier-seq
opaque-enum-specifier:
enum-key identifier enum-base [opt] ;
GNU Extensions:
enum-key attributes[opt] identifier [opt] enum-base [opt]
{ enumerator-list [opt] }attributes[opt]
enum-key attributes[opt] identifier [opt] enum-base [opt]
{ enumerator-list, }attributes[opt] [C++0x]
Returns an ENUM_TYPE representing the enumeration, or NULL_TREE
if the token stream isn't an enum-specifier after all. */
static tree
cp_parser_enum_specifier (cp_parser* parser)
{
  tree identifier;
  tree type = NULL_TREE;
  tree prev_scope;
  tree nested_name_specifier = NULL_TREE;
  tree attributes;
  bool scoped_enum_p = false;
  bool has_underlying_type = false;
  bool nested_being_defined = false;
  bool new_value_list = false;
  bool is_new_type = false;
  bool is_anonymous = false;
  tree underlying_type = NULL_TREE;
  cp_token *type_start_token = NULL;
  bool saved_colon_corrects_to_scope_p = parser->colon_corrects_to_scope_p;

  /* A `:' here introduces an enum-base; make sure error recovery does
     not try to reinterpret it as a misspelled `::'.  Restored at
     "out" below on every exit path that reaches it.  */
  parser->colon_corrects_to_scope_p = false;

  /* Parse tentatively so that we can back up if we don't find a
     enum-specifier.  */
  cp_parser_parse_tentatively (parser);

  /* Caller guarantees that the current token is 'enum', an identifier
     possibly follows, and the token after that is an opening brace.
     If we don't have an identifier, fabricate an anonymous name for
     the enumeration being defined.  */
  cp_lexer_consume_token (parser->lexer);

  /* Parse the "class" or "struct", which indicates a scoped
     enumeration type in C++0x.  */
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_CLASS)
      || cp_lexer_next_token_is_keyword (parser->lexer, RID_STRUCT))
    {
      if (cxx_dialect < cxx0x)
        maybe_warn_cpp0x (CPP0X_SCOPED_ENUMS);

      /* Consume the `struct' or `class' token.  */
      cp_lexer_consume_token (parser->lexer);

      scoped_enum_p = true;
    }

  attributes = cp_parser_attributes_opt (parser);

  /* Clear the qualification.  */
  parser->scope = NULL_TREE;
  parser->qualifying_scope = NULL_TREE;
  parser->object_scope = NULL_TREE;

  /* Figure out in what scope the declaration is being placed.  */
  prev_scope = current_scope ();

  type_start_token = cp_lexer_peek_token (parser->lexer);

  /* Access checks are suppressed while looking at the (optional)
     nested-name-specifier; we may still back out of this parse.  */
  push_deferring_access_checks (dk_no_check);
  nested_name_specifier
    = cp_parser_nested_name_specifier_opt (parser,
                                           /*typename_keyword_p=*/true,
                                           /*check_dependency_p=*/false,
                                           /*type_p=*/false,
                                           /*is_declaration=*/false);

  if (nested_name_specifier)
    {
      tree name;

      identifier = cp_parser_identifier (parser);
      name = cp_parser_lookup_name (parser, identifier,
                                    enum_type,
                                    /*is_template=*/false,
                                    /*is_namespace=*/false,
                                    /*check_dependency=*/true,
                                    /*ambiguous_decls=*/NULL,
                                    input_location);
      if (name)
        {
          type = TREE_TYPE (name);
          if (TREE_CODE (type) == TYPENAME_TYPE)
            {
              /* Are template enums allowed in ISO? */
              if (template_parm_scope_p ())
                pedwarn (type_start_token->location, OPT_pedantic,
                         "%qD is an enumeration template", name);
              /* Ignore a typename reference, for it will be solved by
                 name in start_enum.  */
              type = NULL_TREE;
            }
        }
      else
        error_at (type_start_token->location,
                  "%qD is not an enumerator-name", identifier);
    }
  else
    {
      if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
        identifier = cp_parser_identifier (parser);
      else
        {
          /* No tag: give the anonymous enumeration a fabricated name so
             it can still be entered in the symbol tables.  */
          identifier = make_anon_name ();
          is_anonymous = true;
        }
    }
  pop_deferring_access_checks ();

  /* Check for the `:' that denotes a specified underlying type in C++0x.
     Note that a ':' could also indicate a bitfield width, however.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_COLON))
    {
      cp_decl_specifier_seq type_specifiers;

      /* Consume the `:'.  */
      cp_lexer_consume_token (parser->lexer);

      /* Parse the type-specifier-seq.  */
      cp_parser_type_specifier_seq (parser, /*is_declaration=*/false,
                                    /*is_trailing_return=*/false,
                                    &type_specifiers);

      /* At this point this is surely not elaborated type specifier.
         Commit to the tentative parse; failure means it was not an
         enum-specifier after all.  */
      if (!cp_parser_parse_definitely (parser))
        return NULL_TREE;

      if (cxx_dialect < cxx0x)
        maybe_warn_cpp0x (CPP0X_SCOPED_ENUMS);

      has_underlying_type = true;

      /* If that didn't work, stop.  */
      if (type_specifiers.type != error_mark_node)
        {
          underlying_type = grokdeclarator (NULL, &type_specifiers, TYPENAME,
                                            /*initialized=*/0, NULL);
          if (underlying_type == error_mark_node)
            underlying_type = NULL_TREE;
        }
    }

  /* Look for the `{' but don't consume it yet.  */
  if (!cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
    {
      if (cxx_dialect < cxx0x || (!scoped_enum_p && !underlying_type))
        {
          cp_parser_error (parser, "expected %<{%>");
          if (has_underlying_type)
            {
              type = NULL_TREE;
              goto out;
            }
        }
      /* An opaque-enum-specifier must have a ';' here.  */
      if ((scoped_enum_p || underlying_type)
          && cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
        {
          cp_parser_error (parser, "expected %<;%> or %<{%>");
          if (has_underlying_type)
            {
              type = NULL_TREE;
              goto out;
            }
        }
    }

  /* If an enum-base was parsed we already committed above; otherwise
     commit (or back out) now.  */
  if (!has_underlying_type && !cp_parser_parse_definitely (parser))
    return NULL_TREE;

  if (nested_name_specifier)
    {
      if (CLASS_TYPE_P (nested_name_specifier))
        {
          /* Temporarily mark the qualifying class as being defined so
             its members are visible while parsing the enum; the prior
             flag value is restored near the end of this function.  */
          nested_being_defined = TYPE_BEING_DEFINED (nested_name_specifier);
          TYPE_BEING_DEFINED (nested_name_specifier) = 1;
          push_scope (nested_name_specifier);
        }
      else if (TREE_CODE (nested_name_specifier) == NAMESPACE_DECL)
        {
          push_nested_namespace (nested_name_specifier);
        }
    }

  /* Issue an error message if type-definitions are forbidden here.  */
  if (!cp_parser_check_type_definition (parser))
    type = error_mark_node;
  else
    /* Create the new type.  We do this before consuming the opening
       brace so the enum will be recorded as being on the line of its
       tag (or the 'enum' keyword, if there is no tag).  */
    type = start_enum (identifier, type, underlying_type,
                       scoped_enum_p, &is_new_type);

  /* If the next token is not '{' it is an opaque-enum-specifier or an
     elaborated-type-specifier.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
    {
      timevar_push (TV_PARSE_ENUM);
      if (nested_name_specifier)
        {
          /* The following catches invalid code such as:
             enum class S<int>::E { A, B, C }; */
          if (!processing_specialization
              && CLASS_TYPE_P (nested_name_specifier)
              && CLASSTYPE_USE_TEMPLATE (nested_name_specifier))
            error_at (type_start_token->location, "cannot add an enumerator "
                      "list to a template instantiation");

          /* If that scope does not contain the scope in which the
             class was originally declared, the program is invalid.  */
          if (prev_scope && !is_ancestor (prev_scope, nested_name_specifier))
            {
              if (at_namespace_scope_p ())
                error_at (type_start_token->location,
                          "declaration of %qD in namespace %qD which does not "
                          "enclose %qD",
                          type, prev_scope, nested_name_specifier);
              else
                error_at (type_start_token->location,
                          "declaration of %qD in %qD which does not enclose %qD",
                          type, prev_scope, nested_name_specifier);
              type = error_mark_node;
            }
        }

      if (scoped_enum_p)
        begin_scope (sk_scoped_enum, type);

      /* Consume the opening brace.  */
      cp_lexer_consume_token (parser->lexer);

      if (type == error_mark_node)
        ; /* Nothing to add */
      else if (OPAQUE_ENUM_P (type)
               || (cxx_dialect > cxx98 && processing_specialization))
        {
          /* An earlier opaque declaration is now being completed.  */
          new_value_list = true;
          SET_OPAQUE_ENUM_P (type, false);
          DECL_SOURCE_LOCATION (TYPE_NAME (type)) = type_start_token->location;
        }
      else
        {
          error_at (type_start_token->location, "multiple definition of %q#T", type);
          error_at (DECL_SOURCE_LOCATION (TYPE_MAIN_DECL (type)),
                    "previous definition here");
          type = error_mark_node;
        }

      if (type == error_mark_node)
        cp_parser_skip_to_end_of_block_or_statement (parser);
      /* If the next token is not '}', then there are some enumerators.  */
      else if (cp_lexer_next_token_is_not (parser->lexer, CPP_CLOSE_BRACE))
        cp_parser_enumerator_list (parser, type);

      /* Consume the final '}'.  */
      cp_parser_require (parser, CPP_CLOSE_BRACE, RT_CLOSE_BRACE);

      if (scoped_enum_p)
        finish_scope ();
      timevar_pop (TV_PARSE_ENUM);
    }
  else
    {
      /* If a ';' follows, then it is an opaque-enum-specifier
         and additional restrictions apply.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
        {
          if (is_anonymous)
            error_at (type_start_token->location,
                      "opaque-enum-specifier without name");
          else if (nested_name_specifier)
            error_at (type_start_token->location,
                      "opaque-enum-specifier must use a simple identifier");
        }
    }

  /* Look for trailing attributes to apply to this enumeration, and
     apply them if appropriate.  */
  if (cp_parser_allow_gnu_extensions_p (parser))
    {
      tree trailing_attr = cp_parser_attributes_opt (parser);
      trailing_attr = chainon (trailing_attr, attributes);
      cplus_decl_attributes (&type,
                             trailing_attr,
                             (int) ATTR_FLAG_TYPE_IN_PLACE);
    }

  /* Finish up the enumeration.  */
  if (type != error_mark_node)
    {
      if (new_value_list)
        finish_enum_value_list (type);
      if (is_new_type)
        finish_enum (type);
    }

  /* Undo the scope changes made before parsing the enumerator list.  */
  if (nested_name_specifier)
    {
      if (CLASS_TYPE_P (nested_name_specifier))
        {
          TYPE_BEING_DEFINED (nested_name_specifier) = nested_being_defined;
          pop_scope (nested_name_specifier);
        }
      else if (TREE_CODE (nested_name_specifier) == NAMESPACE_DECL)
        {
          pop_nested_namespace (nested_name_specifier);
        }
    }
 out:
  parser->colon_corrects_to_scope_p = saved_colon_corrects_to_scope_p;
  return type;
}
/* Parse an enumerator-list. The enumerators all have the indicated
TYPE.
enumerator-list:
enumerator-definition
enumerator-list , enumerator-definition */
/* Parse the enumerator-definitions between the braces of an enum,
   attaching each one to TYPE.  Stops at the first token that is not
   part of the list; a trailing comma before the `}' is tolerated as a
   C++0x feature (pedwarn'ed in earlier dialects).  */
static void
cp_parser_enumerator_list (cp_parser* parser, tree type)
{
  bool more = true;

  while (more)
    {
      /* One enumerator, with optional initializer.  */
      cp_parser_enumerator_definition (parser, type);

      if (cp_lexer_next_token_is (parser->lexer, CPP_COMMA))
        {
          /* Eat the `,'; another enumerator may follow.  */
          cp_lexer_consume_token (parser->lexer);
          if (cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_BRACE))
            {
              /* A `}' right after the comma means the comma was
                 trailing -- valid in C++0x, pedantic before that.  */
              if (cxx_dialect < cxx0x && !in_system_header)
                pedwarn (input_location, OPT_pedantic,
                         "comma at end of enumerator list");
              more = false;
            }
        }
      else
        /* No comma: the list is complete.  */
        more = false;
    }
}
/* Parse an enumerator-definition. The enumerator has the indicated
TYPE.
enumerator-definition:
enumerator
enumerator = constant-expression
enumerator:
identifier */
/* Parse a single enumerator-definition and enter the enumerator into
   TYPE.  The enumerator may carry an explicit `= constant-expression'
   initializer; otherwise the value is left for build_enumerator to
   assign.  */
static void
cp_parser_enumerator_definition (cp_parser* parser, tree type)
{
  tree name;
  tree init = NULL_TREE;
  location_t id_loc;

  /* Diagnostics should point at the enumerator's name, not at any
     explicit value that may follow, so record the location first.  */
  id_loc = cp_lexer_peek_token (parser->lexer)->location;

  name = cp_parser_identifier (parser);
  if (name == error_mark_node)
    return;

  /* An `=' introduces an explicit constant-expression value.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_EQ))
    {
      cp_lexer_consume_token (parser->lexer);
      init = cp_parser_constant_expression (parser,
                                            /*allow_non_constant_p=*/false,
                                            NULL);
    }

  /* If we are processing a template, make sure the initializer of the
     enumerator doesn't contain any bare template parameter pack.  */
  if (check_for_bare_parameter_packs (init))
    init = error_mark_node;

  /* integral_constant_value will pull out this expression, so make sure
     it's folded as appropriate.  */
  init = fold_non_dependent_expr (init);

  /* Enter the enumerator into the enumeration.  */
  build_enumerator (name, init, type, id_loc);
}
/* Parse a namespace-name.
namespace-name:
original-namespace-name
namespace-alias
Returns the NAMESPACE_DECL for the namespace. */
/* Parse a namespace-name and look it up; only namespace names are
   considered, per [basic.lookup.udir] and [basic.lookup.qual].
   Returns the NAMESPACE_DECL on success, error_mark_node otherwise
   (with diagnostics issued unless we are parsing tentatively).  */
static tree
cp_parser_namespace_name (cp_parser* parser)
{
  tree name;
  tree decl;
  cp_token *start_token;

  /* Remember where the name begins for diagnostics.  */
  start_token = cp_lexer_peek_token (parser->lexer);

  name = cp_parser_identifier (parser);
  if (name == error_mark_node)
    return error_mark_node;

  /* Look up the identifier, restricting the lookup to namespaces.
     (Note that cp_parser_qualifying_entity only calls this function
     if the token after the name is the scope resolution operator.)  */
  decl = cp_parser_lookup_name (parser, name,
                                none_type,
                                /*is_template=*/false,
                                /*is_namespace=*/true,
                                /*check_dependency=*/true,
                                /*ambiguous_decls=*/NULL,
                                start_token->location);

  /* Success: the lookup produced an actual namespace.  */
  if (decl != error_mark_node && TREE_CODE (decl) == NAMESPACE_DECL)
    return decl;

  /* Not a namespace.  Emit the hard error only when committed; the
     cp_parser_error call also records failure for tentative parses.  */
  if (!cp_parser_uncommitted_to_tentative_parse_p (parser))
    error_at (start_token->location, "%qD is not a namespace-name", name);
  cp_parser_error (parser, "expected namespace-name");
  return error_mark_node;
}
/* Parse a namespace-definition.
namespace-definition:
named-namespace-definition
unnamed-namespace-definition
named-namespace-definition:
original-namespace-definition
extension-namespace-definition
original-namespace-definition:
namespace identifier { namespace-body }
extension-namespace-definition:
namespace original-namespace-name { namespace-body }
unnamed-namespace-definition:
namespace { namespace-body } */
/* Parse a namespace-definition (named or unnamed, optionally
   `inline').  An inline namespace is handled as a stub namespace
   definition followed by a strong using-directive in the enclosing
   namespace.  */
static void
cp_parser_namespace_definition (cp_parser* parser)
{
  tree name, attributes;
  bool visibility_pushed;
  bool inline_p = false;

  /* An optional `inline' may precede `namespace' (C++0x).  */
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_INLINE))
    {
      maybe_warn_cpp0x (CPP0X_INLINE_NAMESPACES);
      inline_p = true;
      cp_lexer_consume_token (parser->lexer);
    }

  /* The `namespace' keyword itself.  */
  cp_parser_require_keyword (parser, RID_NAMESPACE, RT_NAMESPACE);

  /* An identifier names the namespace; its absence means this is an
     unnamed namespace.  Distinguishing original- from
     extension-namespace-definitions is left to semantic analysis.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
    name = cp_parser_identifier (parser);
  else
    name = NULL_TREE;

  /* Optional attributes, then the opening brace.  */
  attributes = cp_parser_attributes_opt (parser);
  cp_parser_require (parser, CPP_OPEN_BRACE, RT_OPEN_BRACE);

  /* Enter the namespace.  */
  push_namespace (name);

  /* "inline namespace" is equivalent to a stub namespace definition
     followed by a strong using directive.  */
  if (inline_p)
    {
      tree name_space = current_namespace;
      /* Set up namespace association.  */
      DECL_NAMESPACE_ASSOCIATIONS (name_space)
        = tree_cons (CP_DECL_CONTEXT (name_space), NULL_TREE,
                     DECL_NAMESPACE_ASSOCIATIONS (name_space));
      /* Import the contents of the inline namespace.  */
      pop_namespace ();
      do_using_directive (name_space);
      push_namespace (name);
    }

  visibility_pushed = handle_namespace_attrs (current_namespace, attributes);

  /* Parse everything between the braces.  */
  cp_parser_namespace_body (parser);

  if (visibility_pushed)
    pop_visibility (1);

  /* Leave the namespace and require the closing brace.  */
  pop_namespace ();
  cp_parser_require (parser, CPP_CLOSE_BRACE, RT_CLOSE_BRACE);
}
/* Parse a namespace-body.
namespace-body:
declaration-seq [opt] */
static void
cp_parser_namespace_body (cp_parser* parser)
{
  /* The body is simply a (possibly empty) sequence of declarations;
     the caller has already entered the namespace scope.  */
  cp_parser_declaration_seq_opt (parser);
}
/* Parse a namespace-alias-definition.
namespace-alias-definition:
namespace identifier = qualified-namespace-specifier ; */
/* Parse a namespace-alias-definition:
     namespace identifier = qualified-namespace-specifier ;
   and register the alias with do_namespace_alias.  Also recovers from
   the common mistake of writing a namespace *definition* where only
   an alias is allowed.  */
static void
cp_parser_namespace_alias_definition (cp_parser* parser)
{
  tree alias_name;
  tree target;
  cp_token *start = cp_lexer_peek_token (parser->lexer);

  /* `namespace' keyword, then the alias identifier.  */
  cp_parser_require_keyword (parser, RID_NAMESPACE, RT_NAMESPACE);
  alias_name = cp_parser_identifier (parser);
  if (alias_name == error_mark_node)
    return;

  /* A `{' where the `=' belongs means the user wrote a namespace
     definition in a context that only permits an alias; complain,
     skip the whole braced body, and bail out.  */
  if (!cp_parser_uncommitted_to_tentative_parse_p (parser)
      && cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
    {
      error_at (start->location, "%<namespace%> definition is not allowed here");
      cp_lexer_consume_token (parser->lexer);
      if (cp_parser_skip_to_closing_brace (parser))
        cp_lexer_consume_token (parser->lexer);
      return;
    }

  /* `=', the target namespace, and the terminating `;'.  */
  cp_parser_require (parser, CPP_EQ, RT_EQ);
  target = cp_parser_qualified_namespace_specifier (parser);
  cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);

  /* Register the alias in the symbol table.  */
  do_namespace_alias (alias_name, target);
}
/* Parse a qualified-namespace-specifier.
qualified-namespace-specifier:
:: [opt] nested-name-specifier [opt] namespace-name
Returns a NAMESPACE_DECL corresponding to the specified
namespace. */
static tree
cp_parser_qualified_namespace_specifier (cp_parser* parser)
{
  /* Look for the optional `::'.  */
  cp_parser_global_scope_opt (parser,
                              /*current_scope_valid_p=*/false);

  /* Look for the optional nested-name-specifier.  */
  cp_parser_nested_name_specifier_opt (parser,
                                       /*typename_keyword_p=*/false,
                                       /*check_dependency_p=*/true,
                                       /*type_p=*/false,
                                       /*is_declaration=*/true);

  /* What remains must be the namespace-name itself; any diagnostics
     are issued by cp_parser_namespace_name.  */
  return cp_parser_namespace_name (parser);
}
/* Parse a using-declaration, or, if ACCESS_DECLARATION_P is true, an
access declaration.
using-declaration:
using typename [opt] :: [opt] nested-name-specifier unqualified-id ;
using :: unqualified-id ;
access-declaration:
qualified-id ;
*/
static bool
cp_parser_using_declaration (cp_parser* parser,
                             bool access_declaration_p)
{
  cp_token *token;
  bool typename_p = false;
  bool global_scope_p;
  tree decl;
  tree identifier;
  tree qscope;
  /* Snapshot of the global error count, used below to decide whether
     this declaration parsed without producing new errors.  */
  int oldcount = errorcount;
  cp_token *diag_token = NULL;

  if (access_declaration_p)
    {
      /* An access-declaration has no `using' keyword; parse
         tentatively so we can back out cleanly if it turns out not to
         be one after all.  */
      diag_token = cp_lexer_peek_token (parser->lexer);
      cp_parser_parse_tentatively (parser);
    }
  else
    {
      /* Look for the `using' keyword.  */
      cp_parser_require_keyword (parser, RID_USING, RT_USING);
      /* Peek at the next token.  */
      token = cp_lexer_peek_token (parser->lexer);
      /* See if it's `typename'.  */
      if (token->keyword == RID_TYPENAME)
        {
          /* Remember that we've seen it.  */
          typename_p = true;
          /* Consume the `typename' token.  */
          cp_lexer_consume_token (parser->lexer);
        }
    }

  /* Look for the optional global scope qualification.  */
  global_scope_p
    = (cp_parser_global_scope_opt (parser,
                                   /*current_scope_valid_p=*/false)
       != NULL_TREE);

  /* If we saw `typename', or didn't see `::', then there must be a
     nested-name-specifier present.  */
  if (typename_p || !global_scope_p)
    qscope = cp_parser_nested_name_specifier (parser, typename_p,
                                              /*check_dependency_p=*/true,
                                              /*type_p=*/false,
                                              /*is_declaration=*/true);
  /* Otherwise, we could be in either of the two productions.  In that
     case, treat the nested-name-specifier as optional.  */
  else
    qscope = cp_parser_nested_name_specifier_opt (parser,
                                                  /*typename_keyword_p=*/false,
                                                  /*check_dependency_p=*/true,
                                                  /*type_p=*/false,
                                                  /*is_declaration=*/true);
  if (!qscope)
    qscope = global_namespace;

  if (access_declaration_p && cp_parser_error_occurred (parser))
    /* Something has already gone wrong; there's no need to parse
       further.  Since an error has occurred, the return value of
       cp_parser_parse_definitely will be false, as required.  */
    return cp_parser_parse_definitely (parser);

  token = cp_lexer_peek_token (parser->lexer);
  /* Parse the unqualified-id.  */
  identifier = cp_parser_unqualified_id (parser,
                                         /*template_keyword_p=*/false,
                                         /*check_dependency_p=*/true,
                                         /*declarator_p=*/true,
                                         /*optional_p=*/false);

  if (access_declaration_p)
    {
      /* An access-declaration is just a qualified-id followed by `;';
         anything else means the tentative parse must fail.  */
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
        cp_parser_simulate_error (parser);
      if (!cp_parser_parse_definitely (parser))
        return false;
    }

  /* The function we call to handle a using-declaration is different
     depending on what scope we are in.  */
  if (qscope == error_mark_node || identifier == error_mark_node)
    ;
  else if (TREE_CODE (identifier) != IDENTIFIER_NODE
           && TREE_CODE (identifier) != BIT_NOT_EXPR)
    /* [namespace.udecl]
       A using declaration shall not name a template-id.  */
    error_at (token->location,
              "a template-id may not appear in a using-declaration");
  else
    {
      if (at_class_scope_p ())
        {
          /* Create the USING_DECL.  */
          decl = do_class_using_decl (parser->scope, identifier);

          if (decl && typename_p)
            USING_DECL_TYPENAME_P (decl) = 1;

          if (check_for_bare_parameter_packs (decl))
            return false;
          else
            /* Add it to the list of members in this class.  */
            finish_member_declaration (decl);
        }
      else
        {
          decl = cp_parser_lookup_name_simple (parser,
                                               identifier,
                                               token->location);
          if (decl == error_mark_node)
            cp_parser_name_lookup_error (parser, identifier,
                                         decl, NLE_NULL,
                                         token->location);
          else if (check_for_bare_parameter_packs (decl))
            return false;
          else if (!at_namespace_scope_p ())
            do_local_using_decl (decl, qscope, identifier);
          else
            do_toplevel_using_decl (decl, qscope, identifier);
        }
    }

  /* Look for the final `;'.  */
  cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);

  /* Warn about the deprecated access-declaration form, but only when
     the whole declaration parsed without new errors.  */
  if (access_declaration_p && errorcount == oldcount)
    warning_at (diag_token->location, OPT_Wdeprecated,
                "access declarations are deprecated "
                "in favour of using-declarations; "
                "suggestion: add the %<using%> keyword");

  return true;
}
/* Parse an alias-declaration.
alias-declaration:
using identifier attribute-specifier-seq [opt] = type-id */
static tree
cp_parser_alias_declaration (cp_parser* parser)
{
  tree id, type, decl, pushed_scope = NULL_TREE, attributes;
  location_t id_location;
  cp_declarator *declarator;
  cp_decl_specifier_seq decl_specs;
  bool member_p;
  const char *saved_message = NULL;

  /* Look for the `using' keyword.  */
  cp_parser_require_keyword (parser, RID_USING, RT_USING);

  /* The identifier being declared, with its location for the
     declarator built below.  */
  id_location = cp_lexer_peek_token (parser->lexer)->location;
  id = cp_parser_identifier (parser);
  if (id == error_mark_node)
    return error_mark_node;

  attributes = cp_parser_attributes_opt (parser);
  if (attributes == error_mark_node)
    return error_mark_node;

  cp_parser_require (parser, CPP_EQ, RT_EQ);
  if (cp_parser_error_occurred (parser))
    return error_mark_node;

  /* Now we are going to parse the type-id of the declaration.  */
  /*
     [dcl.type]/3 says:

     "A type-specifier-seq shall not define a class or enumeration
     unless it appears in the type-id of an alias-declaration (7.1.3) that
     is not the declaration of a template-declaration."

     In other words, if we currently are in an alias template, the
     type-id should not define a type.

     So let's set parser->type_definition_forbidden_message in that
     case; cp_parser_check_type_definition (called by
     cp_parser_class_specifier) will then emit an error if a type is
     defined in the type-id.  */
  if (parser->num_template_parameter_lists)
    {
      saved_message = parser->type_definition_forbidden_message;
      parser->type_definition_forbidden_message =
        G_("types may not be defined in alias template declarations");
    }

  type = cp_parser_type_id (parser);

  /* Restore the error message if need be.  */
  if (parser->num_template_parameter_lists)
    parser->type_definition_forbidden_message = saved_message;

  cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);
  if (cp_parser_error_occurred (parser))
    return error_mark_node;

  /* A typedef-name can also be introduced by an alias-declaration.  The
     identifier following the using keyword becomes a typedef-name.  It has
     the same semantics as if it were introduced by the typedef
     specifier.  In particular, it does not define a new type and it shall
     not appear in the type-id.  */
  clear_decl_specs (&decl_specs);
  decl_specs.type = type;
  decl_specs.attributes = attributes;
  /* Mark the declaration both as a typedef and as an alias so later
     processing can distinguish it from a plain `typedef'.  */
  ++decl_specs.specs[(int) ds_typedef];
  ++decl_specs.specs[(int) ds_alias];

  declarator = make_id_declarator (NULL_TREE, id, sfk_none);
  declarator->id_loc = id_location;

  /* Member aliases go through grokfield; everything else through the
     ordinary declaration path.  */
  member_p = at_class_scope_p ();
  if (member_p)
    decl = grokfield (declarator, &decl_specs, NULL_TREE, false,
                      NULL_TREE, attributes);
  else
    decl = start_decl (declarator, &decl_specs, 0,
                       attributes, NULL_TREE, &pushed_scope);
  if (decl == error_mark_node)
    return decl;

  cp_finish_decl (decl, NULL_TREE, 0, NULL_TREE, 0);

  if (pushed_scope)
    pop_scope (pushed_scope);

  /* If decl is a template, return its TEMPLATE_DECL so that it gets
     added into the symbol table; otherwise, return the TYPE_DECL.  */
  if (DECL_LANG_SPECIFIC (decl)
      && DECL_TEMPLATE_INFO (decl)
      && PRIMARY_TEMPLATE_P (DECL_TI_TEMPLATE (decl)))
    {
      decl = DECL_TI_TEMPLATE (decl);
      if (member_p)
        check_member_template (decl);
    }

  return decl;
}
/* Parse a using-directive.
using-directive:
using namespace :: [opt] nested-name-specifier [opt]
namespace-name ; */
/* Parse a using-directive:
     using namespace :: [opt] nested-name-specifier [opt] namespace-name ;
   and record it in the symbol table via parse_using_directive.  */
static void
cp_parser_using_directive (cp_parser* parser)
{
  tree used_namespace;
  tree attributes;

  /* The two keywords: `using' followed by `namespace'.  */
  cp_parser_require_keyword (parser, RID_USING, RT_USING);
  cp_parser_require_keyword (parser, RID_NAMESPACE, RT_NAMESPACE);

  /* Optional `::' prefix.  */
  cp_parser_global_scope_opt (parser, /*current_scope_valid_p=*/false);

  /* Optional qualification before the namespace-name.  */
  cp_parser_nested_name_specifier_opt (parser,
                                       /*typename_keyword_p=*/false,
                                       /*check_dependency_p=*/true,
                                       /*type_p=*/false,
                                       /*is_declaration=*/true);

  /* The namespace whose members are to be made visible.  */
  used_namespace = cp_parser_namespace_name (parser);

  /* GNU extension: attributes may follow the name.  */
  attributes = cp_parser_attributes_opt (parser);

  /* Update the symbol table.  */
  parse_using_directive (used_namespace, attributes);

  /* The directive ends with `;'.  */
  cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);
}
/* Parse an asm-definition.
asm-definition:
asm ( string-literal ) ;
GNU Extension:
asm-definition:
asm volatile [opt] ( string-literal ) ;
asm volatile [opt] ( string-literal : asm-operand-list [opt] ) ;
asm volatile [opt] ( string-literal : asm-operand-list [opt]
: asm-operand-list [opt] ) ;
asm volatile [opt] ( string-literal : asm-operand-list [opt]
: asm-operand-list [opt]
: asm-clobber-list [opt] ) ;
asm volatile [opt] goto ( string-literal : : asm-operand-list [opt]
: asm-clobber-list [opt]
: asm-goto-list ) ; */
static void
cp_parser_asm_definition (cp_parser* parser)
{
  tree string;
  tree outputs = NULL_TREE;
  tree inputs = NULL_TREE;
  tree clobbers = NULL_TREE;
  tree labels = NULL_TREE;
  tree asm_stmt;
  bool volatile_p = false;
  bool extended_p = false;
  bool invalid_inputs_p = false;
  bool invalid_outputs_p = false;
  bool goto_p = false;
  required_token missing = RT_NONE;

  /* Look for the `asm' keyword.  */
  cp_parser_require_keyword (parser, RID_ASM, RT_ASM);
  /* See if the next token is `volatile'.  */
  if (cp_parser_allow_gnu_extensions_p (parser)
      && cp_lexer_next_token_is_keyword (parser->lexer, RID_VOLATILE))
    {
      /* Remember that we saw the `volatile' keyword.  */
      volatile_p = true;
      /* Consume the token.  */
      cp_lexer_consume_token (parser->lexer);
    }
  /* `asm goto' is only recognized inside a function body.  */
  if (cp_parser_allow_gnu_extensions_p (parser)
      && parser->in_function_body
      && cp_lexer_next_token_is_keyword (parser->lexer, RID_GOTO))
    {
      /* Remember that we saw the `goto' keyword.  */
      goto_p = true;
      /* Consume the token.  */
      cp_lexer_consume_token (parser->lexer);
    }
  /* Look for the opening `('.  */
  if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN))
    return;
  /* Look for the string.  */
  string = cp_parser_string_literal (parser, false, false);
  if (string == error_mark_node)
    {
      cp_parser_skip_to_closing_parenthesis (parser, true, false,
                                             /*consume_paren=*/true);
      return;
    }

  /* If we're allowing GNU extensions, check for the extended assembly
     syntax.  Unfortunately, the `:' tokens need not be separated by
     a space in C, and so, for compatibility, we tolerate that here
     too.  Doing that means that we have to treat the `::' operator as
     two `:' tokens.  */
  if (cp_parser_allow_gnu_extensions_p (parser)
      && parser->in_function_body
      && (cp_lexer_next_token_is (parser->lexer, CPP_COLON)
          || cp_lexer_next_token_is (parser->lexer, CPP_SCOPE)))
    {
      bool inputs_p = false;
      bool clobbers_p = false;
      bool labels_p = false;

      /* The extended syntax was used.  */
      extended_p = true;

      /* Look for outputs.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_COLON))
        {
          /* Consume the `:'.  */
          cp_lexer_consume_token (parser->lexer);
          /* Parse the output-operands, unless the section is empty
             (next token is `:', `::', or `)') or this is `asm goto',
             which has no outputs.  */
          if (cp_lexer_next_token_is_not (parser->lexer,
                                          CPP_COLON)
              && cp_lexer_next_token_is_not (parser->lexer,
                                             CPP_SCOPE)
              && cp_lexer_next_token_is_not (parser->lexer,
                                             CPP_CLOSE_PAREN)
              && !goto_p)
            outputs = cp_parser_asm_operand_list (parser);

          if (outputs == error_mark_node)
            invalid_outputs_p = true;
        }
      /* If the next token is `::', there are no outputs, and the
         next token is the beginning of the inputs.  */
      else if (cp_lexer_next_token_is (parser->lexer, CPP_SCOPE))
        /* The inputs are coming next.  */
        inputs_p = true;

      /* Look for inputs.  */
      if (inputs_p
          || cp_lexer_next_token_is (parser->lexer, CPP_COLON))
        {
          /* Consume the `:' or `::'.  */
          cp_lexer_consume_token (parser->lexer);
          /* Parse the input-operands.  */
          if (cp_lexer_next_token_is_not (parser->lexer,
                                          CPP_COLON)
              && cp_lexer_next_token_is_not (parser->lexer,
                                             CPP_SCOPE)
              && cp_lexer_next_token_is_not (parser->lexer,
                                             CPP_CLOSE_PAREN))
            inputs = cp_parser_asm_operand_list (parser);

          if (inputs == error_mark_node)
            invalid_inputs_p = true;
        }
      else if (cp_lexer_next_token_is (parser->lexer, CPP_SCOPE))
        /* The clobbers are coming next.  */
        clobbers_p = true;

      /* Look for clobbers.  */
      if (clobbers_p
          || cp_lexer_next_token_is (parser->lexer, CPP_COLON))
        {
          clobbers_p = true;
          /* Consume the `:' or `::'.  */
          cp_lexer_consume_token (parser->lexer);
          /* Parse the clobbers.  */
          if (cp_lexer_next_token_is_not (parser->lexer,
                                          CPP_COLON)
              && cp_lexer_next_token_is_not (parser->lexer,
                                             CPP_CLOSE_PAREN))
            clobbers = cp_parser_asm_clobber_list (parser);
        }
      else if (goto_p
               && cp_lexer_next_token_is (parser->lexer, CPP_SCOPE))
        /* The labels are coming next.  */
        labels_p = true;

      /* Look for labels.  */
      if (labels_p
          || (goto_p && cp_lexer_next_token_is (parser->lexer, CPP_COLON)))
        {
          labels_p = true;
          /* Consume the `:' or `::'.  */
          cp_lexer_consume_token (parser->lexer);
          /* Parse the labels.  */
          labels = cp_parser_asm_label_list (parser);
        }

      /* `asm goto' requires a label section; record which token to
         demand in the diagnostic issued below.  */
      if (goto_p && !labels_p)
        missing = clobbers_p ? RT_COLON : RT_COLON_SCOPE;
    }
  else if (goto_p)
    missing = RT_COLON_SCOPE;

  /* Look for the closing `)'.  */
  if (!cp_parser_require (parser, missing ? CPP_COLON : CPP_CLOSE_PAREN,
                          missing ? missing : RT_CLOSE_PAREN))
    cp_parser_skip_to_closing_parenthesis (parser, true, false,
                                           /*consume_paren=*/true);
  cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);

  if (!invalid_inputs_p && !invalid_outputs_p)
    {
      /* Create the ASM_EXPR.  */
      if (parser->in_function_body)
        {
          asm_stmt = finish_asm_stmt (volatile_p, string, outputs,
                                      inputs, clobbers, labels);
          /* If the extended syntax was not used, mark the ASM_EXPR.  */
          if (!extended_p)
            {
              tree temp = asm_stmt;
              if (TREE_CODE (temp) == CLEANUP_POINT_EXPR)
                temp = TREE_OPERAND (temp, 0);

              ASM_INPUT_P (temp) = 1;
            }
        }
      else
        /* At file scope, only the simple asm-string form is handled;
           it is passed to the middle end as-is.  */
        cgraph_add_asm_node (string);
    }
}
/* Declarators [gram.dcl.decl] */
/* Parse an init-declarator.
init-declarator:
declarator initializer [opt]
GNU Extension:
init-declarator:
declarator asm-specification [opt] attributes [opt] initializer [opt]
function-definition:
decl-specifier-seq [opt] declarator ctor-initializer [opt]
function-body
decl-specifier-seq [opt] declarator function-try-block
GNU Extension:
function-definition:
__extension__ function-definition
TM Extension:
function-definition:
decl-specifier-seq [opt] declarator function-transaction-block
The DECL_SPECIFIERS apply to this declarator. Returns a
representation of the entity declared. If MEMBER_P is TRUE, then
this declarator appears in a class scope. The new DECL created by
this declarator is returned.
The CHECKS are access checks that should be performed once we know
what entity is being declared (and, therefore, what classes have
befriended it).
If FUNCTION_DEFINITION_ALLOWED_P then we handle the declarator and
for a function-definition here as well. If the declarator is a
declarator for a function-definition, *FUNCTION_DEFINITION_P will
be TRUE upon return. By that point, the function-definition will
have been completely parsed.
FUNCTION_DEFINITION_P may be NULL if FUNCTION_DEFINITION_ALLOWED_P
is FALSE.
If MAYBE_RANGE_FOR_DECL is not NULL, the pointed tree will be set to the
parsed declaration if it is an uninitialized single declarator not followed
by a `;', or to error_mark_node otherwise. Either way, the trailing `;',
if present, will not be consumed. If returned, this declarator will be
created with SD_INITIALIZED but will not call cp_finish_decl. */
static tree
cp_parser_init_declarator (cp_parser* parser,
                           cp_decl_specifier_seq *decl_specifiers,
                           VEC (deferred_access_check,gc)* checks,
                           bool function_definition_allowed_p,
                           bool member_p,
                           int declares_class_or_enum,
                           bool* function_definition_p,
                           tree* maybe_range_for_decl)
{
  cp_token *token = NULL, *asm_spec_start_token = NULL,
           *attributes_start_token = NULL;
  cp_declarator *declarator;
  tree prefix_attributes;
  tree attributes;
  tree asm_specification;
  tree initializer;
  tree decl = NULL_TREE;
  tree scope;
  int is_initialized;
  /* Only valid if IS_INITIALIZED is true.  In that case, CPP_EQ if
     initialized with "= ..", CPP_OPEN_PAREN if initialized with
     "(...)".  */
  enum cpp_ttype initialization_kind;
  bool is_direct_init = false;
  bool is_non_constant_init;
  int ctor_dtor_or_conv_p;
  bool friend_p;
  tree pushed_scope = NULL_TREE;
  bool range_for_decl_p = false;
  /* Gather the attributes that were provided with the
     decl-specifiers.  */
  prefix_attributes = decl_specifiers->attributes;
  /* Assume that this is not the declarator for a function
     definition.  */
  if (function_definition_p)
    *function_definition_p = false;
  /* Defer access checks while parsing the declarator; we cannot know
     what names are accessible until we know what is being
     declared.  */
  resume_deferring_access_checks ();
  /* Parse the declarator.  */
  token = cp_lexer_peek_token (parser->lexer);
  declarator
    = cp_parser_declarator (parser, CP_PARSER_DECLARATOR_NAMED,
                            &ctor_dtor_or_conv_p,
                            /*parenthesized_p=*/NULL,
                            member_p);
  /* Gather up the deferred checks.  */
  stop_deferring_access_checks ();
  /* If the DECLARATOR was erroneous, there's no need to go
     further.  */
  if (declarator == cp_error_declarator)
    return error_mark_node;
  /* Check that the number of template-parameter-lists is OK.  */
  if (!cp_parser_check_declarator_template_parameters (parser, declarator,
                                                       token->location))
    return error_mark_node;
  /* Bit 2 of DECLARES_CLASS_OR_ENUM indicates that the
     decl-specifier-seq contained a class or enum *definition*; such a
     definition may not appear in a function's return type.  */
  if (declares_class_or_enum & 2)
    cp_parser_check_for_definition_in_return_type (declarator,
                                                   decl_specifiers->type,
                                                   decl_specifiers->type_location);
  /* Figure out what scope the entity declared by the DECLARATOR is
     located in.  `grokdeclarator' sometimes changes the scope, so
     we compute it now.  */
  scope = get_scope_of_declarator (declarator);
  /* Perform any lookups in the declared type which were thought to be
     dependent, but are not in the scope of the declarator.  */
  decl_specifiers->type
    = maybe_update_decl_type (decl_specifiers->type, scope);
  /* If we're allowing GNU extensions, look for an asm-specification
     and attributes.  */
  if (cp_parser_allow_gnu_extensions_p (parser))
    {
      /* Look for an asm-specification.  */
      asm_spec_start_token = cp_lexer_peek_token (parser->lexer);
      asm_specification = cp_parser_asm_specification_opt (parser);
      /* And attributes.  */
      attributes_start_token = cp_lexer_peek_token (parser->lexer);
      attributes = cp_parser_attributes_opt (parser);
    }
  else
    {
      asm_specification = NULL_TREE;
      attributes = NULL_TREE;
    }
  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);
  /* Check to see if the token indicates the start of a
     function-definition.  */
  if (function_declarator_p (declarator)
      && cp_parser_token_starts_function_definition_p (token))
    {
      if (!function_definition_allowed_p)
        {
          /* If a function-definition should not appear here, issue an
             error message.  */
          cp_parser_error (parser,
                           "a function-definition is not allowed here");
          return error_mark_node;
        }
      else
        {
          location_t func_brace_location
            = cp_lexer_peek_token (parser->lexer)->location;
          /* Neither attributes nor an asm-specification are allowed
             on a function-definition.  */
          if (asm_specification)
            error_at (asm_spec_start_token->location,
                      "an asm-specification is not allowed "
                      "on a function-definition");
          if (attributes)
            error_at (attributes_start_token->location,
                      "attributes are not allowed on a function-definition");
          /* This is a function-definition.  */
          *function_definition_p = true;
          /* Parse the function definition.  In a class body the body
             is saved and parsed later; at namespace scope it is
             processed immediately.  */
          if (member_p)
            decl = cp_parser_save_member_function_body (parser,
                                                        decl_specifiers,
                                                        declarator,
                                                        prefix_attributes);
          else
            decl
              = (cp_parser_function_definition_from_specifiers_and_declarator
                 (parser, decl_specifiers, prefix_attributes, declarator));
          if (decl != error_mark_node && DECL_STRUCT_FUNCTION (decl))
            {
              /* This is where the prologue starts...  */
              DECL_STRUCT_FUNCTION (decl)->function_start_locus
                = func_brace_location;
            }
          return decl;
        }
    }
  /* [dcl.dcl]
     Only in function declarations for constructors, destructors, and
     type conversions can the decl-specifier-seq be omitted.
     We explicitly postpone this check past the point where we handle
     function-definitions because we tolerate function-definitions
     that are missing their return types in some modes.  */
  if (!decl_specifiers->any_specifiers_p && ctor_dtor_or_conv_p <= 0)
    {
      cp_parser_error (parser,
                       "expected constructor, destructor, or type conversion");
      return error_mark_node;
    }
  /* An `=' or an `(', or an '{' in C++0x, indicates an initializer.  */
  if (token->type == CPP_EQ
      || token->type == CPP_OPEN_PAREN
      || token->type == CPP_OPEN_BRACE)
    {
      is_initialized = SD_INITIALIZED;
      initialization_kind = token->type;
      if (maybe_range_for_decl)
        *maybe_range_for_decl = error_mark_node;
      /* Recognize "= default" and "= delete" on function
         declarators (C++0x defaulted/deleted functions).  */
      if (token->type == CPP_EQ
          && function_declarator_p (declarator))
        {
          cp_token *t2 = cp_lexer_peek_nth_token (parser->lexer, 2);
          if (t2->keyword == RID_DEFAULT)
            is_initialized = SD_DEFAULTED;
          else if (t2->keyword == RID_DELETE)
            is_initialized = SD_DELETED;
        }
    }
  else
    {
      /* If the init-declarator isn't initialized and isn't followed by a
         `,' or `;', it's not a valid init-declarator.  */
      if (token->type != CPP_COMMA
          && token->type != CPP_SEMICOLON)
        {
          /* This may still be a range-based for declaration; defer
             the diagnosis to the caller in that case.  */
          if (maybe_range_for_decl && *maybe_range_for_decl != error_mark_node)
            range_for_decl_p = true;
          else
            {
              cp_parser_error (parser, "expected initializer");
              return error_mark_node;
            }
        }
      is_initialized = SD_UNINITIALIZED;
      initialization_kind = CPP_EOF;
    }
  /* Because start_decl has side-effects, we should only call it if we
     know we're going ahead.  By this point, we know that we cannot
     possibly be looking at any other construct.  */
  cp_parser_commit_to_tentative_parse (parser);
  /* If the decl specifiers were bad, issue an error now that we're
     sure this was intended to be a declarator.  Then continue
     declaring the variable(s), as int, to try to cut down on further
     errors.  */
  if (decl_specifiers->any_specifiers_p
      && decl_specifiers->type == error_mark_node)
    {
      cp_parser_error (parser, "invalid type in declaration");
      decl_specifiers->type = integer_type_node;
    }
  /* Check to see whether or not this declaration is a friend.  */
  friend_p = cp_parser_friend_p (decl_specifiers);
  /* Enter the newly declared entry in the symbol table.  If we're
     processing a declaration in a class-specifier, we wait until
     after processing the initializer.  */
  if (!member_p)
    {
      if (parser->in_unbraced_linkage_specification_p)
        decl_specifiers->storage_class = sc_extern;
      decl = start_decl (declarator, decl_specifiers,
                         range_for_decl_p? SD_INITIALIZED : is_initialized,
                         attributes, prefix_attributes,
                         &pushed_scope);
      /* Adjust location of decl if declarator->id_loc is more appropriate:
         set, and decl wasn't merged with another decl, in which case its
         location would be different from input_location, and more accurate.  */
      if (DECL_P (decl)
          && declarator->id_loc != UNKNOWN_LOCATION
          && DECL_SOURCE_LOCATION (decl) == input_location)
        DECL_SOURCE_LOCATION (decl) = declarator->id_loc;
    }
  else if (scope)
    /* Enter the SCOPE.  That way unqualified names appearing in the
       initializer will be looked up in SCOPE.  */
    pushed_scope = push_scope (scope);
  /* Perform deferred access control checks, now that we know in which
     SCOPE the declared entity resides.  */
  if (!member_p && decl)
    {
      tree saved_current_function_decl = NULL_TREE;
      /* If the entity being declared is a function, pretend that we
         are in its scope.  If it is a `friend', it may have access to
         things that would not otherwise be accessible.  */
      if (TREE_CODE (decl) == FUNCTION_DECL)
        {
          saved_current_function_decl = current_function_decl;
          current_function_decl = decl;
        }
      /* Perform access checks for template parameters.  */
      cp_parser_perform_template_parameter_access_checks (checks);
      /* Perform the access control checks for the declarator and the
         decl-specifiers.  */
      perform_deferred_access_checks ();
      /* Restore the saved value.  */
      if (TREE_CODE (decl) == FUNCTION_DECL)
        current_function_decl = saved_current_function_decl;
    }
  /* Parse the initializer.  */
  initializer = NULL_TREE;
  is_direct_init = false;
  is_non_constant_init = true;
  if (is_initialized)
    {
      if (function_declarator_p (declarator))
        {
          cp_token *initializer_start_token = cp_lexer_peek_token (parser->lexer);
          /* For a function, "= 0" (or "= default"/"= delete") is a
             pure-specifier, not an initializer.  */
          if (initialization_kind == CPP_EQ)
            initializer = cp_parser_pure_specifier (parser);
          else
            {
              /* If the declaration was erroneous, we don't really
                 know what the user intended, so just silently
                 consume the initializer.  */
              if (decl != error_mark_node)
                error_at (initializer_start_token->location,
                          "initializer provided for function");
              cp_parser_skip_to_closing_parenthesis (parser,
                                                     /*recovering=*/true,
                                                     /*or_comma=*/false,
                                                     /*consume_paren=*/true);
            }
        }
      else
        {
          /* We want to record the extra mangling scope for in-class
             initializers of class members and initializers of static data
             member templates.  The former involves deferring
             parsing of the initializer until end of class as with default
             arguments.  So right here we only handle the latter.  */
          if (!member_p && processing_template_decl)
            start_lambda_scope (decl);
          initializer = cp_parser_initializer (parser,
                                               &is_direct_init,
                                               &is_non_constant_init);
          if (!member_p && processing_template_decl)
            finish_lambda_scope ();
        }
    }
  /* The old parser allows attributes to appear after a parenthesized
     initializer.  Mark Mitchell proposed removing this functionality
     on the GCC mailing lists on 2002-08-13.  This parser accepts the
     attributes -- but ignores them.  */
  if (cp_parser_allow_gnu_extensions_p (parser)
      && initialization_kind == CPP_OPEN_PAREN)
    if (cp_parser_attributes_opt (parser))
      warning (OPT_Wattributes,
               "attributes after parenthesized initializer ignored");
  /* For an in-class declaration, use `grokfield' to create the
     declaration.  */
  if (member_p)
    {
      if (pushed_scope)
        {
          pop_scope (pushed_scope);
          pushed_scope = NULL_TREE;
        }
      decl = grokfield (declarator, decl_specifiers,
                        initializer, !is_non_constant_init,
                        /*asmspec=*/NULL_TREE,
                        prefix_attributes);
      if (decl && TREE_CODE (decl) == FUNCTION_DECL)
        cp_parser_save_default_args (parser, decl);
    }
  /* Finish processing the declaration.  But, skip member
     declarations.  */
  if (!member_p && decl && decl != error_mark_node && !range_for_decl_p)
    {
      cp_finish_decl (decl,
                      initializer, !is_non_constant_init,
                      asm_specification,
                      /* If the initializer is in parentheses, then this is
                         a direct-initialization, which means that an
                         `explicit' constructor is OK.  Otherwise, an
                         `explicit' constructor cannot be used.  */
                      ((is_direct_init || !is_initialized)
                       ? LOOKUP_NORMAL : LOOKUP_IMPLICIT));
    }
  else if ((cxx_dialect != cxx98) && friend_p
           && decl && TREE_CODE (decl) == FUNCTION_DECL)
    /* Core issue #226 (C++0x only): A default template-argument
       shall not be specified in a friend class template
       declaration.  */
    check_default_tmpl_args (decl, current_template_parms, /*is_primary=*/1,
                             /*is_partial=*/0, /*is_friend_decl=*/1);
  if (!friend_p && pushed_scope)
    pop_scope (pushed_scope);
  return decl;
}
/* Parse a declarator.
declarator:
direct-declarator
ptr-operator declarator
abstract-declarator:
ptr-operator abstract-declarator [opt]
direct-abstract-declarator
GNU Extensions:
declarator:
attributes [opt] direct-declarator
attributes [opt] ptr-operator declarator
abstract-declarator:
attributes [opt] ptr-operator abstract-declarator [opt]
attributes [opt] direct-abstract-declarator
If CTOR_DTOR_OR_CONV_P is not NULL, *CTOR_DTOR_OR_CONV_P is used to
detect constructor, destructor or conversion operators. It is set
to -1 if the declarator is a name, and +1 if it is a
function. Otherwise it is set to zero. Usually you just want to
test for >0, but internally the negative value is used.
(The reason for CTOR_DTOR_OR_CONV_P is that a declaration must have
a decl-specifier-seq unless it declares a constructor, destructor,
or conversion. It might seem that we could check this condition in
semantic analysis, rather than parsing, but that makes it difficult
to handle something like `f()'. We want to notice that there are
no decl-specifiers, and therefore realize that this is an
expression, not a declaration.)
If PARENTHESIZED_P is non-NULL, *PARENTHESIZED_P is set to true iff
the declarator is a direct-declarator of the form "(...)".
MEMBER_P is true iff this declarator is a member-declarator. */
/* Parse a declarator or abstract-declarator (see grammar above).
   First try the ptr-operator production tentatively; if it matches,
   recurse for the dependent declarator and wrap it in an indirect
   declarator.  Otherwise fall back to a direct-declarator.  GNU
   attributes, if allowed, may precede either form.  */
static cp_declarator *
cp_parser_declarator (cp_parser* parser,
                      cp_parser_declarator_kind dcl_kind,
                      int* ctor_dtor_or_conv_p,
                      bool* parenthesized_p,
                      bool member_p)
{
  cp_declarator *result;
  enum tree_code ptr_op;
  cp_cv_quals quals;
  tree mem_class_type;
  tree gnu_attributes = NULL_TREE;

  /* Until proven otherwise, this is not a constructor, destructor,
     or type-conversion operator.  */
  if (ctor_dtor_or_conv_p)
    *ctor_dtor_or_conv_p = 0;

  /* GNU extension: attributes may precede the declarator.  */
  if (cp_parser_allow_gnu_extensions_p (parser))
    gnu_attributes = cp_parser_attributes_opt (parser);

  /* Tentatively try the ptr-operator production.  */
  cp_parser_parse_tentatively (parser);
  ptr_op = cp_parser_ptr_operator (parser, &mem_class_type, &quals);

  if (cp_parser_parse_definitely (parser))
    {
      /* A ptr-operator was found, so the declarator cannot have been
         parenthesized.  */
      if (parenthesized_p)
        *parenthesized_p = true;

      /* When parsing an abstract-declarator the dependent declarator
         is optional, so parse it tentatively.  */
      bool dependent_optional = (dcl_kind != CP_PARSER_DECLARATOR_NAMED);
      if (dependent_optional)
        cp_parser_parse_tentatively (parser);

      /* Recurse for the dependent declarator.  */
      result = cp_parser_declarator (parser, dcl_kind,
                                     /*ctor_dtor_or_conv_p=*/NULL,
                                     /*parenthesized_p=*/NULL,
                                     /*member_p=*/false);

      /* An absent dependent declarator is fine for the abstract case.  */
      if (dependent_optional && !cp_parser_parse_definitely (parser))
        result = NULL;

      result = cp_parser_make_indirect_declarator
        (ptr_op, mem_class_type, quals, result);
    }
  else
    {
      /* No ptr-operator: everything else is a direct-declarator.  */
      if (parenthesized_p)
        *parenthesized_p = cp_lexer_next_token_is (parser->lexer,
                                                   CPP_OPEN_PAREN);
      result = cp_parser_direct_declarator (parser, dcl_kind,
                                            ctor_dtor_or_conv_p,
                                            member_p);
    }

  /* Attach any leading GNU attributes to a valid declarator.  */
  if (gnu_attributes && result && result != cp_error_declarator)
    result->attributes = gnu_attributes;

  return result;
}
/* Parse a direct-declarator or direct-abstract-declarator.
direct-declarator:
declarator-id
direct-declarator ( parameter-declaration-clause )
cv-qualifier-seq [opt]
exception-specification [opt]
direct-declarator [ constant-expression [opt] ]
( declarator )
direct-abstract-declarator:
direct-abstract-declarator [opt]
( parameter-declaration-clause )
cv-qualifier-seq [opt]
exception-specification [opt]
direct-abstract-declarator [opt] [ constant-expression [opt] ]
( abstract-declarator )
Returns a representation of the declarator. DCL_KIND is
CP_PARSER_DECLARATOR_ABSTRACT, if we are parsing a
direct-abstract-declarator. It is CP_PARSER_DECLARATOR_NAMED, if
we are parsing a direct-declarator. It is
CP_PARSER_DECLARATOR_EITHER, if we can accept either - in the case
of ambiguity we prefer an abstract declarator, as per
[dcl.ambig.res]. CTOR_DTOR_OR_CONV_P and MEMBER_P are as for
cp_parser_declarator. */
static cp_declarator *
cp_parser_direct_declarator (cp_parser* parser,
                             cp_parser_declarator_kind dcl_kind,
                             int* ctor_dtor_or_conv_p,
                             bool member_p)
{
  cp_token *token;
  cp_declarator *declarator = NULL;
  tree scope = NULL_TREE;
  bool saved_default_arg_ok_p = parser->default_arg_ok_p;
  bool saved_in_declarator_p = parser->in_declarator_p;
  /* FIRST is true only while we are still looking at the very first
     component of the declarator (before any declarator-id, nested
     declarator, parameter list, or array bound has been consumed).  */
  bool first = true;
  tree pushed_scope = NULL_TREE;
  /* Loop over the postfix components of the declarator: parameter
     lists, array bounds, and (on the first iteration only) a
     declarator-id or parenthesized declarator.  */
  while (true)
    {
      /* Peek at the next token.  */
      token = cp_lexer_peek_token (parser->lexer);
      if (token->type == CPP_OPEN_PAREN)
        {
          /* This is either a parameter-declaration-clause, or a
             parenthesized declarator.  When we know we are parsing a
             named declarator, it must be a parenthesized declarator
             if FIRST is true.  For instance, `(int)' is a
             parameter-declaration-clause, with an omitted
             direct-abstract-declarator.  But `((*))', is a
             parenthesized abstract declarator.  Finally, when T is a
             template parameter `(T)' is a
             parameter-declaration-clause, and not a parenthesized
             named declarator.
             We first try and parse a parameter-declaration-clause,
             and then try a nested declarator (if FIRST is true).
             It is not an error for it not to be a
             parameter-declaration-clause, even when FIRST is
             false.  Consider,
               int i (int);
               int i (3);
             The first is the declaration of a function while the
             second is the definition of a variable, including its
             initializer.
             Having seen only the parenthesis, we cannot know which of
             these two alternatives should be selected.  Even more
             complex are examples like:
               int i (int (a));
               int i (int (3));
             The former is a function-declaration; the latter is a
             variable initialization.
             Thus again, we try a parameter-declaration-clause, and if
             that fails, we back out and return.  */
          if (!first || dcl_kind != CP_PARSER_DECLARATOR_NAMED)
            {
              tree params;
              unsigned saved_num_template_parameter_lists;
              bool is_declarator = false;
              tree t;
              /* In a member-declarator, the only valid interpretation
                 of a parenthesis is the start of a
                 parameter-declaration-clause.  (It is invalid to
                 initialize a static data member with a parenthesized
                 initializer; only the "=" form of initialization is
                 permitted.)  */
              if (!member_p)
                cp_parser_parse_tentatively (parser);
              /* Consume the `('.  */
              cp_lexer_consume_token (parser->lexer);
              if (first)
                {
                  /* If this is going to be an abstract declarator, we're
                     in a declarator and we can't have default args.  */
                  parser->default_arg_ok_p = false;
                  parser->in_declarator_p = true;
                }
              /* Inside the function parameter list, surrounding
                 template-parameter-lists do not apply.  */
              saved_num_template_parameter_lists
                = parser->num_template_parameter_lists;
              parser->num_template_parameter_lists = 0;
              begin_scope (sk_function_parms, NULL_TREE);
              /* Parse the parameter-declaration-clause.  */
              params = cp_parser_parameter_declaration_clause (parser);
              parser->num_template_parameter_lists
                = saved_num_template_parameter_lists;
              /* Consume the `)'.  */
              cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
              /* If all went well, parse the cv-qualifier-seq and the
                 exception-specification.  */
              if (member_p || cp_parser_parse_definitely (parser))
                {
                  cp_cv_quals cv_quals;
                  cp_virt_specifiers virt_specifiers;
                  tree exception_specification;
                  tree late_return;
                  is_declarator = true;
                  /* A name followed by a parameter list is a function.
                     Turn the tentative -1 (id that looked like a
                     ctor/dtor/conversion) into +1, anything else into 0.  */
                  if (ctor_dtor_or_conv_p)
                    *ctor_dtor_or_conv_p = *ctor_dtor_or_conv_p < 0;
                  first = false;
                  /* Parse the cv-qualifier-seq.  */
                  cv_quals = cp_parser_cv_qualifier_seq_opt (parser);
                  /* And the exception-specification.  */
                  exception_specification
                    = cp_parser_exception_specification_opt (parser);
                  /* Parse the virt-specifier-seq.  */
                  virt_specifiers = cp_parser_virt_specifier_seq_opt (parser);
                  /* Parse an optional trailing return type (C++0x).  */
                  late_return = (cp_parser_late_return_type_opt
                                 (parser, member_p ? cv_quals : -1));
                  /* Create the function-declarator.  */
                  declarator = make_call_declarator (declarator,
                                                     params,
                                                     cv_quals,
                                                     virt_specifiers,
                                                     exception_specification,
                                                     late_return);
                  /* Any subsequent parameter lists are to do with
                     return type, so are not those of the declared
                     function.  */
                  parser->default_arg_ok_p = false;
                }
              /* Remove the function parms from scope.  */
              for (t = current_binding_level->names; t; t = DECL_CHAIN (t))
                pop_binding (DECL_NAME (t), t);
              leave_scope();
              if (is_declarator)
                /* Repeat the main loop.  */
                continue;
            }
          /* If this is the first, we can try a parenthesized
             declarator.  */
          if (first)
            {
              bool saved_in_type_id_in_expr_p;
              parser->default_arg_ok_p = saved_default_arg_ok_p;
              parser->in_declarator_p = saved_in_declarator_p;
              /* Consume the `('.  */
              cp_lexer_consume_token (parser->lexer);
              /* Parse the nested declarator.  */
              saved_in_type_id_in_expr_p = parser->in_type_id_in_expr_p;
              parser->in_type_id_in_expr_p = true;
              declarator
                = cp_parser_declarator (parser, dcl_kind, ctor_dtor_or_conv_p,
                                        /*parenthesized_p=*/NULL,
                                        member_p);
              parser->in_type_id_in_expr_p = saved_in_type_id_in_expr_p;
              first = false;
              /* Expect a `)'.  */
              if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN))
                declarator = cp_error_declarator;
              if (declarator == cp_error_declarator)
                break;
              /* Treat the nested declarator like a declarator-id for
                 scope handling below.  */
              goto handle_declarator;
            }
          /* Otherwise, we must be done.  */
          else
            break;
        }
      else if ((!first || dcl_kind != CP_PARSER_DECLARATOR_NAMED)
               && token->type == CPP_OPEN_SQUARE)
        {
          /* Parse an array-declarator.  */
          tree bounds;
          if (ctor_dtor_or_conv_p)
            *ctor_dtor_or_conv_p = 0;
          first = false;
          parser->default_arg_ok_p = false;
          parser->in_declarator_p = true;
          /* Consume the `['.  */
          cp_lexer_consume_token (parser->lexer);
          /* Peek at the next token.  */
          token = cp_lexer_peek_token (parser->lexer);
          /* If the next token is `]', then there is no
             constant-expression.  */
          if (token->type != CPP_CLOSE_SQUARE)
            {
              bool non_constant_p;
              bounds
                = cp_parser_constant_expression (parser,
                                                 /*allow_non_constant=*/true,
                                                 &non_constant_p);
              if (!non_constant_p)
                /* OK */;
              else if (error_operand_p (bounds))
                /* Already gave an error.  */;
              else if (!parser->in_function_body
                       || current_binding_level->kind == sk_function_parms)
                {
                  /* Normally, the array bound must be an integral constant
                     expression.  However, as an extension, we allow VLAs
                     in function scopes as long as they aren't part of a
                     parameter declaration.  */
                  cp_parser_error (parser,
                                   "array bound is not an integer constant");
                  bounds = error_mark_node;
                }
              else if (processing_template_decl)
                {
                  /* Remember this wasn't a constant-expression.  */
                  bounds = build_nop (TREE_TYPE (bounds), bounds);
                  TREE_SIDE_EFFECTS (bounds) = 1;
                }
            }
          else
            bounds = NULL_TREE;
          /* Look for the closing `]'.  */
          if (!cp_parser_require (parser, CPP_CLOSE_SQUARE, RT_CLOSE_SQUARE))
            {
              declarator = cp_error_declarator;
              break;
            }
          declarator = make_array_declarator (declarator, bounds);
        }
      else if (first && dcl_kind != CP_PARSER_DECLARATOR_ABSTRACT)
        {
          {
            tree qualifying_scope;
            tree unqualified_name;
            special_function_kind sfk;
            bool abstract_ok;
            bool pack_expansion_p = false;
            cp_token *declarator_id_start_token;
            /* Parse a declarator-id */
            abstract_ok = (dcl_kind == CP_PARSER_DECLARATOR_EITHER);
            if (abstract_ok)
              {
                cp_parser_parse_tentatively (parser);
                /* If we see an ellipsis, we should be looking at a
                   parameter pack.  */
                if (token->type == CPP_ELLIPSIS)
                  {
                    /* Consume the `...' */
                    cp_lexer_consume_token (parser->lexer);
                    pack_expansion_p = true;
                  }
              }
            declarator_id_start_token = cp_lexer_peek_token (parser->lexer);
            unqualified_name
              = cp_parser_declarator_id (parser, /*optional_p=*/abstract_ok);
            qualifying_scope = parser->scope;
            if (abstract_ok)
              {
                bool okay = false;
                if (!unqualified_name && pack_expansion_p)
                  {
                    /* Check whether an error occurred.  */
                    okay = !cp_parser_error_occurred (parser);
                    /* We already consumed the ellipsis to mark a
                       parameter pack, but we have no way to report it,
                       so abort the tentative parse.  We will be exiting
                       immediately anyway.  */
                    cp_parser_abort_tentative_parse (parser);
                  }
                else
                  okay = cp_parser_parse_definitely (parser);
                if (!okay)
                  unqualified_name = error_mark_node;
                else if (unqualified_name
                         && (qualifying_scope
                             || (TREE_CODE (unqualified_name)
                                 != IDENTIFIER_NODE)))
                  {
                    cp_parser_error (parser, "expected unqualified-id");
                    unqualified_name = error_mark_node;
                  }
              }
            if (!unqualified_name)
              return NULL;
            if (unqualified_name == error_mark_node)
              {
                declarator = cp_error_declarator;
                pack_expansion_p = false;
                declarator->parameter_pack_p = false;
                break;
              }
            if (qualifying_scope && at_namespace_scope_p ()
                && TREE_CODE (qualifying_scope) == TYPENAME_TYPE)
              {
                /* In the declaration of a member of a template class
                   outside of the class itself, the SCOPE will sometimes
                   be a TYPENAME_TYPE.  For example, given:
                     template <typename T>
                     int S<T>::R::i = 3;
                   the SCOPE will be a TYPENAME_TYPE for `S<T>::R'.  In
                   this context, we must resolve S<T>::R to an ordinary
                   type, rather than a typename type.
                   The reason we normally avoid resolving TYPENAME_TYPEs
                   is that a specialization of `S' might render
                   `S<T>::R' not a type.  However, if `S' is
                   specialized, then this `i' will not be used, so there
                   is no harm in resolving the types here.  */
                tree type;
                /* Resolve the TYPENAME_TYPE.  */
                type = resolve_typename_type (qualifying_scope,
                                              /*only_current_p=*/false);
                /* If that failed, the declarator is invalid.  */
                if (TREE_CODE (type) == TYPENAME_TYPE)
                  {
                    if (typedef_variant_p (type))
                      error_at (declarator_id_start_token->location,
                                "cannot define member of dependent typedef "
                                "%qT", type);
                    else
                      error_at (declarator_id_start_token->location,
                                "%<%T::%E%> is not a type",
                                TYPE_CONTEXT (qualifying_scope),
                                TYPE_IDENTIFIER (qualifying_scope));
                  }
                qualifying_scope = type;
              }
            sfk = sfk_none;
            if (unqualified_name)
              {
                tree class_type;
                if (qualifying_scope
                    && CLASS_TYPE_P (qualifying_scope))
                  class_type = qualifying_scope;
                else
                  class_type = current_class_type;
                if (TREE_CODE (unqualified_name) == TYPE_DECL)
                  {
                    tree name_type = TREE_TYPE (unqualified_name);
                    if (class_type && same_type_p (name_type, class_type))
                      {
                        if (qualifying_scope
                            && CLASSTYPE_USE_TEMPLATE (name_type))
                          {
                            error_at (declarator_id_start_token->location,
                                      "invalid use of constructor as a template");
                            inform (declarator_id_start_token->location,
                                    "use %<%T::%D%> instead of %<%T::%D%> to "
                                    "name the constructor in a qualified name",
                                    class_type,
                                    DECL_NAME (TYPE_TI_TEMPLATE (class_type)),
                                    class_type, name_type);
                            declarator = cp_error_declarator;
                            break;
                          }
                        else
                          unqualified_name = constructor_name (class_type);
                      }
                    else
                      {
                        /* We do not attempt to print the declarator
                           here because we do not have enough
                           information about its original syntactic
                           form.  */
                        cp_parser_error (parser, "invalid declarator");
                        declarator = cp_error_declarator;
                        break;
                      }
                  }
                /* Classify the declarator-id as constructor,
                   destructor, or conversion operator if possible.  */
                if (class_type)
                  {
                    if (TREE_CODE (unqualified_name) == BIT_NOT_EXPR)
                      sfk = sfk_destructor;
                    else if (IDENTIFIER_TYPENAME_P (unqualified_name))
                      sfk = sfk_conversion;
                    else if (/* There's no way to declare a constructor
                                for an anonymous type, even if the type
                                got a name for linkage purposes.  */
                             !TYPE_WAS_ANONYMOUS (class_type)
                             && constructor_name_p (unqualified_name,
                                                    class_type))
                      {
                        unqualified_name = constructor_name (class_type);
                        sfk = sfk_constructor;
                      }
                    else if (is_overloaded_fn (unqualified_name)
                             && DECL_CONSTRUCTOR_P (get_first_fn
                                                    (unqualified_name)))
                      sfk = sfk_constructor;
                    if (ctor_dtor_or_conv_p && sfk != sfk_none)
                      *ctor_dtor_or_conv_p = -1;
                  }
              }
            declarator = make_id_declarator (qualifying_scope,
                                             unqualified_name,
                                             sfk);
            declarator->id_loc = token->location;
            declarator->parameter_pack_p = pack_expansion_p;
            if (pack_expansion_p)
              maybe_warn_variadic_templates ();
          }
        handle_declarator:;
          /* Common handling for a declarator-id and a parenthesized
             declarator: enter the id's scope and decide whether
             default args are OK from here on.  */
          scope = get_scope_of_declarator (declarator);
          if (scope)
            /* Any names that appear after the declarator-id for a
               member are looked up in the containing scope.  */
            pushed_scope = push_scope (scope);
          parser->in_declarator_p = true;
          if ((ctor_dtor_or_conv_p && *ctor_dtor_or_conv_p)
              || (declarator && declarator->kind == cdk_id))
            /* Default args are only allowed on function
               declarations.  */
            parser->default_arg_ok_p = saved_default_arg_ok_p;
          else
            parser->default_arg_ok_p = false;
          first = false;
        }
      /* We're done.  */
      else
        break;
    }
  /* For an abstract declarator, we might wind up with nothing at this
     point.  That's an error; the declarator is not optional.  */
  if (!declarator)
    cp_parser_error (parser, "expected declarator");
  /* If we entered a scope, we must exit it now.  */
  if (pushed_scope)
    pop_scope (pushed_scope);
  parser->default_arg_ok_p = saved_default_arg_ok_p;
  parser->in_declarator_p = saved_in_declarator_p;
  return declarator;
}
/* Parse a ptr-operator.
ptr-operator:
* cv-qualifier-seq [opt]
&
:: [opt] nested-name-specifier * cv-qualifier-seq [opt]
GNU Extension:
ptr-operator:
& cv-qualifier-seq [opt]
Returns INDIRECT_REF if a pointer, or pointer-to-member, was used.
Returns ADDR_EXPR if a reference was used, or NON_LVALUE_EXPR for
an rvalue reference. In the case of a pointer-to-member, *TYPE is
filled in with the TYPE containing the member. *CV_QUALS is
filled in with the cv-qualifier-seq, or TYPE_UNQUALIFIED, if there
are no cv-qualifiers. Returns ERROR_MARK if an error occurred.
Note that the tree codes returned by this function have nothing
to do with the types of trees that will be eventually be created
to represent the pointer or reference type being parsed. They are
just constants with suggestive names. */
/* Parse a ptr-operator (see grammar above).  Returns INDIRECT_REF for
   a pointer or pointer-to-member, ADDR_EXPR for an lvalue reference,
   NON_LVALUE_EXPR for an rvalue reference, or ERROR_MARK on failure.
   *TYPE receives the class for a pointer-to-member (else NULL_TREE);
   *CV_QUALS receives any trailing cv-qualifiers.  */
static enum tree_code
cp_parser_ptr_operator (cp_parser* parser,
                        tree* type,
                        cp_cv_quals *cv_quals)
{
  enum tree_code result = ERROR_MARK;
  cp_token *token;

  /* Default outputs: not a pointer-to-member, no cv-qualifiers.  */
  *type = NULL_TREE;
  *cv_quals = TYPE_UNQUALIFIED;

  /* A leading `*', `&' or (in C++0x) `&&' is a simple ptr-operator.  */
  token = cp_lexer_peek_token (parser->lexer);
  switch (token->type)
    {
    case CPP_MULT:
      result = INDIRECT_REF;
      break;
    case CPP_AND:
      result = ADDR_EXPR;
      break;
    case CPP_AND_AND:
      /* Rvalue references exist only in C++0x.  */
      if (cxx_dialect != cxx98)
        result = NON_LVALUE_EXPR;
      break;
    default:
      break;
    }

  if (result != ERROR_MARK)
    {
      /* Consume the `*', `&' or `&&'.  */
      cp_lexer_consume_token (parser->lexer);
      /* A `*' can be followed by a cv-qualifier-seq, and so can a
         `&', if we are allowing GNU extensions.  (The only qualifier
         that can legally appear after `&' is `restrict', but that is
         enforced during semantic analysis.)  */
      if (result == INDIRECT_REF
          || cp_parser_allow_gnu_extensions_p (parser))
        *cv_quals = cp_parser_cv_qualifier_seq_opt (parser);
      return result;
    }

  /* Otherwise, try the pointer-to-member form:
     :: [opt] nested-name-specifier * cv-qualifier-seq [opt]  */
  cp_parser_parse_tentatively (parser);
  /* Optional `::'.  */
  cp_parser_global_scope_opt (parser,
                              /*current_scope_valid_p=*/false);
  /* The nested-name-specifier naming the class.  */
  token = cp_lexer_peek_token (parser->lexer);
  cp_parser_nested_name_specifier (parser,
                                   /*typename_keyword_p=*/false,
                                   /*check_dependency_p=*/true,
                                   /*type_p=*/false,
                                   /*is_declaration=*/false);
  /* If that worked and a `*' follows, this really is a
     pointer-to-member operator.  */
  if (!cp_parser_error_occurred (parser)
      && cp_parser_require (parser, CPP_MULT, RT_MULT))
    {
      result = INDIRECT_REF;
      if (TREE_CODE (parser->scope) == NAMESPACE_DECL)
        error_at (token->location, "%qD is a namespace", parser->scope);
      else if (TREE_CODE (parser->scope) == ENUMERAL_TYPE)
        error_at (token->location, "cannot form pointer to member of "
                  "non-class %q#T", parser->scope);
      else
        {
          /* The type of which the member is a member is given by the
             current SCOPE.  */
          *type = parser->scope;
          /* The next name will not be qualified.  */
          parser->scope = NULL_TREE;
          parser->qualifying_scope = NULL_TREE;
          parser->object_scope = NULL_TREE;
          /* Look for the optional cv-qualifier-seq.  */
          *cv_quals = cp_parser_cv_qualifier_seq_opt (parser);
        }
    }
  /* If the tentative parse failed, there is no ptr-operator here.  */
  if (!cp_parser_parse_definitely (parser))
    cp_parser_error (parser, "expected ptr-operator");

  return result;
}
/* Parse an (optional) cv-qualifier-seq.
cv-qualifier-seq:
cv-qualifier cv-qualifier-seq [opt]
cv-qualifier:
const
volatile
GNU Extension:
cv-qualifier:
__restrict__
Returns a bitmask representing the cv-qualifiers. */
/* Parse an (optional) cv-qualifier-seq: any run of `const',
   `volatile' and GNU `__restrict__'.  A repeated qualifier is
   diagnosed and discarded.  Returns the accumulated bitmask.  */
static cp_cv_quals
cp_parser_cv_qualifier_seq_opt (cp_parser* parser)
{
  cp_cv_quals quals = TYPE_UNQUALIFIED;

  for (;;)
    {
      cp_token *tok = cp_lexer_peek_token (parser->lexer);
      cp_cv_quals qual;

      /* Map the keyword to a qualifier bit, if it is one.  */
      if (tok->keyword == RID_CONST)
        qual = TYPE_QUAL_CONST;
      else if (tok->keyword == RID_VOLATILE)
        qual = TYPE_QUAL_VOLATILE;
      else if (tok->keyword == RID_RESTRICT)
        qual = TYPE_QUAL_RESTRICT;
      else
        qual = TYPE_UNQUALIFIED;

      /* Anything else ends the sequence.  */
      if (!qual)
        break;

      if (quals & qual)
        {
          /* Diagnose the duplicate and drop the token so we make
             progress.  */
          error_at (tok->location, "duplicate cv-qualifier");
          cp_lexer_purge_token (parser->lexer);
        }
      else
        {
          cp_lexer_consume_token (parser->lexer);
          quals |= qual;
        }
    }

  return quals;
}
/* Parse an (optional) virt-specifier-seq.
virt-specifier-seq:
virt-specifier virt-specifier-seq [opt]
virt-specifier:
override
final
Returns a bitmask representing the virt-specifiers. */
static cp_virt_specifiers
cp_parser_virt_specifier_seq_opt (cp_parser* parser)
{
  cp_virt_specifiers seen = VIRT_SPEC_UNSPECIFIED;

  for (;;)
    {
      cp_token *tok;
      cp_virt_specifiers spec;
      const char *name;

      /* virt-specifiers are context-sensitive identifiers, so only a
	 plain name can continue the sequence.  */
      tok = cp_lexer_peek_token (parser->lexer);
      if (tok->type != CPP_NAME)
	return seen;

      name = IDENTIFIER_POINTER (tok->u.value);
      if (!strcmp (name, "override"))
	{
	  maybe_warn_cpp0x (CPP0X_OVERRIDE_CONTROLS);
	  spec = VIRT_SPEC_OVERRIDE;
	}
      else if (!strcmp (name, "final"))
	{
	  maybe_warn_cpp0x (CPP0X_OVERRIDE_CONTROLS);
	  spec = VIRT_SPEC_FINAL;
	}
      else if (!strcmp (name, "__final"))
	{
	  /* GNU spelling; no C++11 warning.  */
	  spec = VIRT_SPEC_FINAL;
	}
      else
	return seen;

      if (seen & spec)
	{
	  /* A repeated specifier is diagnosed and the token dropped.  */
	  error_at (tok->location, "duplicate virt-specifier");
	  cp_lexer_purge_token (parser->lexer);
	}
      else
	{
	  cp_lexer_consume_token (parser->lexer);
	  seen |= spec;
	}
    }
}
/* Used by handling of trailing-return-types and NSDMI, in which 'this'
is in scope even though it isn't real. */
static void
inject_this_parameter (tree ctype, cp_cv_quals quals)
{
  tree this_ptr;

  if (current_class_ptr)
    {
      /* This isn't cleared between NSDMIs; reuse the existing `this'
	 if it already has the right class type and cv-quals.  */
      tree cur_type = TREE_TYPE (TREE_TYPE (current_class_ptr));
      bool reusable
	= (same_type_ignoring_top_level_qualifiers_p (ctype, cur_type)
	   && cp_type_quals (cur_type) == quals);
      if (reusable)
	return;
    }

  this_ptr = build_this_parm (ctype, quals);
  /* Clear current_class_ptr first to avoid the shortcut in
     cp_build_indirect_ref.  */
  current_class_ptr = NULL_TREE;
  current_class_ref
    = cp_build_indirect_ref (this_ptr, RO_NULL, tf_warning_or_error);
  current_class_ptr = this_ptr;
}
/* Parse a late-specified return type, if any. This is not a separate
non-terminal, but part of a function declarator, which looks like
-> trailing-type-specifier-seq abstract-declarator(opt)
Returns the type indicated by the type-id.
QUALS is either a bitmask of cv_qualifiers or -1 for a non-member
function. */
static tree
cp_parser_late_return_type_opt (cp_parser* parser, cp_cv_quals quals)
{
  tree type;
  /* QUALS is -1 for a non-member function; otherwise it is the
     member function's cv-qualifier bitmask.  */
  bool member_p = (quals >= 0);

  /* A late-specified return type is introduced by `->'; without it
     there is nothing to parse.  */
  if (cp_lexer_peek_token (parser->lexer)->type != CPP_DEREF)
    return NULL_TREE;
  cp_lexer_consume_token (parser->lexer);

  if (member_p)
    {
      /* DR 1207: 'this' is in scope in the trailing return type.  */
      gcc_assert (current_class_ptr == NULL_TREE);
      inject_this_parameter (current_class_type, quals);
    }

  type = cp_parser_trailing_type_id (parser);

  if (member_p)
    current_class_ptr = current_class_ref = NULL_TREE;

  return type;
}
/* Parse a declarator-id.
declarator-id:
id-expression
:: [opt] nested-name-specifier [opt] type-name
In the `id-expression' case, the value returned is as for
cp_parser_id_expression if the id-expression was an unqualified-id.
If the id-expression was a qualified-id, then a SCOPE_REF is
returned. The first operand is the scope (either a NAMESPACE_DECL
or TREE_TYPE), but the second is still just a representation of an
unqualified-id. */
static tree
cp_parser_declarator_id (cp_parser* parser, bool optional_p)
{
  tree expr;

  /* The declarator-id is just an id-expression.  Qualified names are
     assumed to name types (so `S<T>::R' works in `int S<T>::R::i')
     and to be templates where required (so `S<T>::R<T>' works too);
     hence check_dependency_p is false and declarator_p is true.  */
  expr = cp_parser_id_expression (parser,
				  /*template_keyword_p=*/false,
				  /*check_dependency_p=*/false,
				  /*template_p=*/NULL,
				  /*declarator_p=*/true,
				  optional_p);
  /* Strip a BASELINK down to the underlying function set.  */
  if (expr && BASELINK_P (expr))
    expr = BASELINK_FUNCTIONS (expr);
  return expr;
}
/* Parse a type-id.
type-id:
type-specifier-seq abstract-declarator [opt]
Returns the TYPE specified. */
static tree
cp_parser_type_id_1 (cp_parser* parser, bool is_template_arg,
		     bool is_trailing_return)
{
  cp_decl_specifier_seq specs;
  cp_declarator *declarator;

  /* A type-id begins with a type-specifier-seq.  */
  cp_parser_type_specifier_seq (parser, /*is_declaration=*/false,
				is_trailing_return,
				&specs);
  if (specs.type == error_mark_node)
    return error_mark_node;

  /* The abstract-declarator is optional, so try it tentatively and
     back out if nothing parses.  */
  cp_parser_parse_tentatively (parser);
  declarator
    = cp_parser_declarator (parser, CP_PARSER_DECLARATOR_ABSTRACT, NULL,
			    /*parenthesized_p=*/NULL,
			    /*member_p=*/false);
  if (!cp_parser_parse_definitely (parser))
    declarator = NULL;

  if (specs.type && type_uses_auto (specs.type))
    {
      /* `auto' in a type-id is only valid when the abstract declarator
	 is a function declarator with a late-specified return type.  */
      bool auto_ok = (declarator
		      && declarator->kind == cdk_function
		      && declarator->u.function.late_return_type);
      if (!auto_ok)
	{
	  error ("invalid use of %<auto%>");
	  return error_mark_node;
	}
    }

  return groktypename (&specs, declarator, is_template_arg);
}
static tree cp_parser_type_id (cp_parser *parser)
{
  /* An ordinary type-id: not a template argument, not a trailing
     return type.  */
  return cp_parser_type_id_1 (parser, /*is_template_arg=*/false,
			      /*is_trailing_return=*/false);
}
static tree cp_parser_template_type_arg (cp_parser *parser)
{
  tree result;
  /* Type definitions are forbidden inside template arguments; swap in
     the appropriate diagnostic for the duration of the parse.  */
  const char *saved_message = parser->type_definition_forbidden_message;
  parser->type_definition_forbidden_message
    = G_("types may not be defined in template arguments");
  result = cp_parser_type_id_1 (parser, /*is_template_arg=*/true,
				/*is_trailing_return=*/false);
  parser->type_definition_forbidden_message = saved_message;
  return result;
}
static tree cp_parser_trailing_type_id (cp_parser *parser)
{
  /* A type-id appearing after `->' in a trailing-return-type.  */
  return cp_parser_type_id_1 (parser, /*is_template_arg=*/false,
			      /*is_trailing_return=*/true);
}
/* Parse a type-specifier-seq.
type-specifier-seq:
type-specifier type-specifier-seq [opt]
GNU extension:
type-specifier-seq:
attributes type-specifier-seq [opt]
If IS_DECLARATION is true, we are at the start of a "condition" or
exception-declaration, so we might be followed by a declarator-id.
If IS_TRAILING_RETURN is true, we are in a trailing-return-type,
i.e. we've just seen "->".
Sets *TYPE_SPECIFIER_SEQ to represent the sequence. */
static void
cp_parser_type_specifier_seq (cp_parser* parser,
			      bool is_declaration,
			      bool is_trailing_return,
			      cp_decl_specifier_seq *type_specifier_seq)
{
  bool seen_type_specifier = false;
  /* Specifiers are optional: an empty sequence is diagnosed below only
     if nothing at all was seen.  */
  cp_parser_flags flags = CP_PARSER_FLAGS_OPTIONAL;
  cp_token *start_token = NULL;
  /* Clear the TYPE_SPECIFIER_SEQ.  */
  clear_decl_specs (type_specifier_seq);
  /* In the context of a trailing return type, enum E { } is an
     elaborated-type-specifier followed by a function-body, not an
     enum-specifier.  */
  if (is_trailing_return)
    flags |= CP_PARSER_FLAGS_NO_TYPE_DEFINITIONS;
  /* Parse the type-specifiers and attributes.  */
  while (true)
    {
      tree type_specifier;
      bool is_cv_qualifier;
      /* Check for attributes first (GNU extension: attributes may
	 appear anywhere in the sequence).  */
      if (cp_lexer_next_token_is_keyword (parser->lexer, RID_ATTRIBUTE))
	{
	  type_specifier_seq->attributes =
	    chainon (type_specifier_seq->attributes,
		     cp_parser_attributes_opt (parser));
	  continue;
	}
      /* Record the token at the beginning of the type-specifier-seq,
	 for error-reporting purposes.  */
      if (!start_token)
	start_token = cp_lexer_peek_token (parser->lexer);
      /* Look for the type-specifier.  */
      type_specifier = cp_parser_type_specifier (parser,
						 flags,
						 type_specifier_seq,
						 /*is_declaration=*/false,
						 NULL,
						 &is_cv_qualifier);
      if (!type_specifier)
	{
	  /* If the first type-specifier could not be found, this is not a
	     type-specifier-seq at all.  */
	  if (!seen_type_specifier)
	    {
	      cp_parser_error (parser, "expected type-specifier");
	      type_specifier_seq->type = error_mark_node;
	      return;
	    }
	  /* If subsequent type-specifiers could not be found, the
	     type-specifier-seq is complete.  */
	  break;
	}
      seen_type_specifier = true;
      /* The standard says that a condition can be:
	 type-specifier-seq declarator = assignment-expression
	 However, given:
	 struct S {};
	 if (int S = ...)
	 we should treat the "S" as a declarator, not as a
	 type-specifier.  The standard doesn't say that explicitly for
	 type-specifier-seq, but it does say that for
	 decl-specifier-seq in an ordinary declaration.  Perhaps it
	 would be clearer just to allow a decl-specifier-seq here, and
	 then add a semantic restriction that if any decl-specifiers
	 that are not type-specifiers appear, the program is invalid.
	 So: after the first non-cv specifier of a declaration, stop
	 letting user-defined type names continue the sequence.  */
      if (is_declaration && !is_cv_qualifier)
	flags |= CP_PARSER_FLAGS_NO_USER_DEFINED_TYPES;
    }
  /* NOTE(review): START_TOKEN is only set once a non-attribute token
     was reached; presumably at least one such token always is, or
     this dereference would be a null access — confirm upstream.  */
  cp_parser_check_decl_spec (type_specifier_seq, start_token->location);
}
/* Parse a parameter-declaration-clause.
parameter-declaration-clause:
parameter-declaration-list [opt] ... [opt]
parameter-declaration-list , ...
Returns a representation for the parameter declarations. A return
value of NULL indicates a parameter-declaration-clause consisting
only of an ellipsis. */
static tree
cp_parser_parameter_declaration_clause (cp_parser* parser)
{
  tree parameters;
  cp_token *token;
  bool ellipsis_p;
  bool is_error;
  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);
  /* Check for trivial parameter-declaration-clauses.  */
  if (token->type == CPP_ELLIPSIS)
    {
      /* The clause is just `...': consume the `...' token.  */
      cp_lexer_consume_token (parser->lexer);
      return NULL_TREE;
    }
  else if (token->type == CPP_CLOSE_PAREN)
    /* There are no parameters.  */
    {
#ifndef NO_IMPLICIT_EXTERN_C
      /* In a system header inside an implicit extern "C" region, `()'
	 is treated the C way (unspecified parameters) rather than as
	 `(void)'.  */
      if (in_system_header && current_class_type == NULL
	  && current_lang_name == lang_name_c)
	return NULL_TREE;
      else
#endif
	return void_list_node;
    }
  /* Check for `(void)', too, which is a special case.  */
  else if (token->keyword == RID_VOID
	   && (cp_lexer_peek_nth_token (parser->lexer, 2)->type
	       == CPP_CLOSE_PAREN))
    {
      /* Consume the `void' token.  */
      cp_lexer_consume_token (parser->lexer);
      /* There are no parameters.  */
      return void_list_node;
    }
  /* Parse the parameter-declaration-list.  */
  parameters = cp_parser_parameter_declaration_list (parser, &is_error);
  /* If a parse error occurred while parsing the
     parameter-declaration-list, then the entire
     parameter-declaration-clause is erroneous.  */
  if (is_error)
    return NULL;
  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);
  /* If it's a `,', the clause should terminate with an ellipsis.  */
  if (token->type == CPP_COMMA)
    {
      /* Consume the `,'.  */
      cp_lexer_consume_token (parser->lexer);
      /* Expect an ellipsis.  */
      ellipsis_p
	= (cp_parser_require (parser, CPP_ELLIPSIS, RT_ELLIPSIS) != NULL);
    }
  /* It might also be `...' if the optional trailing `,' was
     omitted.  */
  else if (token->type == CPP_ELLIPSIS)
    {
      /* Consume the `...' token.  */
      cp_lexer_consume_token (parser->lexer);
      /* And remember that we saw it.  */
      ellipsis_p = true;
    }
  else
    ellipsis_p = false;
  /* Finish the parameter list: a non-variadic list is terminated with
     void_list_node.  */
  if (!ellipsis_p)
    parameters = chainon (parameters, void_list_node);
  return parameters;
}
/* Parse a parameter-declaration-list.
parameter-declaration-list:
parameter-declaration
parameter-declaration-list , parameter-declaration
Returns a representation of the parameter-declaration-list, as for
cp_parser_parameter_declaration_clause. However, the
`void_list_node' is never appended to the list. Upon return,
*IS_ERROR will be true iff an error occurred. */
static tree
cp_parser_parameter_declaration_list (cp_parser* parser, bool *is_error)
{
  tree parameters = NULL_TREE;
  /* TAIL always points at the chain slot where the next parameter is
     appended, so the list is built in source order.
     (Fixed: `&parameters' had been corrupted to `¶meters'.)  */
  tree *tail = &parameters;
  bool saved_in_unbraced_linkage_specification_p;
  int index = 0;
  /* Assume all will go well.  */
  *is_error = false;
  /* The special considerations that apply to a function within an
     unbraced linkage specifications do not apply to the parameters
     to the function.  */
  saved_in_unbraced_linkage_specification_p
    = parser->in_unbraced_linkage_specification_p;
  parser->in_unbraced_linkage_specification_p = false;
  /* Look for more parameters.  */
  while (true)
    {
      cp_parameter_declarator *parameter;
      tree decl = error_mark_node;
      bool parenthesized_p = false;
      /* Parse the parameter.  */
      parameter
	= cp_parser_parameter_declaration (parser,
					   /*template_parm_p=*/false,
					   &parenthesized_p);
      /* We don't know yet if the enclosing context is deprecated, so wait
	 and warn in grokparms if appropriate.  */
      deprecated_state = DEPRECATED_SUPPRESS;
      if (parameter)
	decl = grokdeclarator (parameter->declarator,
			       &parameter->decl_specifiers,
			       PARM,
			       parameter->default_argument != NULL_TREE,
			       &parameter->decl_specifiers.attributes);
      deprecated_state = DEPRECATED_NORMAL;
      /* If a parse error occurred parsing the parameter declaration,
	 then the entire parameter-declaration-list is erroneous.  */
      if (decl == error_mark_node)
	{
	  *is_error = true;
	  parameters = error_mark_node;
	  break;
	}
      if (parameter->decl_specifiers.attributes)
	cplus_decl_attributes (&decl,
			       parameter->decl_specifiers.attributes,
			       0);
      if (DECL_NAME (decl))
	decl = pushdecl (decl);
      if (decl != error_mark_node)
	{
	  retrofit_lang_decl (decl);
	  /* Record the 1-based position and nesting depth of the
	     parameter.  */
	  DECL_PARM_INDEX (decl) = ++index;
	  DECL_PARM_LEVEL (decl) = function_parm_depth ();
	}
      /* Add the new parameter to the list.  */
      *tail = build_tree_list (parameter->default_argument, decl);
      tail = &TREE_CHAIN (*tail);
      /* Peek at the next token.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_PAREN)
	  || cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS)
	  /* These are for Objective-C++ */
	  || cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON)
	  || cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
	/* The parameter-declaration-list is complete.  */
	break;
      else if (cp_lexer_next_token_is (parser->lexer, CPP_COMMA))
	{
	  cp_token *token;
	  /* Peek at the next token.  */
	  token = cp_lexer_peek_nth_token (parser->lexer, 2);
	  /* If it's an ellipsis, then the list is complete.  */
	  if (token->type == CPP_ELLIPSIS)
	    break;
	  /* Otherwise, there must be more parameters.  Consume the
	     `,'.  */
	  cp_lexer_consume_token (parser->lexer);
	  /* When parsing something like:
	     int i(float f, double d)
	     we can tell after seeing the declaration for "f" that we
	     are not looking at an initialization of a variable "i",
	     but rather at the declaration of a function "i".
	     Due to the fact that the parsing of template arguments
	     (as specified to a template-id) requires backtracking we
	     cannot use this technique when inside a template argument
	     list.  */
	  if (!parser->in_template_argument_list_p
	      && !parser->in_type_id_in_expr_p
	      && cp_parser_uncommitted_to_tentative_parse_p (parser)
	      /* However, a parameter-declaration of the form
		 "float(f)" (which is a valid declaration of a
		 parameter "f") can also be interpreted as an
		 expression (the conversion of "f" to "float").  */
	      && !parenthesized_p)
	    cp_parser_commit_to_tentative_parse (parser);
	}
      else
	{
	  cp_parser_error (parser, "expected %<,%> or %<...%>");
	  if (!cp_parser_uncommitted_to_tentative_parse_p (parser))
	    cp_parser_skip_to_closing_parenthesis (parser,
						   /*recovering=*/true,
						   /*or_comma=*/false,
						   /*consume_paren=*/false);
	  break;
	}
    }
  parser->in_unbraced_linkage_specification_p
    = saved_in_unbraced_linkage_specification_p;
  return parameters;
}
/* Parse a parameter declaration.
parameter-declaration:
decl-specifier-seq ... [opt] declarator
decl-specifier-seq declarator = assignment-expression
decl-specifier-seq ... [opt] abstract-declarator [opt]
decl-specifier-seq abstract-declarator [opt] = assignment-expression
If TEMPLATE_PARM_P is TRUE, then this parameter-declaration
declares a template parameter. (In that case, a non-nested `>'
token encountered during the parsing of the assignment-expression
is not interpreted as a greater-than operator.)
Returns a representation of the parameter, or NULL if an error
occurs. If PARENTHESIZED_P is non-NULL, *PARENTHESIZED_P is set to
true iff the declarator is of the form "(p)". */
static cp_parameter_declarator *
cp_parser_parameter_declaration (cp_parser *parser,
				 bool template_parm_p,
				 bool *parenthesized_p)
{
  int declares_class_or_enum;
  cp_decl_specifier_seq decl_specifiers;
  cp_declarator *declarator;
  tree default_argument;
  cp_token *token = NULL, *declarator_token_start = NULL;
  const char *saved_message;
  /* In a template parameter, `>' is not an operator.

     [temp.param]

     When parsing a default template-argument for a non-type
     template-parameter, the first non-nested `>' is taken as the end
     of the template parameter-list rather than a greater-than
     operator.  */
  /* Type definitions may not appear in parameter types.  */
  saved_message = parser->type_definition_forbidden_message;
  parser->type_definition_forbidden_message
    = G_("types may not be defined in parameter types");
  /* Parse the declaration-specifiers.  */
  cp_parser_decl_specifier_seq (parser,
				CP_PARSER_FLAGS_NONE,
				&decl_specifiers,
				&declares_class_or_enum);
  /* Complain about missing 'typename' or other invalid type names.  */
  if (!decl_specifiers.any_type_specifiers_p)
    cp_parser_parse_and_diagnose_invalid_type_name (parser);
  /* If an error occurred, there's no reason to attempt to parse the
     rest of the declaration.  Restore the diagnostic message first.  */
  if (cp_parser_error_occurred (parser))
    {
      parser->type_definition_forbidden_message = saved_message;
      return NULL;
    }
  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);
  /* If the next token is a `)', `,', `=', `>', or `...', then there
     is no declarator.  However, when variadic templates are enabled,
     there may be a declarator following `...'.  */
  if (token->type == CPP_CLOSE_PAREN
      || token->type == CPP_COMMA
      || token->type == CPP_EQ
      || token->type == CPP_GREATER)
    {
      declarator = NULL;
      if (parenthesized_p)
	*parenthesized_p = false;
    }
  /* Otherwise, there should be a declarator.  */
  else
    {
      bool saved_default_arg_ok_p = parser->default_arg_ok_p;
      parser->default_arg_ok_p = false;
      /* After seeing a decl-specifier-seq, if the next token is not a
	 "(", there is no possibility that the code is a valid
	 expression.  Therefore, if parsing tentatively, we commit at
	 this point.  */
      if (!parser->in_template_argument_list_p
	  /* In an expression context, having seen:
	     (int((char ...
	     we cannot be sure whether we are looking at a
	     function-type (taking a "char" as a parameter) or a cast
	     of some object of type "char" to "int".  */
	  && !parser->in_type_id_in_expr_p
	  && cp_parser_uncommitted_to_tentative_parse_p (parser)
	  && cp_lexer_next_token_is_not (parser->lexer, CPP_OPEN_BRACE)
	  && cp_lexer_next_token_is_not (parser->lexer, CPP_OPEN_PAREN))
	cp_parser_commit_to_tentative_parse (parser);
      /* Parse the declarator; remember its first token for
	 error-reporting below.  */
      declarator_token_start = token;
      declarator = cp_parser_declarator (parser,
					 CP_PARSER_DECLARATOR_EITHER,
					 /*ctor_dtor_or_conv_p=*/NULL,
					 parenthesized_p,
					 /*member_p=*/false);
      parser->default_arg_ok_p = saved_default_arg_ok_p;
      /* After the declarator, allow more attributes.  */
      decl_specifiers.attributes
	= chainon (decl_specifiers.attributes,
		   cp_parser_attributes_opt (parser));
    }
  /* If the next token is an ellipsis, and we have not seen a
     declarator name, and the type of the declarator contains parameter
     packs but it is not a TYPE_PACK_EXPANSION, then we actually have
     a parameter pack expansion expression.  Otherwise, leave the
     ellipsis for a C-style variadic function.  */
  token = cp_lexer_peek_token (parser->lexer);
  if (cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS))
    {
      tree type = decl_specifiers.type;
      if (type && DECL_P (type))
	type = TREE_TYPE (type);
      if (type
	  && TREE_CODE (type) != TYPE_PACK_EXPANSION
	  && declarator_can_be_parameter_pack (declarator)
	  && (!declarator || !declarator->parameter_pack_p)
	  && uses_parameter_packs (type))
	{
	  /* Consume the `...'. */
	  cp_lexer_consume_token (parser->lexer);
	  maybe_warn_variadic_templates ();
	  /* Build a pack expansion type */
	  if (declarator)
	    declarator->parameter_pack_p = true;
	  else
	    decl_specifiers.type = make_pack_expansion (type);
	}
    }
  /* The restriction on defining new types applies only to the type
     of the parameter, not to the default argument.  */
  parser->type_definition_forbidden_message = saved_message;
  /* If the next token is `=', then process a default argument.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_EQ))
    {
      token = cp_lexer_peek_token (parser->lexer);
      /* If we are defining a class, then the tokens that make up the
	 default argument must be saved and processed later.  */
      if (!template_parm_p && at_class_scope_p ()
	  && TYPE_BEING_DEFINED (current_class_type)
	  && !LAMBDA_TYPE_P (current_class_type))
	default_argument = cp_parser_cache_defarg (parser, /*nsdmi=*/false);
      /* Outside of a class definition, we can just parse the
	 assignment-expression.  */
      else
	default_argument
	  = cp_parser_default_argument (parser, template_parm_p);
      if (!parser->default_arg_ok_p)
	{
	  /* Default arguments are not allowed here; -fpermissive
	     downgrades the error to a warning.  */
	  if (flag_permissive)
	    warning (0, "deprecated use of default argument for parameter of non-function");
	  else
	    {
	      error_at (token->location,
			"default arguments are only "
			"permitted for function parameters");
	      default_argument = NULL_TREE;
	    }
	}
      else if ((declarator && declarator->parameter_pack_p)
	       || (decl_specifiers.type
		   && PACK_EXPANSION_P (decl_specifiers.type)))
	{
	  /* A parameter pack cannot have a default argument; find the
	     name of the parameter pack for the diagnostic.  */
	  cp_declarator *id_declarator = declarator;
	  while (id_declarator && id_declarator->kind != cdk_id)
	    id_declarator = id_declarator->declarator;
	  if (id_declarator && id_declarator->kind == cdk_id)
	    error_at (declarator_token_start->location,
		      template_parm_p
		      ? G_("template parameter pack %qD "
			   "cannot have a default argument")
		      : G_("parameter pack %qD cannot have "
			   "a default argument"),
		      id_declarator->u.id.unqualified_name);
	  else
	    error_at (declarator_token_start->location,
		      template_parm_p
		      ? G_("template parameter pack cannot have "
			   "a default argument")
		      : G_("parameter pack cannot have a "
			   "default argument"));
	  default_argument = NULL_TREE;
	}
    }
  else
    default_argument = NULL_TREE;
  return make_parameter_declarator (&decl_specifiers,
				    declarator,
				    default_argument);
}
/* Parse a default argument and return it.
TEMPLATE_PARM_P is true if this is a default argument for a
non-type template parameter. */
static tree
cp_parser_default_argument (cp_parser *parser, bool template_parm_p)
{
  tree arg;
  bool saved_gt_is_op;
  bool saved_locals_forbidden;
  bool non_constant_p, is_direct_init;

  /* Make sure that PARSER->GREATER_THAN_IS_OPERATOR_P is set
     correctly: inside a template parameter, `>' ends the list.  */
  saved_gt_is_op = parser->greater_than_is_operator_p;
  parser->greater_than_is_operator_p = !template_parm_p;
  /* Local variable names (and the `this' keyword) may not appear in a
     default argument.  */
  saved_locals_forbidden = parser->local_variables_forbidden_p;
  parser->local_variables_forbidden_p = true;

  /* Parse the assignment-expression.  */
  if (template_parm_p)
    push_deferring_access_checks (dk_no_deferred);
  arg = cp_parser_initializer (parser, &is_direct_init, &non_constant_p);
  if (BRACE_ENCLOSED_INITIALIZER_P (arg))
    maybe_warn_cpp0x (CPP0X_INITIALIZER_LISTS);
  if (template_parm_p)
    pop_deferring_access_checks ();

  /* Restore the saved parser state.  */
  parser->greater_than_is_operator_p = saved_gt_is_op;
  parser->local_variables_forbidden_p = saved_locals_forbidden;

  return arg;
}
/* Parse a function-body.
function-body:
compound_statement */
static void
cp_parser_function_body (cp_parser *parser)
{
  /* A function-body is just a compound-statement.  */
  cp_parser_compound_statement (parser, NULL, false, true);
}
/* Parse a ctor-initializer-opt followed by a function-body. Return
true if a ctor-initializer was present. */
static bool
cp_parser_ctor_initializer_opt_and_function_body (cp_parser *parser)
{
  tree body, list;
  bool ctor_initializer_p;
  /* Only a constexpr constructor needs the empty-body check below.  */
  const bool check_body_p =
     DECL_CONSTRUCTOR_P (current_function_decl)
     && DECL_DECLARED_CONSTEXPR_P (current_function_decl);
  tree last = NULL;
  /* Begin the function body.  */
  body = begin_function_body ();
  /* Parse the optional ctor-initializer.  */
  ctor_initializer_p = cp_parser_ctor_initializer_opt (parser);
  /* If we're parsing a constexpr constructor definition, we need
     to check that the constructor body is indeed empty.  However,
     before we get to cp_parser_function_body lot of junk has been
     generated, so we can't just check that we have an empty block.
     Rather we take a snapshot of the outermost block, and check whether
     cp_parser_function_body changed its state.  */
  if (check_body_p)
    {
      list = cur_stmt_list;
      if (STATEMENT_LIST_TAIL (list))
	last = STATEMENT_LIST_TAIL (list)->stmt;
    }
  /* Parse the function-body.  */
  cp_parser_function_body (parser);
  /* Compare the snapshot (LAST within LIST) against the current
     statement list to diagnose a non-empty constexpr ctor body.  */
  if (check_body_p)
    check_constexpr_ctor_body (last, list);
  /* Finish the function body.  */
  finish_function_body (body);
  return ctor_initializer_p;
}
/* Parse an initializer.
initializer:
= initializer-clause
( expression-list )
Returns an expression representing the initializer. If no
initializer is present, NULL_TREE is returned.
*IS_DIRECT_INIT is set to FALSE if the `= initializer-clause'
production is used, and TRUE otherwise. *IS_DIRECT_INIT is
set to TRUE if there is no initializer present. If there is an
initializer, and it is not a constant-expression, *NON_CONSTANT_P
is set to true; otherwise it is set to false. */
static tree
cp_parser_initializer (cp_parser* parser, bool* is_direct_init,
		       bool* non_constant_p)
{
  cp_token *token = cp_lexer_peek_token (parser->lexer);
  tree init;

  /* Only the `= initializer-clause' form is copy-initialization;
     everything else is direct-initialization.  */
  *is_direct_init = (token->type != CPP_EQ);
  /* Assume that the initializer is constant.  */
  *non_constant_p = false;

  switch (token->type)
    {
    case CPP_EQ:
      /* Consume the `=' and parse the initializer-clause.  */
      cp_lexer_consume_token (parser->lexer);
      init = cp_parser_initializer_clause (parser, non_constant_p);
      break;

    case CPP_OPEN_PAREN:
      {
	/* A parenthesized expression-list.  */
	VEC(tree,gc) *vec;
	vec = cp_parser_parenthesized_expression_list (parser, non_attr,
						       /*cast_p=*/false,
						       /*allow_expansion_p=*/true,
						       non_constant_p);
	if (vec == NULL)
	  return error_mark_node;
	init = build_tree_list_vec (vec);
	release_tree_vector (vec);
      }
      break;

    case CPP_OPEN_BRACE:
      /* A braced-init-list.  */
      maybe_warn_cpp0x (CPP0X_INITIALIZER_LISTS);
      init = cp_parser_braced_list (parser, non_constant_p);
      CONSTRUCTOR_IS_DIRECT_INIT (init) = 1;
      break;

    default:
      /* Anything else is an error.  */
      cp_parser_error (parser, "expected initializer");
      init = error_mark_node;
      break;
    }

  return init;
}
/* Parse an initializer-clause.
initializer-clause:
assignment-expression
braced-init-list
Returns an expression representing the initializer.
If the `assignment-expression' production is used the value
returned is simply a representation for the expression.
Otherwise, calls cp_parser_braced_list. */
static tree
cp_parser_initializer_clause (cp_parser* parser, bool* non_constant_p)
{
  tree init;

  /* Assume the expression is constant until proven otherwise.  */
  *non_constant_p = false;

  /* A `{' introduces a braced-init-list; anything else must be an
     assignment-expression.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
    init = cp_parser_braced_list (parser, non_constant_p);
  else
    init = cp_parser_constant_expression (parser,
					  /*allow_non_constant_p=*/true,
					  non_constant_p);

  return init;
}
/* Parse a brace-enclosed initializer list.
braced-init-list:
{ initializer-list , [opt] }
{ }
Returns a CONSTRUCTOR. The CONSTRUCTOR_ELTS will be
the elements of the initializer-list (or NULL, if the last
production is used). The TREE_TYPE for the CONSTRUCTOR will be
NULL_TREE. There is no way to detect whether or not the optional
trailing `,' was provided. NON_CONSTANT_P is as for
cp_parser_initializer. */
static tree
cp_parser_braced_list (cp_parser* parser, bool* non_constant_p)
{
  tree ctor;

  /* Eat the opening `{'.  */
  cp_lexer_consume_token (parser->lexer);
  /* Represent the braced-initializer as a CONSTRUCTOR.  */
  ctor = make_node (CONSTRUCTOR);
  /* Anything other than an immediate `}' means there is a non-trivial
     initializer-list.  */
  if (cp_lexer_next_token_is_not (parser->lexer, CPP_CLOSE_BRACE))
    {
      CONSTRUCTOR_ELTS (ctor)
	= cp_parser_initializer_list (parser, non_constant_p);
      /* A trailing `,' token is allowed.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_COMMA))
	cp_lexer_consume_token (parser->lexer);
    }
  /* Now, there should be a trailing `}'.  */
  cp_parser_require (parser, CPP_CLOSE_BRACE, RT_CLOSE_BRACE);
  TREE_TYPE (ctor) = init_list_type_node;
  return ctor;
}
/* Parse an initializer-list.
initializer-list:
initializer-clause ... [opt]
initializer-list , initializer-clause ... [opt]
GNU Extension:
initializer-list:
designation initializer-clause ...[opt]
initializer-list , designation initializer-clause ...[opt]
designation:
. identifier =
identifier :
[ constant-expression ] =
Returns a VEC of constructor_elt. The VALUE of each elt is an expression
for the initializer. If the INDEX of the elt is non-NULL, it is the
IDENTIFIER_NODE naming the field to initialize. NON_CONSTANT_P is
as for cp_parser_initializer. */
static VEC(constructor_elt,gc) *
cp_parser_initializer_list (cp_parser* parser, bool* non_constant_p)
{
  VEC(constructor_elt,gc) *v = NULL;
  /* Assume all of the expressions are constant.  */
  *non_constant_p = false;
  /* Parse the rest of the list.  */
  while (true)
    {
      cp_token *token;
      tree designator;
      tree initializer;
      bool clause_non_constant_p;
      /* If the next token is an identifier and the following one is a
	 colon, we are looking at the GNU designated-initializer
	 syntax (`identifier : value').  */
      if (cp_parser_allow_gnu_extensions_p (parser)
	  && cp_lexer_next_token_is (parser->lexer, CPP_NAME)
	  && cp_lexer_peek_nth_token (parser->lexer, 2)->type == CPP_COLON)
	{
	  /* Warn the user that they are using an extension.  */
	  pedwarn (input_location, OPT_pedantic,
		   "ISO C++ does not allow designated initializers");
	  /* Consume the identifier.  */
	  designator = cp_lexer_consume_token (parser->lexer)->u.value;
	  /* Consume the `:'.  */
	  cp_lexer_consume_token (parser->lexer);
	}
      /* Also handle the C99 syntax, '. id ='.  */
      else if (cp_parser_allow_gnu_extensions_p (parser)
	       && cp_lexer_next_token_is (parser->lexer, CPP_DOT)
	       && cp_lexer_peek_nth_token (parser->lexer, 2)->type == CPP_NAME
	       && cp_lexer_peek_nth_token (parser->lexer, 3)->type == CPP_EQ)
	{
	  /* Warn the user that they are using an extension.  */
	  pedwarn (input_location, OPT_pedantic,
		   "ISO C++ does not allow C99 designated initializers");
	  /* Consume the `.'.  */
	  cp_lexer_consume_token (parser->lexer);
	  /* Consume the identifier.  */
	  designator = cp_lexer_consume_token (parser->lexer)->u.value;
	  /* Consume the `='.  */
	  cp_lexer_consume_token (parser->lexer);
	}
      /* Also handle C99 array designators, '[ const ] ='.  */
      else if (cp_parser_allow_gnu_extensions_p (parser)
	       && !c_dialect_objc ()
	       && cp_lexer_next_token_is (parser->lexer, CPP_OPEN_SQUARE))
	{
	  /* In C++11, [ could start a lambda-introducer, so parse the
	     array designator tentatively and back out on failure.  */
	  cp_parser_parse_tentatively (parser);
	  cp_lexer_consume_token (parser->lexer);
	  designator = cp_parser_constant_expression (parser, false, NULL);
	  cp_parser_require (parser, CPP_CLOSE_SQUARE, RT_CLOSE_SQUARE);
	  cp_parser_require (parser, CPP_EQ, RT_EQ);
	  if (!cp_parser_parse_definitely (parser))
	    designator = NULL_TREE;
	}
      else
	designator = NULL_TREE;
      /* Parse the initializer.  */
      initializer = cp_parser_initializer_clause (parser,
						  &clause_non_constant_p);
      /* If any clause is non-constant, so is the entire initializer.  */
      if (clause_non_constant_p)
	*non_constant_p = true;
      /* If we have an ellipsis, this is an initializer pack
	 expansion.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS))
	{
	  /* Consume the `...'.  */
	  cp_lexer_consume_token (parser->lexer);
	  /* Turn the initializer into an initializer expansion.  */
	  initializer = make_pack_expansion (initializer);
	}
      /* Add it to the vector.  */
      CONSTRUCTOR_APPEND_ELT (v, designator, initializer);
      /* If the next token is not a comma, we have reached the end of
	 the list.  */
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA))
	break;
      /* Peek at the next token.  */
      token = cp_lexer_peek_nth_token (parser->lexer, 2);
      /* If the next token is a `}', then we're still done.  An
	 initializer-clause can have a trailing `,' after the
	 initializer-list and before the closing `}'.  */
      if (token->type == CPP_CLOSE_BRACE)
	break;
      /* Consume the `,' token.  */
      cp_lexer_consume_token (parser->lexer);
    }
  return v;
}
/* Classes [gram.class] */
/* Parse a class-name.
class-name:
identifier
template-id
TYPENAME_KEYWORD_P is true iff the `typename' keyword has been used
to indicate that names looked up in dependent types should be
assumed to be types. TEMPLATE_KEYWORD_P is true iff the `template'
keyword has been used to indicate that the name that appears next
is a template. TAG_TYPE indicates the explicit tag given before
the type name, if any. If CHECK_DEPENDENCY_P is FALSE, names are
looked up in dependent scopes. If CLASS_HEAD_P is TRUE, this class
is the class being defined in a class-head.
Returns the TYPE_DECL representing the class. */
static tree
cp_parser_class_name (cp_parser *parser,
		      bool typename_keyword_p,
		      bool template_keyword_p,
		      enum tag_types tag_type,
		      bool check_dependency_p,
		      bool class_head_p,
		      bool is_declaration)
{
  tree decl;
  tree scope;
  bool typename_p;
  cp_token *token;
  tree identifier = NULL_TREE;

  /* All class-names start with an identifier.  */
  token = cp_lexer_peek_token (parser->lexer);
  if (token->type != CPP_NAME && token->type != CPP_TEMPLATE_ID)
    {
      cp_parser_error (parser, "expected class-name");
      return error_mark_node;
    }

  /* PARSER->SCOPE can be cleared when parsing the template-arguments
     to a template-id, so we save it here.  */
  scope = parser->scope;
  if (scope == error_mark_node)
    return error_mark_node;

  /* Any name names a type if we're following the `typename' keyword
     in a qualified name where the enclosing scope is type-dependent.  */
  typename_p = (typename_keyword_p && scope && TYPE_P (scope)
		&& dependent_type_p (scope));
  /* Handle the common case (an identifier, but not a template-id)
     efficiently.  */
  if (token->type == CPP_NAME
      && !cp_parser_nth_token_starts_template_argument_list_p (parser, 2))
    {
      cp_token *identifier_token;
      bool ambiguous_p;

      /* Look for the identifier.  */
      identifier_token = cp_lexer_peek_token (parser->lexer);
      ambiguous_p = identifier_token->ambiguous_p;
      identifier = cp_parser_identifier (parser);
      /* If the next token isn't an identifier, we are certainly not
	 looking at a class-name.  */
      if (identifier == error_mark_node)
	decl = error_mark_node;
      /* If we know this is a type-name, there's no need to look it
	 up.  */
      else if (typename_p)
	decl = identifier;
      else
	{
	  tree ambiguous_decls;
	  /* If we already know that this lookup is ambiguous, then
	     we've already issued an error message; there's no reason
	     to check again.  */
	  if (ambiguous_p)
	    {
	      cp_parser_simulate_error (parser);
	      return error_mark_node;
	    }
	  /* If the next token is a `::', then the name must be a type
	     name.

	     [basic.lookup.qual]

	     During the lookup for a name preceding the :: scope
	     resolution operator, object, function, and enumerator
	     names are ignored.  */
	  if (cp_lexer_next_token_is (parser->lexer, CPP_SCOPE))
	    tag_type = typename_type;
	  /* Look up the name.  */
	  decl = cp_parser_lookup_name (parser, identifier,
					tag_type,
					/*is_template=*/false,
					/*is_namespace=*/false,
					check_dependency_p,
					&ambiguous_decls,
					identifier_token->location);
	  if (ambiguous_decls)
	    {
	      /* The lookup found several incompatible declarations; an
		 error has already been reported by the lookup machinery,
		 so just record failure here.  */
	      if (cp_parser_parsing_tentatively (parser))
		cp_parser_simulate_error (parser);
	      return error_mark_node;
	    }
	}
    }
  else
    {
      /* Try a template-id.  */
      decl = cp_parser_template_id (parser, template_keyword_p,
				    check_dependency_p,
				    is_declaration);
      if (decl == error_mark_node)
	return error_mark_node;
    }

  decl = cp_parser_maybe_treat_template_as_class (decl, class_head_p);

  /* If this is a typename, create a TYPENAME_TYPE.  */
  if (typename_p && decl != error_mark_node)
    {
      decl = make_typename_type (scope, decl, typename_type,
				 /*complain=*/tf_error);
      /* make_typename_type returns a TYPE; convert back to the
	 corresponding TYPE_DECL, which is what callers expect.  */
      if (decl != error_mark_node)
	decl = TYPE_NAME (decl);
    }

  /* Look through any using-declaration to the underlying entity
     (presumably so a class-name introduced via `using' is accepted
     here -- NOTE(review): confirm against strip_using_decl).  */
  decl = strip_using_decl (decl);

  /* Check to see that it is really the name of a class.  */
  if (TREE_CODE (decl) == TEMPLATE_ID_EXPR
      && TREE_CODE (TREE_OPERAND (decl, 0)) == IDENTIFIER_NODE
      && cp_lexer_next_token_is (parser->lexer, CPP_SCOPE))
    /* Situations like this:

	 template <typename T> struct A {
	   typename T::template X<int>::I i;
	 };

       are problematic.  Is `T::template X<int>' a class-name?  The
       standard does not seem to be definitive, but there is no other
       valid interpretation of the following `::'.  Therefore, those
       names are considered class-names.  */
    {
      decl = make_typename_type (scope, decl, tag_type, tf_error);
      if (decl != error_mark_node)
	decl = TYPE_NAME (decl);
    }
  else if (TREE_CODE (decl) != TYPE_DECL
	   || TREE_TYPE (decl) == error_mark_node
	   || !MAYBE_CLASS_TYPE_P (TREE_TYPE (decl))
	   /* In Objective-C 2.0, a classname followed by '.' starts a
	      dot-syntax expression, and it's not a type-name.  */
	   || (c_dialect_objc ()
	       && cp_lexer_peek_token (parser->lexer)->type == CPP_DOT
	       && objc_is_class_name (decl)))
    decl = error_mark_node;

  if (decl == error_mark_node)
    cp_parser_error (parser, "expected class-name");
  else if (identifier && !parser->scope)
    /* Record the use of the name so that access from within the class
       being defined can be checked later.  */
    maybe_note_name_used_in_class (identifier, decl);

  return decl;
}
/* Parse a class-specifier.
class-specifier:
class-head { member-specification [opt] }
Returns the TREE_TYPE representing the class. */
/* Worker for cp_parser_class_specifier: parses `class-head { members }'
   and performs all of the deferred parsing (default arguments, NSDMIs,
   and member function bodies) once the outermost class is complete.  */
static tree
cp_parser_class_specifier_1 (cp_parser* parser)
{
  tree type;
  tree attributes = NULL_TREE;
  bool nested_name_specifier_p;
  unsigned saved_num_template_parameter_lists;
  bool saved_in_function_body;
  unsigned char in_statement;
  bool in_switch_statement_p;
  bool saved_in_unbraced_linkage_specification_p;
  tree old_scope = NULL_TREE;
  tree scope = NULL_TREE;
  cp_token *closing_brace;

  push_deferring_access_checks (dk_no_deferred);

  /* Parse the class-head.  */
  type = cp_parser_class_head (parser,
			       &nested_name_specifier_p);
  /* If the class-head was a semantic disaster, skip the entire body
     of the class.  */
  if (!type)
    {
      cp_parser_skip_to_end_of_block_or_statement (parser);
      pop_deferring_access_checks ();
      return error_mark_node;
    }

  /* Look for the `{'.  */
  if (!cp_parser_require (parser, CPP_OPEN_BRACE, RT_OPEN_BRACE))
    {
      pop_deferring_access_checks ();
      return error_mark_node;
    }

  /* Issue an error message if type-definitions are forbidden here.  */
  cp_parser_check_type_definition (parser);
  /* Remember that we are defining one more class.  */
  ++parser->num_classes_being_defined;
  /* Inside the class, surrounding template-parameter-lists do not
     apply.  */
  saved_num_template_parameter_lists
    = parser->num_template_parameter_lists;
  parser->num_template_parameter_lists = 0;
  /* We are not in a function body.  */
  saved_in_function_body = parser->in_function_body;
  parser->in_function_body = false;
  /* Or in a loop.  */
  in_statement = parser->in_statement;
  parser->in_statement = 0;
  /* Or in a switch.  */
  in_switch_statement_p = parser->in_switch_statement_p;
  parser->in_switch_statement_p = false;
  /* We are not immediately inside an extern "lang" block.  */
  saved_in_unbraced_linkage_specification_p
    = parser->in_unbraced_linkage_specification_p;
  parser->in_unbraced_linkage_specification_p = false;

  /* Start the class.  */
  if (nested_name_specifier_p)
    {
      /* For a class defined out of its lexical scope (e.g.
	 `struct A::B { ... }'), enter the enclosing scope first.  */
      scope = CP_DECL_CONTEXT (TYPE_MAIN_DECL (type));
      old_scope = push_inner_scope (scope);
    }
  type = begin_class_definition (type);

  if (type == error_mark_node)
    /* If the type is erroneous, skip the entire body of the class.  */
    cp_parser_skip_to_closing_brace (parser);
  else
    /* Parse the member-specification.  */
    cp_parser_member_specification_opt (parser);

  /* Look for the trailing `}'.  */
  closing_brace = cp_parser_require (parser, CPP_CLOSE_BRACE, RT_CLOSE_BRACE);
  /* Look for trailing attributes to apply to this class.  */
  if (cp_parser_allow_gnu_extensions_p (parser))
    attributes = cp_parser_attributes_opt (parser);
  if (type != error_mark_node)
    type = finish_struct (type, attributes);
  if (nested_name_specifier_p)
    pop_inner_scope (old_scope, scope);

  /* We've finished a type definition.  Check for the common syntax
     error of forgetting a semicolon after the definition.  We need to
     be careful, as we can't just check for not-a-semicolon and be done
     with it; the user might have typed:

       class X { } c = ...;
       class X { } *p = ...;

     and so forth.  Instead, enumerate all the possible tokens that
     might follow this production; if we don't see one of them, then
     complain and silently insert the semicolon.  */
  {
    cp_token *token = cp_lexer_peek_token (parser->lexer);
    bool want_semicolon = true;

    switch (token->type)
      {
      case CPP_NAME:
      case CPP_SEMICOLON:
      case CPP_MULT:
      case CPP_AND:
      case CPP_OPEN_PAREN:
      case CPP_CLOSE_PAREN:
      case CPP_COMMA:
	want_semicolon = false;
	break;

	/* While it's legal for type qualifiers and storage class
	   specifiers to follow type definitions in the grammar, only
	   compiler testsuites contain code like that.  Assume that if
	   we see such code, then what we're really seeing is a case
	   like:

	     class X { }
	     const <type> var = ...;

	   or

	     class Y { }
	     static <type> func (...) ...

	   i.e. the qualifier or specifier applies to the next
	   declaration.  To do so, however, we need to look ahead one
	   more token to see if *that* token is a type specifier.

	   This code could be improved to handle:

	     class Z { }
	     static const <type> var = ...;  */
      case CPP_KEYWORD:
	if (keyword_is_decl_specifier (token->keyword))
	  {
	    cp_token *lookahead = cp_lexer_peek_nth_token (parser->lexer, 2);

	    /* Handling user-defined types here would be nice, but very
	       tricky.  */
	    want_semicolon
	      = (lookahead->type == CPP_KEYWORD
		 && keyword_begins_type_specifier (lookahead->keyword));
	  }
	break;
      default:
	break;
      }

    /* If we don't have a type, then something is very wrong and we
       shouldn't try to do anything clever.  Likewise for not seeing the
       closing brace.  */
    if (closing_brace && TYPE_P (type) && want_semicolon)
      {
	cp_token_position prev
	  = cp_lexer_previous_token_position (parser->lexer);
	cp_token *prev_token = cp_lexer_token_at (parser->lexer, prev);
	location_t loc = prev_token->location;

	if (CLASSTYPE_DECLARED_CLASS (type))
	  error_at (loc, "expected %<;%> after class definition");
	else if (TREE_CODE (type) == RECORD_TYPE)
	  error_at (loc, "expected %<;%> after struct definition");
	else if (TREE_CODE (type) == UNION_TYPE)
	  error_at (loc, "expected %<;%> after union definition");
	else
	  gcc_unreachable ();

	/* Unget one token and smash it to look as though we encountered
	   a semicolon in the input stream.  */
	cp_lexer_set_token_position (parser->lexer, prev);
	token = cp_lexer_peek_token (parser->lexer);
	token->type = CPP_SEMICOLON;
	token->keyword = RID_MAX;
      }
  }

  /* If this class is not itself within the scope of another class,
     then we need to parse the bodies of all of the queued function
     definitions.  Note that the queued functions defined in a class
     are not always processed immediately following the
     class-specifier for that class.  Consider:

       struct A {
	 struct B { void f() { sizeof (A); } };
       };

     If `f' were processed before the processing of `A' were
     completed, there would be no way to compute the size of `A'.
     Note that the nesting we are interested in here is lexical --
     not the semantic nesting given by TYPE_CONTEXT.  In particular,
     for:

       struct A { struct B; };
       struct A::B { void f() { } };

     there is no need to delay the parsing of `A::B::f'.  */
  if (--parser->num_classes_being_defined == 0)
    {
      tree decl;
      tree class_type = NULL_TREE;
      tree pushed_scope = NULL_TREE;
      unsigned ix;
      cp_default_arg_entry *e;
      tree save_ccp, save_ccr;

      /* In a first pass, parse default arguments to the functions.
	 Then, in a second pass, parse the bodies of the functions.
	 This two-phased approach handles cases like:

	   struct S {
	     void f() { g(); }
	     void g(int i = 3);
	   };
      */
      FOR_EACH_VEC_ELT (cp_default_arg_entry, unparsed_funs_with_default_args,
			ix, e)
	{
	  decl = e->decl;
	  /* If there are default arguments that have not yet been processed,
	     take care of them now.  */
	  if (class_type != e->class_type)
	    {
	      if (pushed_scope)
		pop_scope (pushed_scope);
	      class_type = e->class_type;
	      pushed_scope = push_scope (class_type);
	    }
	  /* Make sure that any template parameters are in scope.  */
	  maybe_begin_member_template_processing (decl);
	  /* Parse the default argument expressions.  */
	  cp_parser_late_parsing_default_args (parser, decl);
	  /* Remove any template parameters from the symbol table.  */
	  maybe_end_member_template_processing ();
	}
      VEC_truncate (cp_default_arg_entry, unparsed_funs_with_default_args, 0);
      /* Now parse any NSDMIs.  */
      save_ccp = current_class_ptr;
      save_ccr = current_class_ref;
      FOR_EACH_VEC_ELT (tree, unparsed_nsdmis, ix, decl)
	{
	  if (class_type != DECL_CONTEXT (decl))
	    {
	      if (pushed_scope)
		pop_scope (pushed_scope);
	      class_type = DECL_CONTEXT (decl);
	      pushed_scope = push_scope (class_type);
	    }
	  /* Make `this' available while parsing the initializer.  */
	  inject_this_parameter (class_type, TYPE_UNQUALIFIED);
	  cp_parser_late_parsing_nsdmi (parser, decl);
	}
      VEC_truncate (tree, unparsed_nsdmis, 0);
      current_class_ptr = save_ccp;
      current_class_ref = save_ccr;
      if (pushed_scope)
	pop_scope (pushed_scope);
      /* Now parse the body of the functions.  */
      FOR_EACH_VEC_ELT (tree, unparsed_funs_with_definitions, ix, decl)
	cp_parser_late_parsing_for_member (parser, decl);
      VEC_truncate (tree, unparsed_funs_with_definitions, 0);
    }

  /* Put back any saved access checks.  */
  pop_deferring_access_checks ();

  /* Restore saved state.  */
  parser->in_switch_statement_p = in_switch_statement_p;
  parser->in_statement = in_statement;
  parser->in_function_body = saved_in_function_body;
  parser->num_template_parameter_lists
    = saved_num_template_parameter_lists;
  parser->in_unbraced_linkage_specification_p
    = saved_in_unbraced_linkage_specification_p;

  return type;
}
/* Parse a class-specifier, accounting the time spent to the
   TV_PARSE_STRUCT timer.  The actual work is done by
   cp_parser_class_specifier_1.  */
static tree
cp_parser_class_specifier (cp_parser* parser)
{
  tree result;

  timevar_push (TV_PARSE_STRUCT);
  result = cp_parser_class_specifier_1 (parser);
  timevar_pop (TV_PARSE_STRUCT);

  return result;
}
/* Parse a class-head.
class-head:
class-key identifier [opt] base-clause [opt]
class-key nested-name-specifier identifier class-virt-specifier [opt] base-clause [opt]
class-key nested-name-specifier [opt] template-id
base-clause [opt]
class-virt-specifier:
final
GNU Extensions:
class-key attributes identifier [opt] base-clause [opt]
class-key attributes nested-name-specifier identifier base-clause [opt]
class-key attributes nested-name-specifier [opt] template-id
base-clause [opt]
Upon return BASES is initialized to the list of base classes (or
NULL, if there are none) in the same form returned by
cp_parser_base_clause.
Returns the TYPE of the indicated class. Sets
*NESTED_NAME_SPECIFIER_P to TRUE iff one of the productions
involving a nested-name-specifier was used, and FALSE otherwise.
Returns error_mark_node if this is not a class-head.
Returns NULL_TREE if the class-head is syntactically valid, but
semantically invalid in a way that means we should skip the entire
body of the class. */
static tree
cp_parser_class_head (cp_parser* parser,
		      bool* nested_name_specifier_p)
{
  tree nested_name_specifier;
  enum tag_types class_key;
  tree id = NULL_TREE;
  tree type = NULL_TREE;
  tree attributes;
  tree bases;
  cp_virt_specifiers virt_specifiers = VIRT_SPEC_UNSPECIFIED;
  bool template_id_p = false;
  bool qualified_p = false;
  bool invalid_nested_name_p = false;
  bool invalid_explicit_specialization_p = false;
  bool saved_colon_corrects_to_scope_p = parser->colon_corrects_to_scope_p;
  tree pushed_scope = NULL_TREE;
  unsigned num_templates;
  cp_token *type_start_token = NULL, *nested_name_specifier_token_start = NULL;

  /* Assume no nested-name-specifier will be present.  */
  *nested_name_specifier_p = false;
  /* Assume no template parameter lists will be used in defining the
     type.  */
  num_templates = 0;
  /* A `:' here introduces a base-clause, not a scope; disable the
     typo-correction that turns `:' into `::'.  */
  parser->colon_corrects_to_scope_p = false;

  /* Look for the class-key.  */
  class_key = cp_parser_class_key (parser);
  if (class_key == none_type)
    return error_mark_node;

  /* Parse the attributes.  */
  attributes = cp_parser_attributes_opt (parser);

  /* If the next token is `::', that is invalid -- but sometimes
     people do try to write:

       struct ::S {};

     Handle this gracefully by accepting the extra qualifier, and then
     issuing an error about it later if this really is a
     class-head.  If it turns out just to be an elaborated type
     specifier, remain silent.  */
  if (cp_parser_global_scope_opt (parser, /*current_scope_valid_p=*/false))
    qualified_p = true;

  push_deferring_access_checks (dk_no_check);

  /* Determine the name of the class.  Begin by looking for an
     optional nested-name-specifier.  */
  nested_name_specifier_token_start = cp_lexer_peek_token (parser->lexer);
  nested_name_specifier
    = cp_parser_nested_name_specifier_opt (parser,
					   /*typename_keyword_p=*/false,
					   /*check_dependency_p=*/false,
					   /*type_p=*/false,
					   /*is_declaration=*/false);
  /* If there was a nested-name-specifier, then there *must* be an
     identifier.  */
  if (nested_name_specifier)
    {
      type_start_token = cp_lexer_peek_token (parser->lexer);
      /* Although the grammar says `identifier', it really means
	 `class-name' or `template-name'.  You are only allowed to
	 define a class that has already been declared with this
	 syntax.

	 The proposed resolution for Core Issue 180 says that wherever
	 you see `class T::X' you should treat `X' as a type-name.

	 It is OK to define an inaccessible class; for example:

	   class A { class B; };
	   class A::B {};

	 We do not know if we will see a class-name, or a
	 template-name.  We look for a class-name first, in case the
	 class-name is a template-id; if we looked for the
	 template-name first we would stop after the template-name.  */
      cp_parser_parse_tentatively (parser);
      type = cp_parser_class_name (parser,
				   /*typename_keyword_p=*/false,
				   /*template_keyword_p=*/false,
				   class_type,
				   /*check_dependency_p=*/false,
				   /*class_head_p=*/true,
				   /*is_declaration=*/false);
      /* If that didn't work, ignore the nested-name-specifier.  */
      if (!cp_parser_parse_definitely (parser))
	{
	  invalid_nested_name_p = true;
	  type_start_token = cp_lexer_peek_token (parser->lexer);
	  id = cp_parser_identifier (parser);
	  if (id == error_mark_node)
	    id = NULL_TREE;
	}
      /* If we could not find a corresponding TYPE, treat this
	 declaration like an unqualified declaration.  */
      if (type == error_mark_node)
	nested_name_specifier = NULL_TREE;
      /* Otherwise, count the number of templates used in TYPE and its
	 containing scopes.  */
      else
	{
	  tree scope;

	  /* Walk outward from TYPE through its enclosing classes,
	     counting every primary (non-specialized) class template;
	     this is later checked against the number of
	     template-parameter-lists actually supplied.  */
	  for (scope = TREE_TYPE (type);
	       scope && TREE_CODE (scope) != NAMESPACE_DECL;
	       scope = (TYPE_P (scope)
			? TYPE_CONTEXT (scope)
			: DECL_CONTEXT (scope)))
	    if (TYPE_P (scope)
		&& CLASS_TYPE_P (scope)
		&& CLASSTYPE_TEMPLATE_INFO (scope)
		&& PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (scope))
		&& !CLASSTYPE_TEMPLATE_SPECIALIZATION (scope))
	      ++num_templates;
	}
    }
  /* Otherwise, the identifier is optional.  */
  else
    {
      /* We don't know whether what comes next is a template-id,
	 an identifier, or nothing at all.  */
      cp_parser_parse_tentatively (parser);
      /* Check for a template-id.  */
      type_start_token = cp_lexer_peek_token (parser->lexer);
      id = cp_parser_template_id (parser,
				  /*template_keyword_p=*/false,
				  /*check_dependency_p=*/true,
				  /*is_declaration=*/true);
      /* If that didn't work, it could still be an identifier.  */
      if (!cp_parser_parse_definitely (parser))
	{
	  if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
	    {
	      type_start_token = cp_lexer_peek_token (parser->lexer);
	      id = cp_parser_identifier (parser);
	    }
	  else
	    id = NULL_TREE;
	}
      else
	{
	  template_id_p = true;
	  ++num_templates;
	}
    }

  pop_deferring_access_checks ();

  if (id)
    {
      cp_parser_check_for_invalid_template_id (parser, id,
					       type_start_token->location);
    }
  virt_specifiers = cp_parser_virt_specifier_seq_opt (parser);

  /* If it's not a `:' or a `{' then we can't really be looking at a
     class-head, since a class-head only appears as part of a
     class-specifier.  We have to detect this situation before calling
     xref_tag, since that has irreversible side-effects.  */
  if (!cp_parser_next_token_starts_class_definition_p (parser))
    {
      cp_parser_error (parser, "expected %<{%> or %<:%>");
      type = error_mark_node;
      goto out;
    }

  /* At this point, we're going ahead with the class-specifier, even
     if some other problem occurs.  */
  cp_parser_commit_to_tentative_parse (parser);
  if (virt_specifiers & VIRT_SPEC_OVERRIDE)
    {
      cp_parser_error (parser,
                       "cannot specify %<override%> for a class");
      type = error_mark_node;
      goto out;
    }
  /* Issue the error about the overly-qualified name now.  */
  if (qualified_p)
    {
      cp_parser_error (parser,
		       "global qualification of class name is invalid");
      type = error_mark_node;
      goto out;
    }
  else if (invalid_nested_name_p)
    {
      cp_parser_error (parser,
		       "qualified name does not name a class");
      type = error_mark_node;
      goto out;
    }
  else if (nested_name_specifier)
    {
      tree scope;

      /* Reject typedef-names in class heads.  */
      if (!DECL_IMPLICIT_TYPEDEF_P (type))
	{
	  error_at (type_start_token->location,
		    "invalid class name in declaration of %qD",
		    type);
	  type = NULL_TREE;
	  goto done;
	}

      /* Figure out in what scope the declaration is being placed.  */
      scope = current_scope ();
      /* If that scope does not contain the scope in which the
	 class was originally declared, the program is invalid.  */
      if (scope && !is_ancestor (scope, nested_name_specifier))
	{
	  if (at_namespace_scope_p ())
	    error_at (type_start_token->location,
		      "declaration of %qD in namespace %qD which does not "
		      "enclose %qD",
		      type, scope, nested_name_specifier);
	  else
	    error_at (type_start_token->location,
		      "declaration of %qD in %qD which does not enclose %qD",
		      type, scope, nested_name_specifier);
	  type = NULL_TREE;
	  goto done;
	}
      /* [dcl.meaning]

	 A declarator-id shall not be qualified except for the
	 definition of a ... nested class outside of its class
	 ... [or] the definition or explicit instantiation of a
	 class member of a namespace outside of its namespace.  */
      if (scope == nested_name_specifier)
	{
	  permerror (nested_name_specifier_token_start->location,
		     "extra qualification not allowed");
	  nested_name_specifier = NULL_TREE;
	  num_templates = 0;
	}
    }
  /* An explicit-specialization must be preceded by "template <>".  If
     it is not, try to recover gracefully.  */
  if (at_namespace_scope_p ()
      && parser->num_template_parameter_lists == 0
      && template_id_p)
    {
      error_at (type_start_token->location,
		"an explicit specialization must be preceded by %<template <>%>");
      invalid_explicit_specialization_p = true;
      /* Take the same action that would have been taken by
	 cp_parser_explicit_specialization.  */
      ++parser->num_template_parameter_lists;
      begin_specialization ();
    }
  /* There must be no "return" statements between this point and the
     end of this function; set "type" to the correct return value and
     use "goto done;" to return.  */

  /* Make sure that the right number of template parameters were
     present.  */
  if (!cp_parser_check_template_parameters (parser, num_templates,
					    type_start_token->location,
					    /*declarator=*/NULL))
    {
      /* If something went wrong, there is no point in even trying to
	 process the class-definition.  */
      type = NULL_TREE;
      goto done;
    }

  /* Look up the type.  */
  if (template_id_p)
    {
      if (TREE_CODE (id) == TEMPLATE_ID_EXPR
	  && (DECL_FUNCTION_TEMPLATE_P (TREE_OPERAND (id, 0))
	      || TREE_CODE (TREE_OPERAND (id, 0)) == OVERLOAD))
	{
	  error_at (type_start_token->location,
		    "function template %qD redeclared as a class template", id);
	  type = error_mark_node;
	}
      else
	{
	  type = TREE_TYPE (id);
	  type = maybe_process_partial_specialization (type);
	}
      if (nested_name_specifier)
	pushed_scope = push_scope (nested_name_specifier);
    }
  else if (nested_name_specifier)
    {
      tree class_type;

      /* Given:

	    template <typename T> struct S { struct T };
	    template <typename T> struct S<T>::T { };

	 we will get a TYPENAME_TYPE when processing the definition of
	 `S::T'.  We need to resolve it to the actual type before we
	 try to define it.  */
      if (TREE_CODE (TREE_TYPE (type)) == TYPENAME_TYPE)
	{
	  class_type = resolve_typename_type (TREE_TYPE (type),
					      /*only_current_p=*/false);
	  if (TREE_CODE (class_type) != TYPENAME_TYPE)
	    type = TYPE_NAME (class_type);
	  else
	    {
	      cp_parser_error (parser, "could not resolve typename type");
	      type = error_mark_node;
	    }
	}

      if (maybe_process_partial_specialization (TREE_TYPE (type))
	  == error_mark_node)
	{
	  type = NULL_TREE;
	  goto done;
	}

      class_type = current_class_type;
      /* Enter the scope indicated by the nested-name-specifier.  */
      pushed_scope = push_scope (nested_name_specifier);
      /* Get the canonical version of this type.  */
      type = TYPE_MAIN_DECL (TREE_TYPE (type));
      if (PROCESSING_REAL_TEMPLATE_DECL_P ()
	  && !CLASSTYPE_TEMPLATE_SPECIALIZATION (TREE_TYPE (type)))
	{
	  type = push_template_decl (type);
	  if (type == error_mark_node)
	    {
	      type = NULL_TREE;
	      goto done;
	    }
	}

      type = TREE_TYPE (type);
      *nested_name_specifier_p = true;
    }
  else      /* The name is not a nested name.  */
    {
      /* If the class was unnamed, create a dummy name.  */
      if (!id)
	id = make_anon_name ();
      type = xref_tag (class_key, id, /*tag_scope=*/ts_current,
		       parser->num_template_parameter_lists);
    }

  /* Indicate whether this class was declared as a `class' or as a
     `struct'.  */
  if (TREE_CODE (type) == RECORD_TYPE)
    CLASSTYPE_DECLARED_CLASS (type) = (class_key == class_type);
  cp_parser_check_class_key (class_key, type);

  /* If this type was already complete, and we see another definition,
     that's an error.  */
  if (type != error_mark_node && COMPLETE_TYPE_P (type))
    {
      error_at (type_start_token->location, "redefinition of %q#T",
		type);
      error_at (type_start_token->location, "previous definition of %q+#T",
		type);
      type = NULL_TREE;
      goto done;
    }
  else if (type == error_mark_node)
    type = NULL_TREE;

  if (type)
    {
      /* Apply attributes now, before any use of the class as a template
	 argument in its base list.  */
      cplus_decl_attributes (&type, attributes, (int)ATTR_FLAG_TYPE_IN_PLACE);
      fixup_attribute_variants (type);
    }

  /* We will have entered the scope containing the class; the names of
     base classes should be looked up in that context.  For example:

       struct A { struct B {}; struct C; };
       struct A::C : B {};

     is valid.  */

  /* Get the list of base-classes, if there is one.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_COLON))
    bases = cp_parser_base_clause (parser);
  else
    bases = NULL_TREE;

  /* If we're really defining a class, process the base classes.
     If they're invalid, fail.  */
  if (type && cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE)
      && !xref_basetypes (type, bases))
    type = NULL_TREE;

 done:
  /* Leave the scope given by the nested-name-specifier.  We will
     enter the class scope itself while processing the members.  */
  if (pushed_scope)
    pop_scope (pushed_scope);

  if (invalid_explicit_specialization_p)
    {
      end_specialization ();
      --parser->num_template_parameter_lists;
    }

  if (type)
    DECL_SOURCE_LOCATION (TYPE_NAME (type)) = type_start_token->location;
  if (type && (virt_specifiers & VIRT_SPEC_FINAL))
    CLASSTYPE_FINAL (type) = 1;
 out:
  parser->colon_corrects_to_scope_p = saved_colon_corrects_to_scope_p;
  return type;
}
/* Parse a class-key.
class-key:
class
struct
union
Returns the kind of class-key specified, or none_type to indicate
error. */
/* Parse a class-key (`class', `struct', or `union') and return the
   corresponding tag type, or none_type after reporting an error.  */
static enum tag_types
cp_parser_class_key (cp_parser* parser)
{
  cp_token *key_token;
  enum tag_types kind;

  /* The class-key must be the next token.  */
  key_token = cp_parser_require (parser, CPP_KEYWORD, RT_CLASS_KEY);
  if (key_token == NULL)
    return none_type;

  /* Map the keyword onto a tag type; diagnose anything that is a
     keyword but not actually a class-key.  */
  kind = cp_parser_token_is_class_key (key_token);
  if (!kind)
    cp_parser_error (parser, "expected class-key");

  return kind;
}
/* Parse an (optional) member-specification.
member-specification:
member-declaration member-specification [opt]
access-specifier : member-specification [opt] */
/* Parse an (optional) member-specification: a sequence of
   member-declarations and access-specifier labels, terminated by the
   closing `}' of the class body (or by end-of-input).  */
static void
cp_parser_member_specification_opt (cp_parser* parser)
{
  for (;;)
    {
      cp_token *tok = cp_lexer_peek_token (parser->lexer);

      /* A `}' -- or premature end of input -- ends the list.  */
      if (tok->type == CPP_CLOSE_BRACE
	  || tok->type == CPP_EOF
	  || tok->type == CPP_PRAGMA_EOL)
	return;

      if (tok->keyword == RID_PUBLIC
	  || tok->keyword == RID_PROTECTED
	  || tok->keyword == RID_PRIVATE)
	{
	  /* An access-specifier: consume it, record it as the active
	     access for subsequent members, and require the `:'.  */
	  cp_lexer_consume_token (parser->lexer);
	  current_access_specifier = tok->u.value;
	  cp_parser_require (parser, CPP_COLON, RT_COLON);
	}
      else if (tok->type == CPP_PRAGMA)
	/* Accept #pragmas at class scope.  */
	cp_parser_pragma (parser, pragma_external);
      else
	/* Otherwise, the next construction must be a
	   member-declaration.  */
	cp_parser_member_declaration (parser);
    }
}
/* Parse a member-declaration.
member-declaration:
decl-specifier-seq [opt] member-declarator-list [opt] ;
function-definition ; [opt]
:: [opt] nested-name-specifier template [opt] unqualified-id ;
using-declaration
template-declaration
alias-declaration
member-declarator-list:
member-declarator
member-declarator-list , member-declarator
member-declarator:
declarator pure-specifier [opt]
declarator constant-initializer [opt]
identifier [opt] : constant-expression
GNU Extensions:
member-declaration:
__extension__ member-declaration
member-declarator:
declarator attributes [opt] pure-specifier [opt]
declarator attributes [opt] constant-initializer [opt]
identifier [opt] attributes [opt] : constant-expression
C++0x Extensions:
member-declaration:
static_assert-declaration */
static void
cp_parser_member_declaration (cp_parser* parser)
{
cp_decl_specifier_seq decl_specifiers;
tree prefix_attributes;
tree decl;
int declares_class_or_enum;
bool friend_p;
cp_token *token = NULL;
cp_token *decl_spec_token_start = NULL;
cp_token *initializer_token_start = NULL;
int saved_pedantic;
bool saved_colon_corrects_to_scope_p = parser->colon_corrects_to_scope_p;
/* Check for the `__extension__' keyword. */
if (cp_parser_extension_opt (parser, &saved_pedantic))
{
/* Recurse. */
cp_parser_member_declaration (parser);
/* Restore the old value of the PEDANTIC flag. */
pedantic = saved_pedantic;
return;
}
/* Check for a template-declaration. */
if (cp_lexer_next_token_is_keyword (parser->lexer, RID_TEMPLATE))
{
/* An explicit specialization here is an error condition, and we
expect the specialization handler to detect and report this. */
if (cp_lexer_peek_nth_token (parser->lexer, 2)->type == CPP_LESS
&& cp_lexer_peek_nth_token (parser->lexer, 3)->type == CPP_GREATER)
cp_parser_explicit_specialization (parser);
else
cp_parser_template_declaration (parser, /*member_p=*/true);
return;
}
/* Check for a using-declaration. */
if (cp_lexer_next_token_is_keyword (parser->lexer, RID_USING))
{
if (cxx_dialect < cxx0x)
{
/* Parse the using-declaration. */
cp_parser_using_declaration (parser,
/*access_declaration_p=*/false);
return;
}
else
{
tree decl;
cp_parser_parse_tentatively (parser);
decl = cp_parser_alias_declaration (parser);
if (cp_parser_parse_definitely (parser))
finish_member_declaration (decl);
else
cp_parser_using_declaration (parser,
/*access_declaration_p=*/false);
return;
}
}
/* Check for @defs. */
if (cp_lexer_next_token_is_keyword (parser->lexer, RID_AT_DEFS))
{
tree ivar, member;
tree ivar_chains = cp_parser_objc_defs_expression (parser);
ivar = ivar_chains;
while (ivar)
{
member = ivar;
ivar = TREE_CHAIN (member);
TREE_CHAIN (member) = NULL_TREE;
finish_member_declaration (member);
}
return;
}
/* If the next token is `static_assert' we have a static assertion. */
if (cp_lexer_next_token_is_keyword (parser->lexer, RID_STATIC_ASSERT))
{
cp_parser_static_assert (parser, /*member_p=*/true);
return;
}
parser->colon_corrects_to_scope_p = false;
if (cp_parser_using_declaration (parser, /*access_declaration=*/true))
goto out;
/* Parse the decl-specifier-seq. */
decl_spec_token_start = cp_lexer_peek_token (parser->lexer);
cp_parser_decl_specifier_seq (parser,
CP_PARSER_FLAGS_OPTIONAL,
&decl_specifiers,
&declares_class_or_enum);
prefix_attributes = decl_specifiers.attributes;
decl_specifiers.attributes = NULL_TREE;
/* Check for an invalid type-name. */
if (!decl_specifiers.any_type_specifiers_p
&& cp_parser_parse_and_diagnose_invalid_type_name (parser))
goto out;
/* If there is no declarator, then the decl-specifier-seq should
specify a type. */
if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
{
/* If there was no decl-specifier-seq, and the next token is a
`;', then we have something like:
struct S { ; };
[class.mem]
Each member-declaration shall declare at least one member
name of the class. */
if (!decl_specifiers.any_specifiers_p)
{
cp_token *token = cp_lexer_peek_token (parser->lexer);
if (!in_system_header_at (token->location))
pedwarn (token->location, OPT_pedantic, "extra %<;%>");
}
else
{
tree type;
/* See if this declaration is a friend. */
friend_p = cp_parser_friend_p (&decl_specifiers);
/* If there were decl-specifiers, check to see if there was
a class-declaration. */
type = check_tag_decl (&decl_specifiers);
/* Nested classes have already been added to the class, but
a `friend' needs to be explicitly registered. */
if (friend_p)
{
/* If the `friend' keyword was present, the friend must
be introduced with a class-key. */
if (!declares_class_or_enum && cxx_dialect < cxx0x)
pedwarn (decl_spec_token_start->location, OPT_pedantic,
"in C++03 a class-key must be used "
"when declaring a friend");
/* In this case:
template <typename T> struct A {
friend struct A<T>::B;
};
A<T>::B will be represented by a TYPENAME_TYPE, and
therefore not recognized by check_tag_decl. */
if (!type)
{
type = decl_specifiers.type;
if (type && TREE_CODE (type) == TYPE_DECL)
type = TREE_TYPE (type);
}
if (!type || !TYPE_P (type))
error_at (decl_spec_token_start->location,
"friend declaration does not name a class or "
"function");
else
make_friend_class (current_class_type, type,
/*complain=*/true);
}
/* If there is no TYPE, an error message will already have
been issued. */
else if (!type || type == error_mark_node)
;
/* An anonymous aggregate has to be handled specially; such
a declaration really declares a data member (with a
particular type), as opposed to a nested class. */
else if (ANON_AGGR_TYPE_P (type))
{
/* Remove constructors and such from TYPE, now that we
know it is an anonymous aggregate. */
fixup_anonymous_aggr (type);
/* And make the corresponding data member. */
decl = build_decl (decl_spec_token_start->location,
FIELD_DECL, NULL_TREE, type);
/* Add it to the class. */
finish_member_declaration (decl);
}
else
cp_parser_check_access_in_redeclaration
(TYPE_NAME (type),
decl_spec_token_start->location);
}
}
else
{
bool assume_semicolon = false;
/* See if these declarations will be friends. */
friend_p = cp_parser_friend_p (&decl_specifiers);
/* Keep going until we hit the `;' at the end of the
declaration. */
while (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
{
tree attributes = NULL_TREE;
tree first_attribute;
/* Peek at the next token. */
token = cp_lexer_peek_token (parser->lexer);
/* Check for a bitfield declaration. */
if (token->type == CPP_COLON
|| (token->type == CPP_NAME
&& cp_lexer_peek_nth_token (parser->lexer, 2)->type
== CPP_COLON))
{
tree identifier;
tree width;
/* Get the name of the bitfield. Note that we cannot just
check TOKEN here because it may have been invalidated by
the call to cp_lexer_peek_nth_token above. */
if (cp_lexer_peek_token (parser->lexer)->type != CPP_COLON)
identifier = cp_parser_identifier (parser);
else
identifier = NULL_TREE;
/* Consume the `:' token. */
cp_lexer_consume_token (parser->lexer);
/* Get the width of the bitfield. */
width
= cp_parser_constant_expression (parser,
/*allow_non_constant=*/false,
NULL);
/* Look for attributes that apply to the bitfield. */
attributes = cp_parser_attributes_opt (parser);
/* Remember which attributes are prefix attributes and
which are not. */
first_attribute = attributes;
/* Combine the attributes. */
attributes = chainon (prefix_attributes, attributes);
/* Create the bitfield declaration. */
decl = grokbitfield (identifier
? make_id_declarator (NULL_TREE,
identifier,
sfk_none)
: NULL,
&decl_specifiers,
width,
attributes);
}
else
{
cp_declarator *declarator;
tree initializer;
tree asm_specification;
int ctor_dtor_or_conv_p;
/* Parse the declarator. */
declarator
= cp_parser_declarator (parser, CP_PARSER_DECLARATOR_NAMED,
&ctor_dtor_or_conv_p,
/*parenthesized_p=*/NULL,
/*member_p=*/true);
/* If something went wrong parsing the declarator, make sure
that we at least consume some tokens. */
if (declarator == cp_error_declarator)
{
/* Skip to the end of the statement. */
cp_parser_skip_to_end_of_statement (parser);
/* If the next token is not a semicolon, that is
probably because we just skipped over the body of
a function. So, we consume a semicolon if
present, but do not issue an error message if it
is not present. */
if (cp_lexer_next_token_is (parser->lexer,
CPP_SEMICOLON))
cp_lexer_consume_token (parser->lexer);
goto out;
}
if (declares_class_or_enum & 2)
cp_parser_check_for_definition_in_return_type
(declarator, decl_specifiers.type,
decl_specifiers.type_location);
/* Look for an asm-specification. */
asm_specification = cp_parser_asm_specification_opt (parser);
/* Look for attributes that apply to the declaration. */
attributes = cp_parser_attributes_opt (parser);
/* Remember which attributes are prefix attributes and
which are not. */
first_attribute = attributes;
/* Combine the attributes. */
attributes = chainon (prefix_attributes, attributes);
/* If it's an `=', then we have a constant-initializer or a
pure-specifier. It is not correct to parse the
initializer before registering the member declaration
since the member declaration should be in scope while
its initializer is processed. However, the rest of the
front end does not yet provide an interface that allows
us to handle this correctly. */
if (cp_lexer_next_token_is (parser->lexer, CPP_EQ))
{
/* In [class.mem]:
A pure-specifier shall be used only in the declaration of
a virtual function.
A member-declarator can contain a constant-initializer
only if it declares a static member of integral or
enumeration type.
Therefore, if the DECLARATOR is for a function, we look
for a pure-specifier; otherwise, we look for a
constant-initializer. When we call `grokfield', it will
perform more stringent semantics checks. */
initializer_token_start = cp_lexer_peek_token (parser->lexer);
if (function_declarator_p (declarator)
|| (decl_specifiers.type
&& TREE_CODE (decl_specifiers.type) == TYPE_DECL
&& (TREE_CODE (TREE_TYPE (decl_specifiers.type))
== FUNCTION_TYPE)))
initializer = cp_parser_pure_specifier (parser);
else if (decl_specifiers.storage_class != sc_static)
initializer = cp_parser_save_nsdmi (parser);
else if (cxx_dialect >= cxx0x)
{
bool nonconst;
/* Don't require a constant rvalue in C++11, since we
might want a reference constant. We'll enforce
constancy later. */
cp_lexer_consume_token (parser->lexer);
/* Parse the initializer. */
initializer = cp_parser_initializer_clause (parser,
&nonconst);
}
else
/* Parse the initializer. */
initializer = cp_parser_constant_initializer (parser);
}
else if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE)
&& !function_declarator_p (declarator))
{
bool x;
if (decl_specifiers.storage_class != sc_static)
initializer = cp_parser_save_nsdmi (parser);
else
initializer = cp_parser_initializer (parser, &x, &x);
}
/* Otherwise, there is no initializer. */
else
initializer = NULL_TREE;
/* See if we are probably looking at a function
definition. We are certainly not looking at a
member-declarator. Calling `grokfield' has
side-effects, so we must not do it unless we are sure
that we are looking at a member-declarator. */
if (cp_parser_token_starts_function_definition_p
(cp_lexer_peek_token (parser->lexer)))
{
/* The grammar does not allow a pure-specifier to be
used when a member function is defined. (It is
possible that this fact is an oversight in the
standard, since a pure function may be defined
outside of the class-specifier. */
if (initializer && initializer_token_start)
error_at (initializer_token_start->location,
"pure-specifier on function-definition");
decl = cp_parser_save_member_function_body (parser,
&decl_specifiers,
declarator,
attributes);
/* If the member was not a friend, declare it here. */
if (!friend_p)
finish_member_declaration (decl);
/* Peek at the next token. */
token = cp_lexer_peek_token (parser->lexer);
/* If the next token is a semicolon, consume it. */
if (token->type == CPP_SEMICOLON)
cp_lexer_consume_token (parser->lexer);
goto out;
}
else
if (declarator->kind == cdk_function)
declarator->id_loc = token->location;
/* Create the declaration. */
decl = grokfield (declarator, &decl_specifiers,
initializer, /*init_const_expr_p=*/true,
asm_specification,
attributes);
}
/* Reset PREFIX_ATTRIBUTES. */
while (attributes && TREE_CHAIN (attributes) != first_attribute)
attributes = TREE_CHAIN (attributes);
if (attributes)
TREE_CHAIN (attributes) = NULL_TREE;
/* If there is any qualification still in effect, clear it
now; we will be starting fresh with the next declarator. */
parser->scope = NULL_TREE;
parser->qualifying_scope = NULL_TREE;
parser->object_scope = NULL_TREE;
/* If it's a `,', then there are more declarators. */
if (cp_lexer_next_token_is (parser->lexer, CPP_COMMA))
cp_lexer_consume_token (parser->lexer);
/* If the next token isn't a `;', then we have a parse error. */
else if (cp_lexer_next_token_is_not (parser->lexer,
CPP_SEMICOLON))
{
/* The next token might be a ways away from where the
actual semicolon is missing. Find the previous token
and use that for our error position. */
cp_token *token = cp_lexer_previous_token (parser->lexer);
error_at (token->location,
"expected %<;%> at end of member declaration");
/* Assume that the user meant to provide a semicolon. If
we were to cp_parser_skip_to_end_of_statement, we might
skip to a semicolon inside a member function definition
and issue nonsensical error messages. */
assume_semicolon = true;
}
if (decl)
{
/* Add DECL to the list of members. */
if (!friend_p)
finish_member_declaration (decl);
if (TREE_CODE (decl) == FUNCTION_DECL)
cp_parser_save_default_args (parser, decl);
else if (TREE_CODE (decl) == FIELD_DECL
&& !DECL_C_BIT_FIELD (decl)
&& DECL_INITIAL (decl))
/* Add DECL to the queue of NSDMI to be parsed later. */
VEC_safe_push (tree, gc, unparsed_nsdmis, decl);
}
if (assume_semicolon)
goto out;
}
}
cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);
out:
parser->colon_corrects_to_scope_p = saved_colon_corrects_to_scope_p;
}
/* Parse a pure-specifier.
   pure-specifier:
     = 0
   Returns INTEGER_ZERO_NODE if a pure specifier is found.
   Otherwise, ERROR_MARK_NODE is returned.  */
static tree
cp_parser_pure_specifier (cp_parser* parser)
{
  cp_token *token;
  /* Look for the `=' token.  */
  if (!cp_parser_require (parser, CPP_EQ, RT_EQ))
    return error_mark_node;
  /* Look for the `0' token.  */
  token = cp_lexer_peek_token (parser->lexer);
  /* Do not consume end-of-file or end-of-pragma; there is nothing
     sensible to skip past in those cases.  */
  if (token->type == CPP_EOF
      || token->type == CPP_PRAGMA_EOL)
    return error_mark_node;
  /* Consume the token now; TOKEN still describes it for the checks
     below.  */
  cp_lexer_consume_token (parser->lexer);
  /* Accept = default or = delete in c++0x mode.  */
  if (token->keyword == RID_DEFAULT
      || token->keyword == RID_DELETE)
    {
      maybe_warn_cpp0x (CPP0X_DEFAULTED_DELETED);
      return token->u.value;
    }
  /* c_lex_with_flags marks a single digit '0' with PURE_ZERO, so any
     other spelling of zero is rejected here.  */
  if (token->type != CPP_NUMBER || !(token->flags & PURE_ZERO))
    {
      cp_parser_error (parser,
		       "invalid pure specifier (only %<= 0%> is allowed)");
      cp_parser_skip_to_end_of_statement (parser);
      return error_mark_node;
    }
  /* A member function template cannot be virtual.  */
  if (PROCESSING_REAL_TEMPLATE_DECL_P ())
    {
      error_at (token->location, "templates may not be %<virtual%>");
      return error_mark_node;
    }
  return integer_zero_node;
}
/* Parse a constant-initializer.
   constant-initializer:
     = constant-expression
   Returns a representation of the constant-expression, or
   ERROR_MARK_NODE if the initializer is malformed.  */
static tree
cp_parser_constant_initializer (cp_parser* parser)
{
  /* A constant-initializer must begin with `='.  */
  if (!cp_parser_require (parser, CPP_EQ, RT_EQ))
    return error_mark_node;
  /* Writing
       struct S { static const int i = { 7 }; };
     is ill-formed.  Diagnose a brace-enclosed initializer, then skip
     over it so that parsing can continue.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
    {
      cp_parser_error (parser,
		       "a brace-enclosed initializer is not allowed here");
      /* Eat the `{', skip to the matching brace, and consume the
	 trailing `}'.  */
      cp_lexer_consume_token (parser->lexer);
      cp_parser_skip_to_closing_brace (parser);
      cp_parser_require (parser, CPP_CLOSE_BRACE, RT_CLOSE_BRACE);
      return error_mark_node;
    }
  /* Otherwise, parse the constant-expression itself.  */
  return cp_parser_constant_expression (parser,
					/*allow_non_constant=*/false,
					NULL);
}
/* Derived classes [gram.class.derived] */
/* Parse a base-clause.
   base-clause:
     : base-specifier-list
   base-specifier-list:
     base-specifier ... [opt]
     base-specifier-list , base-specifier ... [opt]
   Returns a TREE_LIST representing the base-classes, in the order in
   which they were declared.  The representation of each node is as
   described by cp_parser_base_specifier.
   In the case that no bases are specified, this function will return
   NULL_TREE, not ERROR_MARK_NODE.  */
static tree
cp_parser_base_clause (cp_parser* parser)
{
  tree bases = NULL_TREE;
  /* Look for the `:' that begins the list.  */
  cp_parser_require (parser, CPP_COLON, RT_COLON);
  /* Scan the base-specifier-list.  */
  while (true)
    {
      cp_token *token;
      tree base;
      bool pack_expansion_p = false;
      /* Look for the base-specifier.  */
      base = cp_parser_base_specifier (parser);
      /* Look for the (optional) ellipsis.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS))
	{
	  /* Consume the `...'.  */
	  cp_lexer_consume_token (parser->lexer);
	  pack_expansion_p = true;
	}
      /* Add BASE to the front of the list.  */
      if (base && base != error_mark_node)
	{
	  if (pack_expansion_p)
	    /* Make this a pack expansion type.  */
	    TREE_VALUE (base) = make_pack_expansion (TREE_VALUE (base));
	  /* If check_for_bare_parameter_packs rejects the base, it is
	     dropped from the list entirely.  */
	  if (!check_for_bare_parameter_packs (TREE_VALUE (base)))
	    {
	      TREE_CHAIN (base) = bases;
	      bases = base;
	    }
	}
      /* Peek at the next token.  */
      token = cp_lexer_peek_token (parser->lexer);
      /* If it's not a comma, then the list is complete.  */
      if (token->type != CPP_COMMA)
	break;
      /* Consume the `,'.  */
      cp_lexer_consume_token (parser->lexer);
    }
  /* PARSER->SCOPE may still be non-NULL at this point, if the last
     base class had a qualified name.  However, the next name that
     appears is certainly not qualified.  */
  parser->scope = NULL_TREE;
  parser->qualifying_scope = NULL_TREE;
  parser->object_scope = NULL_TREE;
  /* The list was accumulated front-first; restore declaration order.  */
  return nreverse (bases);
}
/* Parse a base-specifier.
   base-specifier:
     :: [opt] nested-name-specifier [opt] class-name
     virtual access-specifier [opt] :: [opt] nested-name-specifier
       [opt] class-name
     access-specifier virtual [opt] :: [opt] nested-name-specifier
       [opt] class-name
   Returns a TREE_LIST.  The TREE_PURPOSE will be one of
   ACCESS_{DEFAULT,PUBLIC,PROTECTED,PRIVATE}_[VIRTUAL]_NODE to
   indicate the specifiers provided.  The TREE_VALUE will be a TYPE
   (or the ERROR_MARK_NODE) indicating the type that was specified.  */
static tree
cp_parser_base_specifier (cp_parser* parser)
{
  cp_token *token;
  bool done = false;
  bool virtual_p = false;
  bool duplicate_virtual_error_issued_p = false;
  bool duplicate_access_error_issued_p = false;
  bool class_scope_p, template_p;
  tree access = access_default_node;
  tree type;
  /* Process the optional `virtual' and `access-specifier'.  These may
     appear in either order, so loop until neither is seen.  */
  while (!done)
    {
      /* Peek at the next token.  */
      token = cp_lexer_peek_token (parser->lexer);
      /* Process `virtual'.  */
      switch (token->keyword)
	{
	case RID_VIRTUAL:
	  /* If `virtual' appears more than once, issue an error, but
	     only once per base-specifier.  */
	  if (virtual_p && !duplicate_virtual_error_issued_p)
	    {
	      cp_parser_error (parser,
			       "%<virtual%> specified more than once in base-specifier");
	      duplicate_virtual_error_issued_p = true;
	    }
	  virtual_p = true;
	  /* Consume the `virtual' token.  */
	  cp_lexer_consume_token (parser->lexer);
	  break;
	case RID_PUBLIC:
	case RID_PROTECTED:
	case RID_PRIVATE:
	  /* If more than one access specifier appears, issue an
	     error.  */
	  if (access != access_default_node
	      && !duplicate_access_error_issued_p)
	    {
	      cp_parser_error (parser,
			       "more than one access specifier in base-specifier");
	      duplicate_access_error_issued_p = true;
	    }
	  access = ridpointers[(int) token->keyword];
	  /* Consume the access-specifier.  */
	  cp_lexer_consume_token (parser->lexer);
	  break;
	default:
	  done = true;
	  break;
	}
    }
  /* It is not uncommon to see programs mechanically, erroneously, use
     the 'typename' keyword to denote (dependent) qualified types
     as base classes.  */
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_TYPENAME))
    {
      token = cp_lexer_peek_token (parser->lexer);
      if (!processing_template_decl)
	error_at (token->location,
		  "keyword %<typename%> not allowed outside of templates");
      else
	error_at (token->location,
		  "keyword %<typename%> not allowed in this context "
		  "(the base class is implicitly a type)");
      cp_lexer_consume_token (parser->lexer);
    }
  /* Look for the optional `::' operator.  */
  cp_parser_global_scope_opt (parser, /*current_scope_valid_p=*/false);
  /* Look for the nested-name-specifier.  The simplest way to
     implement:
       [temp.res]
       The keyword `typename' is not permitted in a base-specifier or
       mem-initializer; in these contexts a qualified name that
       depends on a template-parameter is implicitly assumed to be a
       type name.
     is to pretend that we have seen the `typename' keyword at this
     point.  */
  cp_parser_nested_name_specifier_opt (parser,
				       /*typename_keyword_p=*/true,
				       /*check_dependency_p=*/true,
				       typename_type,
				       /*is_declaration=*/true);
  /* If the base class is given by a qualified name, assume that names
     we see are type names or templates, as appropriate.  */
  class_scope_p = (parser->scope && TYPE_P (parser->scope));
  template_p = class_scope_p && cp_parser_optional_template_keyword (parser);
  if (!parser->scope
      && cp_lexer_next_token_is_decltype (parser->lexer))
    /* DR 950 allows decltype as a base-specifier.  */
    type = cp_parser_decltype (parser);
  else
    {
      /* Otherwise, look for the class-name.  */
      type = cp_parser_class_name (parser,
				   class_scope_p,
				   template_p,
				   typename_type,
				   /*check_dependency_p=*/true,
				   /*class_head_p=*/false,
				   /*is_declaration=*/true);
      type = TREE_TYPE (type);
    }
  if (type == error_mark_node)
    return error_mark_node;
  return finish_base_specifier (type, access, virtual_p);
}
/* Exception handling [gram.exception] */
/* Parse an (optional) noexcept-specification.
   noexcept-specification:
     noexcept ( constant-expression ) [opt]
   If no noexcept-specification is present, returns NULL_TREE.
   Otherwise, if REQUIRE_CONSTEXPR is false, then either parse and return any
   expression if parentheses follow noexcept, or return BOOLEAN_TRUE_NODE if
   there are no parentheses.  CONSUMED_EXPR will be set accordingly.
   Otherwise, returns a noexcept specification unless RETURN_COND is true,
   in which case a boolean condition is returned instead.
   NOTE(review): CONSUMED_EXPR is written only when REQUIRE_CONSTEXPR is
   false, so callers passing REQUIRE_CONSTEXPR == true may pass NULL.  */
static tree
cp_parser_noexcept_specification_opt (cp_parser* parser,
				      bool require_constexpr,
				      bool* consumed_expr,
				      bool return_cond)
{
  cp_token *token;
  const char *saved_message;
  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);
  /* Is it a noexcept-specification?  */
  if (cp_parser_is_keyword (token, RID_NOEXCEPT))
    {
      tree expr;
      /* Consume the `noexcept'.  */
      cp_lexer_consume_token (parser->lexer);
      if (cp_lexer_peek_token (parser->lexer)->type == CPP_OPEN_PAREN)
	{
	  /* Consume the `('.  */
	  cp_lexer_consume_token (parser->lexer);
	  if (require_constexpr)
	    {
	      /* Types may not be defined in an exception-specification.  */
	      saved_message = parser->type_definition_forbidden_message;
	      parser->type_definition_forbidden_message
	      = G_("types may not be defined in an exception-specification");
	      expr = cp_parser_constant_expression (parser, false, NULL);
	      /* Restore the saved message.  */
	      parser->type_definition_forbidden_message = saved_message;
	    }
	  else
	    {
	      /* Any expression is accepted here; tell the caller that
		 an expression was in fact consumed.  */
	      expr = cp_parser_expression (parser, false, NULL);
	      *consumed_expr = true;
	    }
	  cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
	}
      else
	{
	  /* Bare `noexcept' behaves like `noexcept (true)'.  */
	  expr = boolean_true_node;
	  if (!require_constexpr)
	    *consumed_expr = false;
	}
      /* We cannot build a noexcept-spec right away because this will check
	 that expr is a constexpr.  */
      if (!return_cond)
	return build_noexcept_spec (expr, tf_warning_or_error);
      else
	return expr;
    }
  else
    return NULL_TREE;
}
/* Parse an (optional) exception-specification.
   exception-specification:
     throw ( type-id-list [opt] )
   Returns a TREE_LIST representing the exception-specification.  The
   TREE_VALUE of each node is a type.  A noexcept-specification, if
   present, is returned directly in place of a throw()-style one.  */
static tree
cp_parser_exception_specification_opt (cp_parser* parser)
{
  cp_token *token;
  tree type_id_list;
  const char *saved_message;
  /* Peek at the next token.  TOKEN is saved before the noexcept probe
     below so that it still refers to the first unconsumed token when
     checking for `throw'.  */
  token = cp_lexer_peek_token (parser->lexer);
  /* Is it a noexcept-specification?  */
  type_id_list = cp_parser_noexcept_specification_opt(parser, true, NULL,
						      false);
  if (type_id_list != NULL_TREE)
    return type_id_list;
  /* If it's not `throw', then there's no exception-specification.  */
  if (!cp_parser_is_keyword (token, RID_THROW))
    return NULL_TREE;
#if 0
  /* Enable this once a lot of code has transitioned to noexcept?  */
  if (cxx_dialect == cxx0x && !in_system_header)
    warning (OPT_Wdeprecated, "dynamic exception specifications are "
	     "deprecated in C++0x; use %<noexcept%> instead");
#endif
  /* Consume the `throw'.  */
  cp_lexer_consume_token (parser->lexer);
  /* Look for the `('.  */
  cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);
  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);
  /* If it's not a `)', then there is a type-id-list.  */
  if (token->type != CPP_CLOSE_PAREN)
    {
      /* Types may not be defined in an exception-specification.  */
      saved_message = parser->type_definition_forbidden_message;
      parser->type_definition_forbidden_message
	= G_("types may not be defined in an exception-specification");
      /* Parse the type-id-list.  */
      type_id_list = cp_parser_type_id_list (parser);
      /* Restore the saved message.  */
      parser->type_definition_forbidden_message = saved_message;
    }
  else
    /* `throw ()' means no exceptions may be thrown.  */
    type_id_list = empty_except_spec;
  /* Look for the `)'.  */
  cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
  return type_id_list;
}
/* Parse an (optional) type-id-list.
   type-id-list:
     type-id ... [opt]
     type-id-list , type-id ... [opt]
   Returns a TREE_LIST.  The TREE_VALUE of each node is a TYPE,
   in the order that the types were presented.  */
static tree
cp_parser_type_id_list (cp_parser* parser)
{
  tree types = NULL_TREE;
  for (;;)
    {
      cp_token *next_token;
      tree type_id;
      /* Parse one type-id.  */
      type_id = cp_parser_type_id (parser);
      /* A trailing `...' makes it a pack expansion.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS))
	{
	  /* Consume the `...'.  */
	  cp_lexer_consume_token (parser->lexer);
	  type_id = make_pack_expansion (type_id);
	}
      /* Record the type, diagnosing duplicates.  */
      types = add_exception_specifier (types, type_id, /*complain=*/1);
      /* A `,' means another type-id follows; anything else ends the
	 list.  */
      next_token = cp_lexer_peek_token (parser->lexer);
      if (next_token->type != CPP_COMMA)
	break;
      /* Consume the `,'.  */
      cp_lexer_consume_token (parser->lexer);
    }
  /* The list was accumulated in reverse.  */
  return nreverse (types);
}
/* Parse a try-block.
   try-block:
     try compound-statement handler-seq
   Returns the TRY_BLOCK statement.  */
static tree
cp_parser_try_block (cp_parser* parser)
{
  tree stmt;
  /* The block must begin with the `try' keyword.  */
  cp_parser_require_keyword (parser, RID_TRY, RT_TRY);
  stmt = begin_try_block ();
  /* Parse the guarded compound-statement.  */
  cp_parser_compound_statement (parser, NULL, true, false);
  finish_try_block (stmt);
  /* Parse the catch handlers and close the construct.  */
  cp_parser_handler_seq (parser);
  finish_handler_sequence (stmt);
  return stmt;
}
/* Parse a function-try-block.
   function-try-block:
     try ctor-initializer [opt] function-body handler-seq
   Returns true iff a ctor-initializer was present.  */
static bool
cp_parser_function_try_block (cp_parser* parser)
{
  tree body;
  tree try_stmt;
  bool saw_ctor_initializer;
  /* A function-try-block starts with `try'.  */
  if (!cp_parser_require_keyword (parser, RID_TRY, RT_TRY))
    return false;
  /* Let the rest of the front end know where we are.  */
  try_stmt = begin_function_try_block (&body);
  /* Parse the optional ctor-initializer and the function-body.  */
  saw_ctor_initializer
    = cp_parser_ctor_initializer_opt_and_function_body (parser);
  /* The `try' part is finished; move on to the handlers.  */
  finish_function_try_block (try_stmt);
  cp_parser_handler_seq (parser);
  finish_function_handler_sequence (try_stmt, body);
  return saw_ctor_initializer;
}
/* Parse a handler-seq.
   handler-seq:
     handler handler-seq [opt]
   At least one handler is always parsed; parsing continues for as
   long as a `catch' keyword follows.  */
static void
cp_parser_handler_seq (cp_parser* parser)
{
  do
    cp_parser_handler (parser);
  while (cp_parser_is_keyword (cp_lexer_peek_token (parser->lexer),
			       RID_CATCH));
}
/* Parse a handler.
   handler:
     catch ( exception-declaration ) compound-statement  */
static void
cp_parser_handler (cp_parser* parser)
{
  tree stmt;
  tree parm_decl;
  /* The handler begins with `catch ('.  */
  cp_parser_require_keyword (parser, RID_CATCH, RT_CATCH);
  stmt = begin_handler ();
  cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);
  /* Parse the exception-declaration between the parentheses.  */
  parm_decl = cp_parser_exception_declaration (parser);
  finish_handler_parms (parm_decl, stmt);
  cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
  /* Parse the body of the handler.  */
  cp_parser_compound_statement (parser, NULL, false, false);
  finish_handler (stmt);
}
/* Parse an exception-declaration.
   exception-declaration:
     type-specifier-seq declarator
     type-specifier-seq abstract-declarator
     type-specifier-seq
     ...
   Returns a VAR_DECL for the declaration, or NULL_TREE if the
   ellipsis variant is used.  */
static tree
cp_parser_exception_declaration (cp_parser* parser)
{
  cp_decl_specifier_seq type_specifiers;
  cp_declarator *declarator;
  const char *saved_message;
  /* If it's an ellipsis, it's easy to handle.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS))
    {
      /* Consume the `...' token.  */
      cp_lexer_consume_token (parser->lexer);
      return NULL_TREE;
    }
  /* Types may not be defined in exception-declarations.  */
  saved_message = parser->type_definition_forbidden_message;
  parser->type_definition_forbidden_message
    = G_("types may not be defined in exception-declarations");
  /* Parse the type-specifier-seq.  */
  cp_parser_type_specifier_seq (parser, /*is_declaration=*/true,
				/*is_trailing_return=*/false,
				&type_specifiers);
  /* If it's a `)', then there is no declarator.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_PAREN))
    declarator = NULL;
  else
    declarator = cp_parser_declarator (parser, CP_PARSER_DECLARATOR_EITHER,
				       /*ctor_dtor_or_conv_p=*/NULL,
				       /*parenthesized_p=*/NULL,
				       /*member_p=*/false);
  /* Restore the saved message.  */
  parser->type_definition_forbidden_message = saved_message;
  /* Reject a declaration with no type-specifiers at all.  */
  if (!type_specifiers.any_specifiers_p)
    return error_mark_node;
  return grokdeclarator (declarator, &type_specifiers, CATCHPARM, 1, NULL);
}
/* Parse a throw-expression.
   throw-expression:
     throw assignment-expression [opt]
   Returns a THROW_EXPR representing the throw-expression.  */
static tree
cp_parser_throw_expression (cp_parser* parser)
{
  tree operand;
  cp_token* token;
  cp_parser_require_keyword (parser, RID_THROW, RT_THROW);
  token = cp_lexer_peek_token (parser->lexer);
  /* A token that can legitimately follow a complete expression means
     the operand was omitted (a re-throw); otherwise parse the
     assignment-expression.  */
  switch (token->type)
    {
    case CPP_COMMA:
    case CPP_SEMICOLON:
    case CPP_CLOSE_PAREN:
    case CPP_CLOSE_SQUARE:
    case CPP_CLOSE_BRACE:
    case CPP_COLON:
      operand = NULL_TREE;
      break;
    default:
      operand = cp_parser_assignment_expression (parser,
						 /*cast_p=*/false, NULL);
      break;
    }
  return build_throw (operand);
}
/* GNU Extensions */
/* Parse an (optional) asm-specification.
   asm-specification:
     asm ( string-literal )
   If the asm-specification is present, returns a STRING_CST
   corresponding to the string-literal.  Otherwise, returns
   NULL_TREE.  */
static tree
cp_parser_asm_specification_opt (cp_parser* parser)
{
  tree spec;
  /* Without a leading `asm' keyword there is no asm-specification.  */
  if (!cp_parser_is_keyword (cp_lexer_peek_token (parser->lexer), RID_ASM))
    return NULL_TREE;
  /* Consume `asm', then parse `( string-literal )'.  */
  cp_lexer_consume_token (parser->lexer);
  cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);
  spec = cp_parser_string_literal (parser, false, false);
  cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
  return spec;
}
/* Parse an asm-operand-list.
   asm-operand-list:
     asm-operand
     asm-operand-list , asm-operand
   asm-operand:
     string-literal ( expression )
     [ string-literal ] string-literal ( expression )
   Returns a TREE_LIST representing the operands.  The TREE_VALUE of
   each node is the expression.  The TREE_PURPOSE is itself a
   TREE_LIST whose TREE_PURPOSE is a STRING_CST for the bracketed
   string-literal (or NULL_TREE if not present) and whose TREE_VALUE
   is a STRING_CST for the string literal before the parenthesis.  Returns
   ERROR_MARK_NODE if any of the operands are invalid.  */
static tree
cp_parser_asm_operand_list (cp_parser* parser)
{
  tree asm_operands = NULL_TREE;
  bool invalid_operands = false;
  while (true)
    {
      tree string_literal;
      tree expression;
      tree name;
      /* A `[' introduces an optional symbolic operand name.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_SQUARE))
	{
	  /* Consume the `[' token.  */
	  cp_lexer_consume_token (parser->lexer);
	  /* Read the operand name.  */
	  name = cp_parser_identifier (parser);
	  if (name != error_mark_node)
	    /* Store the symbolic name as a STRING_CST.  */
	    name = build_string (IDENTIFIER_LENGTH (name),
				 IDENTIFIER_POINTER (name));
	  /* Look for the closing `]'.  */
	  cp_parser_require (parser, CPP_CLOSE_SQUARE, RT_CLOSE_SQUARE);
	}
      else
	name = NULL_TREE;
      /* Look for the string-literal.  */
      string_literal = cp_parser_string_literal (parser, false, false);
      /* Look for the `('.  */
      cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);
      /* Parse the expression.  */
      expression = cp_parser_expression (parser, /*cast_p=*/false, NULL);
      /* Look for the `)'.  */
      cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
      /* Keep parsing the remaining operands even on error, but
	 remember that the whole list must be discarded.  */
      if (name == error_mark_node
	  || string_literal == error_mark_node
	  || expression == error_mark_node)
	invalid_operands = true;
      /* Add this operand to the list.  */
      asm_operands = tree_cons (build_tree_list (name, string_literal),
				expression,
				asm_operands);
      /* If the next token is not a `,', there are no more
	 operands.  */
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA))
	break;
      /* Consume the `,'.  */
      cp_lexer_consume_token (parser->lexer);
    }
  return invalid_operands ? error_mark_node : nreverse (asm_operands);
}
/* Parse an asm-clobber-list.
   asm-clobber-list:
     string-literal
     asm-clobber-list , string-literal
   Returns a TREE_LIST, indicating the clobbers in the order that they
   appeared.  The TREE_VALUE of each node is a STRING_CST.  */
static tree
cp_parser_asm_clobber_list (cp_parser* parser)
{
  tree clobbers = NULL_TREE;
  for (;;)
    {
      /* Each clobber is a string-literal.  */
      tree clobber = cp_parser_string_literal (parser, false, false);
      clobbers = tree_cons (NULL_TREE, clobber, clobbers);
      /* Stop unless a `,' introduces another clobber.  */
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA))
	break;
      /* Consume the `,' token.  */
      cp_lexer_consume_token (parser->lexer);
    }
  return clobbers;
}
/* Parse an asm-label-list.
   asm-label-list:
     identifier
     asm-label-list , identifier
   Returns a TREE_LIST, indicating the labels in the order that they
   appeared.  The TREE_VALUE of each node is a label.  */
static tree
cp_parser_asm_label_list (cp_parser* parser)
{
  tree labels = NULL_TREE;
  while (true)
    {
      tree identifier, label, name;
      /* Look for the identifier.  */
      identifier = cp_parser_identifier (parser);
      if (!error_operand_p (identifier))
	{
	  label = lookup_label (identifier);
	  if (TREE_CODE (label) == LABEL_DECL)
	    {
	      /* Mark the label as used and verify that jumping to it
		 from this point is permitted.  */
	      TREE_USED (label) = 1;
	      check_goto (label);
	      name = build_string (IDENTIFIER_LENGTH (identifier),
				   IDENTIFIER_POINTER (identifier));
	      labels = tree_cons (name, label, labels);
	    }
	  /* NOTE(review): a lookup that yields something other than a
	     LABEL_DECL is silently dropped from the list — confirm
	     this is intentional.  */
	}
      /* If the next token is not a `,', then the list is
	 complete.  */
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA))
	break;
      /* Consume the `,' token.  */
      cp_lexer_consume_token (parser->lexer);
    }
  /* The list was accumulated in reverse.  */
  return nreverse (labels);
}
/* Parse an (optional) series of attributes.
   attributes:
     attributes attribute
   attribute:
     __attribute__ (( attribute-list [opt] ))
   The return value is as for cp_parser_attribute_list.  */
static tree
cp_parser_attributes_opt (cp_parser* parser)
{
  tree attributes = NULL_TREE;
  /* Each iteration handles one `__attribute__ ((...))' clause.  */
  while (cp_lexer_peek_token (parser->lexer)->keyword == RID_ATTRIBUTE)
    {
      tree attribute_list;
      /* Eat `__attribute__' and the double `('.  */
      cp_lexer_consume_token (parser->lexer);
      cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);
      cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);
      /* An immediately following `)' means the attribute-list is
	 absent; otherwise parse it.  */
      if (cp_lexer_peek_token (parser->lexer)->type != CPP_CLOSE_PAREN)
	attribute_list = cp_parser_attribute_list (parser);
      else
	attribute_list = NULL;
      /* Eat the double `)'.  */
      cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
      cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
      /* Append the new attributes to those gathered so far.  */
      attributes = chainon (attributes, attribute_list);
    }
  return attributes;
}
/* Parse an attribute-list.
   attribute-list:
     attribute
     attribute-list , attribute
   attribute:
     identifier
     identifier ( identifier )
     identifier ( identifier , expression-list )
     identifier ( expression-list )
   Returns a TREE_LIST, or NULL_TREE on error.  Each node corresponds
   to an attribute.  The TREE_PURPOSE of each node is the identifier
   indicating which attribute is in use.  The TREE_VALUE represents
   the arguments, if any.  */
static tree
cp_parser_attribute_list (cp_parser* parser)
{
  tree attribute_list = NULL_TREE;
  bool save_translate_strings_p = parser->translate_strings_p;
  /* String arguments inside attributes are not translated;
     TRANSLATE_STRINGS_P is cleared here and restored on exit.  */
  parser->translate_strings_p = false;
  while (true)
    {
      cp_token *token;
      tree identifier;
      tree attribute;
      /* Look for the identifier.  We also allow keywords here; for
	 example `__attribute__ ((const))' is legal.  */
      token = cp_lexer_peek_token (parser->lexer);
      if (token->type == CPP_NAME
	  || token->type == CPP_KEYWORD)
	{
	  tree arguments = NULL_TREE;
	  /* Consume the token.  */
	  token = cp_lexer_consume_token (parser->lexer);
	  /* Save away the identifier that indicates which attribute
	     this is.  */
	  identifier = (token->type == CPP_KEYWORD)
	    /* For keywords, use the canonical spelling, not the
	       parsed identifier.  */
	    ? ridpointers[(int) token->keyword]
	    : token->u.value;
	  attribute = build_tree_list (identifier, NULL_TREE);
	  /* Peek at the next token.  */
	  token = cp_lexer_peek_token (parser->lexer);
	  /* If it's an `(', then parse the attribute arguments.  */
	  if (token->type == CPP_OPEN_PAREN)
	    {
	      VEC(tree,gc) *vec;
	      /* attribute_takes_identifier_p decides whether a leading
		 bare identifier is permitted as the first argument.  */
	      int attr_flag = (attribute_takes_identifier_p (identifier)
			       ? id_attr : normal_attr);
	      vec = cp_parser_parenthesized_expression_list
		    (parser, attr_flag, /*cast_p=*/false,
		     /*allow_expansion_p=*/false,
		     /*non_constant_p=*/NULL);
	      if (vec == NULL)
		arguments = error_mark_node;
	      else
		{
		  arguments = build_tree_list_vec (vec);
		  release_tree_vector (vec);
		}
	      /* Save the arguments away.  */
	      TREE_VALUE (attribute) = arguments;
	    }
	  /* An attribute with invalid arguments is dropped rather than
	     added to the list.  */
	  if (arguments != error_mark_node)
	    {
	      /* Add this attribute to the list.  */
	      TREE_CHAIN (attribute) = attribute_list;
	      attribute_list = attribute;
	    }
	  token = cp_lexer_peek_token (parser->lexer);
	}
      /* Now, look for more attributes.  If the next token isn't a
	 `,', we're done.  */
      if (token->type != CPP_COMMA)
	break;
      /* Consume the comma and keep going.  */
      cp_lexer_consume_token (parser->lexer);
    }
  parser->translate_strings_p = save_translate_strings_p;
  /* We built up the list in reverse order.  */
  return nreverse (attribute_list);
}
/* Parse an optional `__extension__' keyword.  Returns TRUE if it is
   present, and FALSE otherwise.  *SAVED_PEDANTIC is set to the
   current value of the PEDANTIC flag, regardless of whether or not
   the `__extension__' keyword is present.  The caller is responsible
   for restoring the value of the PEDANTIC flag.  */
static bool
cp_parser_extension_opt (cp_parser* parser, int* saved_pedantic)
{
  /* Record the current pedantry level unconditionally.  */
  *saved_pedantic = pedantic;

  if (!cp_lexer_next_token_is_keyword (parser->lexer, RID_EXTENSION))
    return false;

  /* Consume the `__extension__' token and stop being pedantic while
     it is in effect; the caller restores *SAVED_PEDANTIC later.  */
  cp_lexer_consume_token (parser->lexer);
  pedantic = 0;
  return true;
}
/* Parse a label declaration.

   label-declaration:
     __label__ label-declarator-seq ;

   label-declarator-seq:
     identifier , label-declarator-seq
     identifier  */
static void
cp_parser_label_declaration (cp_parser* parser)
{
  /* The declaration must begin with the `__label__' keyword.  */
  cp_parser_require_keyword (parser, RID_LABEL, RT_LABEL);

  for (;;)
    {
      /* Parse one label name; give up on a malformed identifier.  */
      tree name = cp_parser_identifier (parser);
      if (name == error_mark_node)
	break;
      /* Declare it as a label.  */
      finish_label_decl (name);
      /* A `;' ends the declarator-seq; otherwise the declarators must
	 be separated by `,'.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
	break;
      cp_parser_require (parser, CPP_COMMA, RT_COMMA);
    }

  /* Look for the final `;'.  */
  cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);
}
/* Support Functions */
/* Looks up NAME in the current scope, as given by PARSER->SCOPE.
NAME should have one of the representations used for an
id-expression. If NAME is the ERROR_MARK_NODE, the ERROR_MARK_NODE
is returned. If PARSER->SCOPE is a dependent type, then a
SCOPE_REF is returned.
If NAME is a TEMPLATE_ID_EXPR, then it will be immediately
returned; the name was already resolved when the TEMPLATE_ID_EXPR
was formed. Abstractly, such entities should not be passed to this
function, because they do not need to be looked up, but it is
simpler to check for this special case here, rather than at the
call-sites.
In cases not explicitly covered above, this function returns a
DECL, OVERLOAD, or baselink representing the result of the lookup.
If there was no entity with the indicated NAME, the ERROR_MARK_NODE
is returned.
If TAG_TYPE is not NONE_TYPE, it indicates an explicit type keyword
(e.g., "struct") that was used. In that case bindings that do not
refer to types are ignored.
If IS_TEMPLATE is TRUE, bindings that do not refer to templates are
ignored.
If IS_NAMESPACE is TRUE, bindings that do not refer to namespaces
are ignored.
If CHECK_DEPENDENCY is TRUE, names are not looked up in dependent
types.
If AMBIGUOUS_DECLS is non-NULL, *AMBIGUOUS_DECLS is set to a
TREE_LIST of candidates if name-lookup results in an ambiguity, and
NULL_TREE otherwise. */
static tree
cp_parser_lookup_name (cp_parser *parser, tree name,
enum tag_types tag_type,
bool is_template,
bool is_namespace,
bool check_dependency,
tree *ambiguous_decls,
location_t name_location)
{
int flags = 0;
tree decl;
tree object_type = parser->context->object_type;
/* Only complain about lookup failures once we are committed to this
parse, i.e. cannot still roll it back tentatively. */
if (!cp_parser_uncommitted_to_tentative_parse_p (parser))
flags |= LOOKUP_COMPLAIN;
/* Assume that the lookup will be unambiguous. */
if (ambiguous_decls)
*ambiguous_decls = NULL_TREE;
/* Now that we have looked up the name, the OBJECT_TYPE (if any) is
no longer valid. Note that if we are parsing tentatively, and
the parse fails, OBJECT_TYPE will be automatically restored. */
parser->context->object_type = NULL_TREE;
if (name == error_mark_node)
return error_mark_node;
/* A template-id has already been resolved; there is no lookup to
do. */
if (TREE_CODE (name) == TEMPLATE_ID_EXPR)
return name;
if (BASELINK_P (name))
{
gcc_assert (TREE_CODE (BASELINK_FUNCTIONS (name))
== TEMPLATE_ID_EXPR);
return name;
}
/* A BIT_NOT_EXPR is used to represent a destructor. By this point,
it should already have been checked to make sure that the name
used matches the type being destroyed. */
if (TREE_CODE (name) == BIT_NOT_EXPR)
{
tree type;
/* Figure out to which type this destructor applies. */
if (parser->scope)
type = parser->scope;
else if (object_type)
type = object_type;
else
type = current_class_type;
/* If that's not a class type, there is no destructor. */
if (!type || !CLASS_TYPE_P (type))
return error_mark_node;
/* The destructor may not have been declared yet; force its
declaration before looking it up. */
if (CLASSTYPE_LAZY_DESTRUCTOR (type))
lazily_declare_fn (sfk_destructor, type);
if (!CLASSTYPE_DESTRUCTORS (type))
return error_mark_node;
/* If it was a class type, return the destructor. */
return CLASSTYPE_DESTRUCTORS (type);
}
/* By this point, the NAME should be an ordinary identifier. If
the id-expression was a qualified name, the qualifying scope is
stored in PARSER->SCOPE at this point. */
gcc_assert (TREE_CODE (name) == IDENTIFIER_NODE);
/* Perform the lookup. */
if (parser->scope)
{
bool dependent_p;
if (parser->scope == error_mark_node)
return error_mark_node;
/* If the SCOPE is dependent, the lookup must be deferred until
the template is instantiated -- unless we are explicitly
looking up names in uninstantiated templates. Even then, we
cannot look up the name if the scope is not a class type; it
might, for example, be a template type parameter. */
dependent_p = (TYPE_P (parser->scope)
&& dependent_scope_p (parser->scope));
if ((check_dependency || !CLASS_TYPE_P (parser->scope))
&& dependent_p)
/* Defer lookup. */
decl = error_mark_node;
else
{
tree pushed_scope = NULL_TREE;
/* If PARSER->SCOPE is a dependent type, then it must be a
class type, and we must not be checking dependencies;
otherwise, we would have processed this lookup above. So
that PARSER->SCOPE is not considered a dependent base by
lookup_member, we must enter the scope here. */
if (dependent_p)
pushed_scope = push_scope (parser->scope);
/* If the PARSER->SCOPE is a template specialization, it
may be instantiated during name lookup. In that case,
errors may be issued. Even if we rollback the current
tentative parse, those errors are valid. */
decl = lookup_qualified_name (parser->scope, name,
tag_type != none_type,
/*complain=*/true);
/* 3.4.3.1: In a lookup in which the constructor is an acceptable
lookup result and the nested-name-specifier nominates a class C:
* if the name specified after the nested-name-specifier, when
looked up in C, is the injected-class-name of C (Clause 9), or
* if the name specified after the nested-name-specifier is the
same as the identifier or the simple-template-id's template-
name in the last component of the nested-name-specifier,
the name is instead considered to name the constructor of
class C. [ Note: for example, the constructor is not an
acceptable lookup result in an elaborated-type-specifier so
the constructor would not be used in place of the
injected-class-name. --end note ] Such a constructor name
shall be used only in the declarator-id of a declaration that
names a constructor or in a using-declaration. */
if (tag_type == none_type
&& DECL_SELF_REFERENCE_P (decl)
&& same_type_p (DECL_CONTEXT (decl), parser->scope))
decl = lookup_qualified_name (parser->scope, ctor_identifier,
tag_type != none_type,
/*complain=*/true);
/* If we have a single function from a using decl, pull it out. */
if (TREE_CODE (decl) == OVERLOAD
&& !really_overloaded_fn (decl))
decl = OVL_FUNCTION (decl);
if (pushed_scope)
pop_scope (pushed_scope);
}
/* If the scope is a dependent type and either we deferred lookup or
we did lookup but didn't find the name, remember the name. */
if (decl == error_mark_node && TYPE_P (parser->scope)
&& dependent_type_p (parser->scope))
{
if (tag_type)
{
tree type;
/* The resolution to Core Issue 180 says that `struct
A::B' should be considered a type-name, even if `A'
is dependent. */
type = make_typename_type (parser->scope, name, tag_type,
/*complain=*/tf_error);
decl = TYPE_NAME (type);
}
else if (is_template
&& (cp_parser_next_token_ends_template_argument_p (parser)
|| cp_lexer_next_token_is (parser->lexer,
CPP_CLOSE_PAREN)))
decl = make_unbound_class_template (parser->scope,
name, NULL_TREE,
/*complain=*/tf_error);
else
decl = build_qualified_name (/*type=*/NULL_TREE,
parser->scope, name,
is_template);
}
parser->qualifying_scope = parser->scope;
parser->object_scope = NULL_TREE;
}
else if (object_type)
{
tree object_decl = NULL_TREE;
/* Look up the name in the scope of the OBJECT_TYPE, unless the
OBJECT_TYPE is not a class. */
if (CLASS_TYPE_P (object_type))
/* If the OBJECT_TYPE is a template specialization, it may
be instantiated during name lookup. In that case, errors
may be issued. Even if we rollback the current tentative
parse, those errors are valid. */
object_decl = lookup_member (object_type,
name,
/*protect=*/0,
tag_type != none_type,
tf_warning_or_error);
/* Look it up in the enclosing context, too. */
decl = lookup_name_real (name, tag_type != none_type,
/*nonclass=*/0,
/*block_p=*/true, is_namespace, flags);
parser->object_scope = object_type;
parser->qualifying_scope = NULL_TREE;
/* A binding found in the object's class takes precedence over
one found in the enclosing context. */
if (object_decl)
decl = object_decl;
}
else
{
/* An unqualified name: use ordinary unqualified lookup. */
decl = lookup_name_real (name, tag_type != none_type,
/*nonclass=*/0,
/*block_p=*/true, is_namespace, flags);
parser->qualifying_scope = NULL_TREE;
parser->object_scope = NULL_TREE;
}
/* If the lookup failed, let our caller know. */
if (!decl || decl == error_mark_node)
return error_mark_node;
/* Pull out the template from an injected-class-name (or multiple). */
if (is_template)
decl = maybe_get_template_decl_from_type_decl (decl);
/* If it's a TREE_LIST, the result of the lookup was ambiguous. */
if (TREE_CODE (decl) == TREE_LIST)
{
if (ambiguous_decls)
*ambiguous_decls = decl;
/* The error message we have to print is too complicated for
cp_parser_error, so we incorporate its actions directly. */
if (!cp_parser_simulate_error (parser))
{
error_at (name_location, "reference to %qD is ambiguous",
name);
print_candidates (decl);
}
return error_mark_node;
}
gcc_assert (DECL_P (decl)
|| TREE_CODE (decl) == OVERLOAD
|| TREE_CODE (decl) == SCOPE_REF
|| TREE_CODE (decl) == UNBOUND_CLASS_TEMPLATE
|| BASELINK_P (decl));
/* If we have resolved the name of a member declaration, check to
see if the declaration is accessible. When the name resolves to
set of overloaded functions, accessibility is checked when
overload resolution is done.
During an explicit instantiation, access is not checked at all,
as per [temp.explicit]. */
if (DECL_P (decl))
check_accessibility_of_qualified_id (decl, object_type, parser->scope);
maybe_record_typedef_use (decl);
return decl;
}
/* Like cp_parser_lookup_name, but for use in the typical case where
   CHECK_ACCESS is TRUE, IS_TYPE is FALSE, IS_TEMPLATE is FALSE,
   IS_NAMESPACE is FALSE, and CHECK_DEPENDENCY is TRUE.  */
static tree
cp_parser_lookup_name_simple (cp_parser* parser, tree name, location_t location)
{
  /* Delegate to the general routine with the default arguments: no
     tag keyword, not a template or namespace lookup, dependency
     checking enabled, and no interest in ambiguous candidates.  */
  return cp_parser_lookup_name (parser, name, none_type,
				/*is_template=*/false,
				/*is_namespace=*/false,
				/*check_dependency=*/true,
				/*ambiguous_decls=*/NULL,
				location);
}
/* If DECL is a TEMPLATE_DECL that can be treated like a TYPE_DECL in
   the current context, return the TYPE_DECL.  If TAG_NAME_P is
   true, the DECL indicates the class being defined in a class-head,
   or declared in an elaborated-type-specifier.

   Otherwise, return DECL.  */
static tree
cp_parser_maybe_treat_template_as_class (tree decl, bool tag_name_p)
{
  /* If the TEMPLATE_DECL is being declared as part of a class-head,
     the translation from TEMPLATE_DECL to TYPE_DECL occurs:

       struct A {
	 template <typename T> struct B;
       };

       template <typename T> struct A::B {};

     Similarly, in an elaborated-type-specifier:

       namespace N { struct X{}; }

       struct A {
	 template <typename T> friend struct N::X;
       };

     However, if the DECL refers to a class type, and we are in
     the scope of the class, then the name lookup automatically
     finds the TYPE_DECL created by build_self_reference rather
     than a TEMPLATE_DECL.  For example, in:

       template <class T> struct S {
	 S s;
       };

     there is no need to handle such case.  */
  return (tag_name_p && DECL_CLASS_TEMPLATE_P (decl)
	  ? DECL_TEMPLATE_RESULT (decl)
	  : decl);
}
/* If too many, or too few, template-parameter lists apply to the
declarator, issue an error message. Returns TRUE if all went well,
and FALSE otherwise. */
static bool
cp_parser_check_declarator_template_parameters (cp_parser* parser,
cp_declarator *declarator,
location_t declarator_location)
{
unsigned num_templates;
/* We haven't seen any classes that involve template parameters yet. */
num_templates = 0;
switch (declarator->kind)
{
case cdk_id:
/* The declarator-id itself: count how many enclosing template
classes require their own `template <...>' header. */
if (declarator->u.id.qualifying_scope)
{
tree scope;
scope = declarator->u.id.qualifying_scope;
while (scope && CLASS_TYPE_P (scope))
{
/* You're supposed to have one `template <...>'
for every template class, but you don't need one
for a full specialization. For example:
template <class T> struct S{};
template <> struct S<int> { void f(); };
void S<int>::f () {}
is correct; there shouldn't be a `template <>' for
the definition of `S<int>::f'. */
if (!CLASSTYPE_TEMPLATE_INFO (scope))
/* If SCOPE does not have template information of any
kind, then it is not a template, nor is it nested
within a template. */
break;
if (explicit_class_specialization_p (scope))
break;
if (PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (scope)))
++num_templates;
scope = TYPE_CONTEXT (scope);
}
}
else if (TREE_CODE (declarator->u.id.unqualified_name)
== TEMPLATE_ID_EXPR)
/* If the DECLARATOR has the form `X<y>' then it uses one
additional level of template parameters. */
++num_templates;
return cp_parser_check_template_parameters
(parser, num_templates, declarator_location, declarator);
case cdk_function:
case cdk_array:
case cdk_pointer:
case cdk_reference:
case cdk_ptrmem:
/* Recurse into the nested declarator; only the declarator-id
at the bottom matters for this check. */
return (cp_parser_check_declarator_template_parameters
(parser, declarator->declarator, declarator_location));
case cdk_error:
return true;
default:
gcc_unreachable ();
}
/* Not reached: every case above returns, and gcc_unreachable does
not return; this quiets compilers that cannot see that. */
return false;
}
/* NUM_TEMPLATES were used in the current declaration.  If that is
   invalid, return FALSE and issue an error messages.  Otherwise,
   return TRUE.  If DECLARATOR is non-NULL, then we are checking a
   declarator and we can print more accurate diagnostics.  */
static bool
cp_parser_check_template_parameters (cp_parser* parser,
				     unsigned num_templates,
				     location_t location,
				     cp_declarator *declarator)
{
  unsigned num_lists = parser->num_template_parameter_lists;

  /* An exact match is fine; exactly one extra parameter list is fine
     too, since that is how a member template is declared.  */
  if (num_lists == num_templates || num_lists == num_templates + 1)
    return true;

  /* Fewer parameter lists than template classes means something like:
       template <class T> void S<T>::R<T>::f ();  */
  if (num_lists < num_templates)
    {
      if (declarator && !current_function_decl)
	error_at (location, "specializing member %<%T::%E%> "
		  "requires %<template<>%> syntax",
		  declarator->u.id.qualifying_scope,
		  declarator->u.id.unqualified_name);
      else if (declarator)
	error_at (location, "invalid declaration of %<%T::%E%>",
		  declarator->u.id.qualifying_scope,
		  declarator->u.id.unqualified_name);
      else
	error_at (location, "too few template-parameter-lists");
      return false;
    }

  /* Otherwise, there are too many parameter lists, as in:
       template <class T> template <class U> void S::f ();  */
  error_at (location, "too many template-parameter-lists");
  return false;
}
/* Parse an optional `::' token indicating that the following name is
   from the global namespace.  If so, PARSER->SCOPE is set to the
   GLOBAL_NAMESPACE.  Otherwise, PARSER->SCOPE is set to NULL_TREE,
   unless CURRENT_SCOPE_VALID_P is TRUE, in which case it is left alone.
   Returns the new value of PARSER->SCOPE, if the `::' token is
   present, and NULL_TREE otherwise.  */
static tree
cp_parser_global_scope_opt (cp_parser* parser, bool current_scope_valid_p)
{
  /* A leading `::' means lookup starts at the global namespace.  */
  if (cp_lexer_peek_token (parser->lexer)->type == CPP_SCOPE)
    {
      /* Consume the `::' token.  */
      cp_lexer_consume_token (parser->lexer);
      /* Set the SCOPE so that we know where to start the lookup.  */
      parser->scope = global_namespace;
      parser->qualifying_scope = global_namespace;
      parser->object_scope = NULL_TREE;
      return parser->scope;
    }

  /* No `::': clear any stale scope information, unless the caller
     asked for the current scope to be preserved.  */
  if (!current_scope_valid_p)
    {
      parser->scope = NULL_TREE;
      parser->qualifying_scope = NULL_TREE;
      parser->object_scope = NULL_TREE;
    }
  return NULL_TREE;
}
/* Returns TRUE if the upcoming token sequence is the start of a
constructor declarator. If FRIEND_P is true, the declarator is
preceded by the `friend' specifier. */
static bool
cp_parser_constructor_declarator_p (cp_parser *parser, bool friend_p)
{
bool constructor_p;
tree nested_name_specifier;
cp_token *next_token;
/* The common case is that this is not a constructor declarator, so
try to avoid doing lots of work if at all possible. It's not
valid to declare a constructor at function scope. */
if (parser->in_function_body)
return false;
/* And only certain tokens can begin a constructor declarator. */
next_token = cp_lexer_peek_token (parser->lexer);
if (next_token->type != CPP_NAME
&& next_token->type != CPP_SCOPE
&& next_token->type != CPP_NESTED_NAME_SPECIFIER
&& next_token->type != CPP_TEMPLATE_ID)
return false;
/* Parse tentatively; we are going to roll back all of the tokens
consumed here. */
cp_parser_parse_tentatively (parser);
/* Assume that we are looking at a constructor declarator. */
constructor_p = true;
/* Look for the optional `::' operator. */
cp_parser_global_scope_opt (parser,
/*current_scope_valid_p=*/false);
/* Look for the nested-name-specifier. */
nested_name_specifier
= (cp_parser_nested_name_specifier_opt (parser,
/*typename_keyword_p=*/false,
/*check_dependency_p=*/false,
/*type_p=*/false,
/*is_declaration=*/false));
/* Outside of a class-specifier, there must be a
nested-name-specifier. */
if (!nested_name_specifier &&
(!at_class_scope_p () || !TYPE_BEING_DEFINED (current_class_type)
|| friend_p))
constructor_p = false;
else if (nested_name_specifier == error_mark_node)
constructor_p = false;
/* If we have a class scope, this is easy; DR 147 says that S::S always
names the constructor, and no other qualified name could. */
if (constructor_p && nested_name_specifier
&& CLASS_TYPE_P (nested_name_specifier))
{
tree id = cp_parser_unqualified_id (parser,
/*template_keyword_p=*/false,
/*check_dependency_p=*/false,
/*declarator_p=*/true,
/*optional_p=*/false);
/* If the id resolved to a set of functions, the comparison below
only needs the name of the first one. */
if (is_overloaded_fn (id))
id = DECL_NAME (get_first_fn (id));
if (!constructor_name_p (id, nested_name_specifier))
constructor_p = false;
}
/* If we still think that this might be a constructor-declarator,
look for a class-name. */
else if (constructor_p)
{
/* If we have:
template <typename T> struct S {
S();
};
we must recognize that the nested `S' names a class. */
tree type_decl;
type_decl = cp_parser_class_name (parser,
/*typename_keyword_p=*/false,
/*template_keyword_p=*/false,
none_type,
/*check_dependency_p=*/false,
/*class_head_p=*/false,
/*is_declaration=*/false);
/* If there was no class-name, then this is not a constructor. */
constructor_p = !cp_parser_error_occurred (parser);
/* If we're still considering a constructor, we have to see a `(',
to begin the parameter-declaration-clause, followed by either a
`)', an `...', or a decl-specifier. We need to check for a
type-specifier to avoid being fooled into thinking that:
S (f) (int);
is a constructor. (It is actually a function named `f' that
takes one parameter (of type `int') and returns a value of type
`S'. */
if (constructor_p
&& !cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN))
constructor_p = false;
if (constructor_p
&& cp_lexer_next_token_is_not (parser->lexer, CPP_CLOSE_PAREN)
&& cp_lexer_next_token_is_not (parser->lexer, CPP_ELLIPSIS)
/* A parameter declaration begins with a decl-specifier,
which is either the "attribute" keyword, a storage class
specifier, or (usually) a type-specifier. */
&& !cp_lexer_next_token_is_decl_specifier_keyword (parser->lexer))
{
tree type;
tree pushed_scope = NULL_TREE;
unsigned saved_num_template_parameter_lists;
/* Names appearing in the type-specifier should be looked up
in the scope of the class. */
if (current_class_type)
/* Already inside the class; no scope needs to be entered. */
type = NULL_TREE;
else
{
type = TREE_TYPE (type_decl);
if (TREE_CODE (type) == TYPENAME_TYPE)
{
type = resolve_typename_type (type,
/*only_current_p=*/false);
if (TREE_CODE (type) == TYPENAME_TYPE)
{
/* The typename could not be resolved; give up on the
whole tentative parse. */
cp_parser_abort_tentative_parse (parser);
return false;
}
}
pushed_scope = push_scope (type);
}
/* Inside the constructor parameter list, surrounding
template-parameter-lists do not apply. */
saved_num_template_parameter_lists
= parser->num_template_parameter_lists;
parser->num_template_parameter_lists = 0;
/* Look for the type-specifier. */
cp_parser_type_specifier (parser,
CP_PARSER_FLAGS_NONE,
/*decl_specs=*/NULL,
/*is_declarator=*/true,
/*declares_class_or_enum=*/NULL,
/*is_cv_qualifier=*/NULL);
parser->num_template_parameter_lists
= saved_num_template_parameter_lists;
/* Leave the scope of the class. */
if (pushed_scope)
pop_scope (pushed_scope);
constructor_p = !cp_parser_error_occurred (parser);
}
}
/* We did not really want to consume any tokens. */
cp_parser_abort_tentative_parse (parser);
return constructor_p;
}
/* Parse the definition of the function given by the DECL_SPECIFIERS,
ATTRIBUTES, and DECLARATOR. The access checks have been deferred;
they must be performed once we are in the scope of the function.
Returns the function defined. */
static tree
cp_parser_function_definition_from_specifiers_and_declarator
(cp_parser* parser,
cp_decl_specifier_seq *decl_specifiers,
tree attributes,
const cp_declarator *declarator)
{
tree fn;
bool success_p;
/* Begin the function-definition. */
success_p = start_function (decl_specifiers, declarator, attributes);
/* The things we're about to see are not directly qualified by any
template headers we've seen thus far. */
reset_specialization ();
/* If there were names looked up in the decl-specifier-seq that we
did not check, check them now. We must wait until we are in the
scope of the function to perform the checks, since the function
might be a friend. */
perform_deferred_access_checks ();
if (!success_p)
{
/* start_function failed; skip the entire function. */
cp_parser_skip_to_end_of_block_or_statement (parser);
fn = error_mark_node;
}
else if (DECL_INITIAL (current_function_decl) != error_mark_node)
{
/* Seen already, skip it. An error message has already been output. */
cp_parser_skip_to_end_of_block_or_statement (parser);
fn = current_function_decl;
current_function_decl = NULL_TREE;
/* If this is a function from a class, pop the nested class. */
if (current_class_name)
pop_nested_class ();
}
else
{
timevar_id_t tv;
/* Account parsing time to the appropriate timer bucket. */
if (DECL_DECLARED_INLINE_P (current_function_decl))
tv = TV_PARSE_INLINE;
else
tv = TV_PARSE_FUNC;
timevar_push (tv);
fn = cp_parser_function_definition_after_declarator (parser,
/*inline_p=*/false);
timevar_pop (tv);
}
return fn;
}
/* Parse the part of a function-definition that follows the
declarator. INLINE_P is TRUE iff this function is an inline
function defined within a class-specifier.
Returns the function defined. */
static tree
cp_parser_function_definition_after_declarator (cp_parser* parser,
bool inline_p)
{
tree fn;
bool ctor_initializer_p = false;
bool saved_in_unbraced_linkage_specification_p;
bool saved_in_function_body;
unsigned saved_num_template_parameter_lists;
cp_token *token;
/* Remember that we are now inside a function body; restored on
exit below. */
saved_in_function_body = parser->in_function_body;
parser->in_function_body = true;
/* If the next token is `return', then the code may be trying to
make use of the "named return value" extension that G++ used to
support. */
token = cp_lexer_peek_token (parser->lexer);
if (cp_lexer_next_token_is_keyword (parser->lexer, RID_RETURN))
{
/* Consume the `return' keyword. */
cp_lexer_consume_token (parser->lexer);
/* Look for the identifier that indicates what value is to be
returned. */
cp_parser_identifier (parser);
/* Issue an error message. */
error_at (token->location,
"named return values are no longer supported");
/* Skip tokens until we reach the start of the function body. */
while (true)
{
cp_token *token = cp_lexer_peek_token (parser->lexer);
if (token->type == CPP_OPEN_BRACE
|| token->type == CPP_EOF
|| token->type == CPP_PRAGMA_EOL)
break;
cp_lexer_consume_token (parser->lexer);
}
}
/* The `extern' in `extern "C" void f () { ... }' does not apply to
anything declared inside `f'. */
saved_in_unbraced_linkage_specification_p
= parser->in_unbraced_linkage_specification_p;
parser->in_unbraced_linkage_specification_p = false;
/* Inside the function, surrounding template-parameter-lists do not
apply. */
saved_num_template_parameter_lists
= parser->num_template_parameter_lists;
parser->num_template_parameter_lists = 0;
start_lambda_scope (current_function_decl);
/* If the next token is `try', `__transaction_atomic', or
`__transaction_relaxed`, then we are looking at either function-try-block
or function-transaction-block. Note that all of these include the
function-body. */
if (cp_lexer_next_token_is_keyword (parser->lexer, RID_TRANSACTION_ATOMIC))
ctor_initializer_p = cp_parser_function_transaction (parser,
RID_TRANSACTION_ATOMIC);
else if (cp_lexer_next_token_is_keyword (parser->lexer,
RID_TRANSACTION_RELAXED))
ctor_initializer_p = cp_parser_function_transaction (parser,
RID_TRANSACTION_RELAXED);
else if (cp_lexer_next_token_is_keyword (parser->lexer, RID_TRY))
ctor_initializer_p = cp_parser_function_try_block (parser);
else
ctor_initializer_p
= cp_parser_ctor_initializer_opt_and_function_body (parser);
finish_lambda_scope ();
/* Finish the function; the flags encode whether a ctor-initializer
was seen and whether this is an in-class inline definition. */
fn = finish_function ((ctor_initializer_p ? 1 : 0) |
(inline_p ? 2 : 0));
/* Generate code for it, if necessary. */
expand_or_defer_fn (fn);
/* Restore the saved values. */
parser->in_unbraced_linkage_specification_p
= saved_in_unbraced_linkage_specification_p;
parser->num_template_parameter_lists
= saved_num_template_parameter_lists;
parser->in_function_body = saved_in_function_body;
return fn;
}
/* Parse a template-declaration, assuming that the `export' (and
`extern') keywords, if present, have already been scanned. MEMBER_P
is as for cp_parser_template_declaration. */
static void
cp_parser_template_declaration_after_export (cp_parser* parser, bool member_p)
{
tree decl = NULL_TREE;
VEC (deferred_access_check,gc) *checks;
tree parameter_list;
bool friend_p = false;
bool need_lang_pop;
cp_token *token;
/* Look for the `template' keyword. */
token = cp_lexer_peek_token (parser->lexer);
if (!cp_parser_require_keyword (parser, RID_TEMPLATE, RT_TEMPLATE))
return;
/* And the `<'. */
if (!cp_parser_require (parser, CPP_LESS, RT_LESS))
return;
if (at_class_scope_p () && current_function_decl)
{
/* 14.5.2.2 [temp.mem]
A local class shall not have member templates. */
error_at (token->location,
"invalid declaration of member template in local class");
cp_parser_skip_to_end_of_block_or_statement (parser);
return;
}
/* [temp]
A template ... shall not have C linkage. */
if (current_lang_name == lang_name_c)
{
error_at (token->location, "template with C linkage");
/* Give it C++ linkage to avoid confusing other parts of the
front end. */
push_lang_context (lang_name_cplusplus);
need_lang_pop = true;
}
else
need_lang_pop = false;
/* We cannot perform access checks on the template parameter
declarations until we know what is being declared, just as we
cannot check the decl-specifier list. */
push_deferring_access_checks (dk_deferred);
/* If the next token is `>', then we have an invalid
specialization. Rather than complain about an invalid template
parameter, issue an error message here. */
if (cp_lexer_next_token_is (parser->lexer, CPP_GREATER))
{
cp_parser_error (parser, "invalid explicit specialization");
begin_specialization ();
parameter_list = NULL_TREE;
}
else
{
/* Parse the template parameters. */
parameter_list = cp_parser_template_parameter_list (parser);
fixup_template_parms ();
}
/* Get the deferred access checks from the parameter list. These
will be checked once we know what is being declared, as for a
member template the checks must be performed in the scope of the
class containing the member. */
checks = get_deferred_access_checks ();
/* Look for the `>'. */
cp_parser_skip_to_end_of_template_parameter_list (parser);
/* We just processed one more parameter list. */
++parser->num_template_parameter_lists;
/* If the next token is `template', there are more template
parameters; recurse to handle the nested parameter lists. */
if (cp_lexer_next_token_is_keyword (parser->lexer,
RID_TEMPLATE))
cp_parser_template_declaration_after_export (parser, member_p)
;
else if (cxx_dialect >= cxx0x
&& cp_lexer_next_token_is_keyword (parser->lexer, RID_USING))
decl = cp_parser_alias_declaration (parser);
else
{
/* There are no access checks when parsing a template, as we do not
know if a specialization will be a friend. */
push_deferring_access_checks (dk_no_check);
token = cp_lexer_peek_token (parser->lexer);
decl = cp_parser_single_declaration (parser,
checks,
member_p,
/*explicit_specialization_p=*/false,
&friend_p);
pop_deferring_access_checks ();
/* If this is a member template declaration, let the front
end know. */
if (member_p && !friend_p && decl)
{
if (TREE_CODE (decl) == TYPE_DECL)
cp_parser_check_access_in_redeclaration (decl, token->location);
decl = finish_member_template_decl (decl);
}
else if (friend_p && decl && TREE_CODE (decl) == TYPE_DECL)
make_friend_class (current_class_type, TREE_TYPE (decl),
/*complain=*/true);
}
/* We are done with the current parameter list. */
--parser->num_template_parameter_lists;
pop_deferring_access_checks ();
/* Finish up. */
finish_template_decl (parameter_list);
/* Check the template arguments for a literal operator template:
it must take a single non-type parameter pack of type char. */
if (decl
&& (TREE_CODE (decl) == FUNCTION_DECL || DECL_FUNCTION_TEMPLATE_P (decl))
&& UDLIT_OPER_P (DECL_NAME (decl)))
{
bool ok = true;
if (parameter_list == NULL_TREE)
ok = false;
else
{
int num_parms = TREE_VEC_LENGTH (parameter_list);
if (num_parms != 1)
ok = false;
else
{
tree parm_list = TREE_VEC_ELT (parameter_list, 0);
tree parm = INNERMOST_TEMPLATE_PARMS (parm_list);
if (TREE_TYPE (parm) != char_type_node
|| !TEMPLATE_PARM_PARAMETER_PACK (DECL_INITIAL (parm)))
ok = false;
}
}
if (!ok)
error ("literal operator template %qD has invalid parameter list."
" Expected non-type template argument pack <char...>",
decl);
}
/* Register member declarations. */
if (member_p && !friend_p && decl && !DECL_CLASS_TEMPLATE_P (decl))
finish_member_declaration (decl);
/* For the erroneous case of a template with C linkage, we pushed an
implicit C++ linkage scope; exit that scope now. */
if (need_lang_pop)
pop_lang_context ();
/* If DECL is a function template, we must return to parse it later.
(Even though there is no definition, there might be default
arguments that need handling.) */
if (member_p && decl
&& (TREE_CODE (decl) == FUNCTION_DECL
|| DECL_FUNCTION_TEMPLATE_P (decl)))
VEC_safe_push (tree, gc, unparsed_funs_with_definitions, decl);
}
/* Perform the deferred access checks from a template-parameter-list.
   CHECKS is a TREE_LIST of access checks, as returned by
   get_deferred_access_checks.  */
static void
cp_parser_perform_template_parameter_access_checks (VEC (deferred_access_check,gc)* checks)
{
  /* Temporarily re-enter "processing a template parameter list" state
     so the checks are evaluated in that context; the increment and
     decrement must bracket the call exactly.  */
  ++processing_template_parmlist;
  perform_access_checks (checks);
  --processing_template_parmlist;
}
/* Parse a `decl-specifier-seq [opt] init-declarator [opt] ;' or
   `function-definition' sequence that forms the body of a template
   declaration.  If MEMBER_P is true, this declaration appears in a
   class scope.

   CHECKS is the list of access checks deferred from the
   template-parameter-list (see get_deferred_access_checks); they are
   performed once we know what is being declared.  If
   EXPLICIT_SPECIALIZATION_P is true, we are parsing an explicit
   specialization.

   Returns the DECL for the declared entity.  If FRIEND_P is non-NULL,
   *FRIEND_P is set to TRUE iff the declaration is a friend.  */
static tree
cp_parser_single_declaration (cp_parser* parser,
                              VEC (deferred_access_check,gc)* checks,
                              bool member_p,
                              bool explicit_specialization_p,
                              bool* friend_p)
{
  int declares_class_or_enum;
  tree decl = NULL_TREE;
  cp_decl_specifier_seq decl_specifiers;
  bool function_definition_p = false;
  cp_token *decl_spec_token_start;
  /* This function is only used when processing a template
     declaration.  */
  gcc_assert (innermost_scope_kind () == sk_template_parms
              || innermost_scope_kind () == sk_template_spec);
  /* Defer access checks until we know what is being declared.  */
  push_deferring_access_checks (dk_deferred);
  /* Try the `decl-specifier-seq [opt] init-declarator [opt]'
     alternative.  Remember where the decl-specifiers began for
     diagnostics below.  */
  decl_spec_token_start = cp_lexer_peek_token (parser->lexer);
  cp_parser_decl_specifier_seq (parser,
                                CP_PARSER_FLAGS_OPTIONAL,
                                &decl_specifiers,
                                &declares_class_or_enum);
  if (friend_p)
    *friend_p = cp_parser_friend_p (&decl_specifiers);
  /* There are no template typedefs.  */
  if (decl_specifiers.specs[(int) ds_typedef])
    {
      error_at (decl_spec_token_start->location,
                "template declaration of %<typedef%>");
      decl = error_mark_node;
    }
  /* Gather up the access checks that occurred during the
     decl-specifier-seq.  */
  stop_deferring_access_checks ();
  /* Check for the declaration of a template class.  */
  if (declares_class_or_enum)
    {
      if (cp_parser_declares_only_class_p (parser))
        {
          decl = shadow_tag (&decl_specifiers);
          /* In this case:
               struct C {
                 friend template <typename T> struct A<T>::B;
               };
             A<T>::B will be represented by a TYPENAME_TYPE, and
             therefore not recognized by shadow_tag.  Fall back to the
             type recorded in the decl-specifiers.  */
          if (friend_p && *friend_p
              && !decl
              && decl_specifiers.type
              && TYPE_P (decl_specifiers.type))
            decl = decl_specifiers.type;
          if (decl && decl != error_mark_node)
            decl = TYPE_NAME (decl);
          else
            decl = error_mark_node;
          /* Perform access checks for template parameters.  */
          cp_parser_perform_template_parameter_access_checks (checks);
        }
    }
  /* Complain about missing 'typename' or other invalid type names.  */
  if (!decl_specifiers.any_type_specifiers_p
      && cp_parser_parse_and_diagnose_invalid_type_name (parser))
    {
      /* cp_parser_parse_and_diagnose_invalid_type_name calls
         cp_parser_skip_to_end_of_block_or_statement, so don't try to parse
         the rest of this declaration.  */
      decl = error_mark_node;
      goto out;
    }
  /* If it's not a template class, try for a template function.  If
     the next token is a `;', then this declaration does not declare
     anything.  But, if there were errors in the decl-specifiers, then
     the error might well have come from an attempted class-specifier.
     In that case, there's no need to warn about a missing declarator.  */
  if (!decl
      && (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON)
          || decl_specifiers.type != error_mark_node))
    {
      decl = cp_parser_init_declarator (parser,
                                        &decl_specifiers,
                                        checks,
                                        /*function_definition_allowed_p=*/true,
                                        member_p,
                                        declares_class_or_enum,
                                        &function_definition_p,
                                        NULL);
      /* 7.1.1-1 [dcl.stc]
         A storage-class-specifier shall not be specified in an explicit
         specialization...  */
      if (decl
          && explicit_specialization_p
          && decl_specifiers.storage_class != sc_none)
        {
          error_at (decl_spec_token_start->location,
                    "explicit template specialization cannot have a storage class");
          decl = error_mark_node;
        }
    }
  /* Look for a trailing `;' after the declaration.  On error, skip
     ahead so parsing can resume at the next declaration.  */
  if (!function_definition_p
      && (decl == error_mark_node
          || !cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON)))
    cp_parser_skip_to_end_of_block_or_statement (parser);
 out:
  pop_deferring_access_checks ();
  /* Clear any current qualification; whatever comes next is the start
     of something new.  */
  parser->scope = NULL_TREE;
  parser->qualifying_scope = NULL_TREE;
  parser->object_scope = NULL_TREE;
  return decl;
}
/* Parse a cast-expression that is not the operand of a unary "&".
   Returns the parsed expression.  */
static tree
cp_parser_simple_cast_expression (cp_parser *parser)
{
  /* Neither the address-of context nor the cast context applies, and
     no parenthesized-id-expression information is collected.  */
  tree expr = cp_parser_cast_expression (parser, /*address_p=*/false,
                                         /*cast_p=*/false, NULL);
  return expr;
}
/* Parse a functional cast to TYPE.  Returns an expression
   representing the cast.  */
static tree
cp_parser_functional_cast (cp_parser* parser, tree type)
{
  VEC(tree,gc) *vec;
  tree expression_list;
  tree cast;
  bool nonconst_p;
  /* A braced-init-list after the type (T{...}) is treated as a
     compound literal; warn if C++0x features are not enabled.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
    {
      maybe_warn_cpp0x (CPP0X_INITIALIZER_LISTS);
      expression_list = cp_parser_braced_list (parser, &nonconst_p);
      CONSTRUCTOR_IS_DIRECT_INIT (expression_list) = 1;
      if (TREE_CODE (type) == TYPE_DECL)
        type = TREE_TYPE (type);
      return finish_compound_literal (type, expression_list,
                                      tf_warning_or_error);
    }
  /* Otherwise, parse the parenthesized expression-list.  */
  vec = cp_parser_parenthesized_expression_list (parser, non_attr,
                                                 /*cast_p=*/true,
                                                 /*allow_expansion_p=*/true,
                                                 /*non_constant_p=*/NULL);
  if (vec == NULL)
    expression_list = error_mark_node;
  else
    {
      expression_list = build_tree_list_vec (vec);
      release_tree_vector (vec);
    }
  cast = build_functional_cast (type, expression_list,
                                tf_warning_or_error);
  /* [expr.const]/1: In an integral constant expression "only type
     conversions to integral or enumeration type can be used".  */
  if (TREE_CODE (type) == TYPE_DECL)
    type = TREE_TYPE (type);
  if (cast != error_mark_node
      && !cast_valid_in_integral_constant_expression_p (type)
      && cp_parser_non_integral_constant_expression (parser,
                                                     NIC_CONSTRUCTOR))
    return error_mark_node;
  return cast;
}
/* Save the tokens that make up the body of a member function defined
   in a class-specifier.  The DECL_SPECIFIERS and DECLARATOR have
   already been parsed.  The ATTRIBUTES are any GNU "__attribute__"
   specifiers applied to the declaration.  Returns the FUNCTION_DECL
   for the member function.  The body itself is not parsed here; its
   tokens are cached and parsed after the class is complete.  */
static tree
cp_parser_save_member_function_body (cp_parser* parser,
                                     cp_decl_specifier_seq *decl_specifiers,
                                     cp_declarator *declarator,
                                     tree attributes)
{
  cp_token *first;
  cp_token *last;
  tree fn;
  /* Create the FUNCTION_DECL.  */
  fn = grokmethod (decl_specifiers, declarator, attributes);
  /* If something went badly wrong, bail out now.  */
  if (fn == error_mark_node)
    {
      /* If there's a function-body, skip it.  */
      if (cp_parser_token_starts_function_definition_p
          (cp_lexer_peek_token (parser->lexer)))
        cp_parser_skip_to_end_of_block_or_statement (parser);
      return error_mark_node;
    }
  /* Remember it, if there are default args to post process.  */
  cp_parser_save_default_args (parser, fn);
  /* Save away the tokens that make up the body of the
     function.  */
  first = parser->lexer->next_token;
  /* We can have braced-init-list mem-initializers before the fn body.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_COLON))
    {
      cp_lexer_consume_token (parser->lexer);
      while (cp_lexer_next_token_is_not (parser->lexer, CPP_OPEN_BRACE)
             && cp_lexer_next_token_is_not_keyword (parser->lexer, RID_TRY))
        {
          /* cache_group will stop after an un-nested { } pair, too.  */
          if (cp_parser_cache_group (parser, CPP_CLOSE_PAREN, /*depth=*/0))
            break;
          /* variadic mem-inits have ... after the ')'.  */
          if (cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS))
            cp_lexer_consume_token (parser->lexer);
        }
    }
  /* Cache the function body proper.  */
  cp_parser_cache_group (parser, CPP_CLOSE_BRACE, /*depth=*/0);
  /* Handle function try blocks: each handler is cached as well.  */
  while (cp_lexer_next_token_is_keyword (parser->lexer, RID_CATCH))
    cp_parser_cache_group (parser, CPP_CLOSE_BRACE, /*depth=*/0);
  last = parser->lexer->next_token;
  /* Save away the inline definition; we will process it when the
     class is complete.  */
  DECL_PENDING_INLINE_INFO (fn) = cp_token_cache_new (first, last);
  DECL_PENDING_INLINE_P (fn) = 1;
  /* We need to know that this was defined in the class, so that
     friend templates are handled correctly.  */
  DECL_INITIALIZED_IN_CLASS_P (fn) = 1;
  /* Add FN to the queue of functions to be parsed later.  */
  VEC_safe_push (tree, gc, unparsed_funs_with_definitions, fn);
  return fn;
}
/* Save the tokens that make up the in-class initializer for a non-static
   data member.  Returns a DEFAULT_ARG.  */
static tree
cp_parser_save_nsdmi (cp_parser* parser)
{
  /* The same token-caching machinery used for default arguments is
     used here, with the nsdmi flag set.  */
  tree saved_tokens = cp_parser_cache_defarg (parser, /*nsdmi=*/true);
  return saved_tokens;
}
/* Parse a template-argument-list, as well as the trailing ">" (but
   not the opening "<").  See cp_parser_template_argument_list for the
   return value.  */
static tree
cp_parser_enclosed_template_argument_list (cp_parser* parser)
{
  tree arguments;
  tree saved_scope;
  tree saved_qualifying_scope;
  tree saved_object_scope;
  bool saved_greater_than_is_operator_p;
  int saved_unevaluated_operand;
  int saved_inhibit_evaluation_warnings;
  /* [temp.names]
     When parsing a template-id, the first non-nested `>' is taken as
     the end of the template-argument-list rather than a greater-than
     operator.  */
  saved_greater_than_is_operator_p
    = parser->greater_than_is_operator_p;
  parser->greater_than_is_operator_p = false;
  /* Parsing the argument list may modify SCOPE, so we save it
     here.  */
  saved_scope = parser->scope;
  saved_qualifying_scope = parser->qualifying_scope;
  saved_object_scope = parser->object_scope;
  /* We need to evaluate the template arguments, even though this
     template-id may be nested within a "sizeof".  Clear the
     unevaluated-operand state for the duration.  */
  saved_unevaluated_operand = cp_unevaluated_operand;
  cp_unevaluated_operand = 0;
  saved_inhibit_evaluation_warnings = c_inhibit_evaluation_warnings;
  c_inhibit_evaluation_warnings = 0;
  /* Parse the template-argument-list itself.  An immediately
     following `>' (or `>>') means the list is empty.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_GREATER)
      || cp_lexer_next_token_is (parser->lexer, CPP_RSHIFT))
    arguments = NULL_TREE;
  else
    arguments = cp_parser_template_argument_list (parser);
  /* Look for the `>' that ends the template-argument-list.  If we find
     a '>>' instead, it's probably just a typo.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_RSHIFT))
    {
      if (cxx_dialect != cxx98)
        {
          /* In C++0x, a `>>' in a template argument list or cast
             expression is considered to be two separate `>'
             tokens.  So, change the current token to a `>', but don't
             consume it: it will be consumed later when the outer
             template argument list (or cast expression) is parsed.
             Note that this replacement of `>' for `>>' is necessary
             even if we are parsing tentatively: in the tentative
             case, after calling
             cp_parser_enclosed_template_argument_list we will always
             throw away all of the template arguments and the first
             closing `>', either because the template argument list
             was erroneous or because we are replacing those tokens
             with a CPP_TEMPLATE_ID token.  The second `>' (which will
             not have been thrown away) is needed either to close an
             outer template argument list or to complete a new-style
             cast.  */
          cp_token *token = cp_lexer_peek_token (parser->lexer);
          token->type = CPP_GREATER;
        }
      else if (!saved_greater_than_is_operator_p)
        {
          /* If we're in a nested template argument list, the '>>' has
             to be a typo for '> >'.  We emit the error message, but we
             continue parsing and we push a '>' as next token, so that
             the argument list will be parsed correctly.  Note that the
             global source location is still on the token before the
             '>>', so we need to say explicitly where we want it.  */
          cp_token *token = cp_lexer_peek_token (parser->lexer);
          error_at (token->location, "%<>>%> should be %<> >%> "
                    "within a nested template argument list");
          token->type = CPP_GREATER;
        }
      else
        {
          /* If this is not a nested template argument list, the '>>'
             is a typo for '>'.  Emit an error message and continue.
             Same deal about the token location, but here we can get it
             right by consuming the '>>' before issuing the diagnostic.  */
          cp_token *token = cp_lexer_consume_token (parser->lexer);
          error_at (token->location,
                    "spurious %<>>%>, use %<>%> to terminate "
                    "a template argument list");
        }
    }
  else
    cp_parser_skip_to_end_of_template_parameter_list (parser);
  /* The `>' token might be a greater-than operator again now.  */
  parser->greater_than_is_operator_p
    = saved_greater_than_is_operator_p;
  /* Restore the SAVED_SCOPE.  */
  parser->scope = saved_scope;
  parser->qualifying_scope = saved_qualifying_scope;
  parser->object_scope = saved_object_scope;
  cp_unevaluated_operand = saved_unevaluated_operand;
  c_inhibit_evaluation_warnings = saved_inhibit_evaluation_warnings;
  return arguments;
}
/* MEMBER_FUNCTION is a member function, or a friend.  If default
   arguments, or the body of the function have not yet been parsed,
   parse them now.  */
static void
cp_parser_late_parsing_for_member (cp_parser* parser, tree member_function)
{
  timevar_push (TV_PARSE_INMETH);
  /* If this member is a template, get the underlying
     FUNCTION_DECL.  */
  if (DECL_FUNCTION_TEMPLATE_P (member_function))
    member_function = DECL_TEMPLATE_RESULT (member_function);
  /* There should not be any class definitions in progress at this
     point; the bodies of members are only parsed outside of all class
     definitions.  */
  gcc_assert (parser->num_classes_being_defined == 0);
  /* While we're parsing the member functions we might encounter more
     classes.  We want to handle them right away, but we don't want
     them getting mixed up with functions that are currently in the
     queue.  */
  push_unparsed_function_queues (parser);
  /* Make sure that any template parameters are in scope.  */
  maybe_begin_member_template_processing (member_function);
  /* If the body of the function has not yet been parsed, parse it
     now.  */
  if (DECL_PENDING_INLINE_P (member_function))
    {
      tree function_scope;
      cp_token_cache *tokens;
      /* The function is no longer pending; we are processing it.
         Clear the pending-inline state before parsing so it is not
         re-queued.  */
      tokens = DECL_PENDING_INLINE_INFO (member_function);
      DECL_PENDING_INLINE_INFO (member_function) = NULL;
      DECL_PENDING_INLINE_P (member_function) = 0;
      /* If this is a local class, enter the scope of the containing
         function.  */
      function_scope = current_function_decl;
      if (function_scope)
        push_function_context ();
      /* Push the body of the function onto the lexer stack.  */
      cp_parser_push_lexer_for_tokens (parser, tokens);
      /* Let the front end know that we are going to be defining this
         function.  */
      start_preparsed_function (member_function, NULL_TREE,
                                SF_PRE_PARSED | SF_INCLASS_INLINE);
      /* Don't do access checking if it is a templated function.  */
      if (processing_template_decl)
        push_deferring_access_checks (dk_no_check);
      /* Now, parse the body of the function.  */
      cp_parser_function_definition_after_declarator (parser,
                                                      /*inline_p=*/true);
      if (processing_template_decl)
        pop_deferring_access_checks ();
      /* Leave the scope of the containing function.  */
      if (function_scope)
        pop_function_context ();
      cp_parser_pop_lexer (parser);
    }
  /* Remove any template parameters from the symbol table.  */
  maybe_end_member_template_processing ();
  /* Restore the queue.  */
  pop_unparsed_function_queues (parser);
  timevar_pop (TV_PARSE_INMETH);
}
/* If DECL contains any default args, remember it on the unparsed
   functions queue so they can be parsed once the class is complete.  */
static void
cp_parser_save_default_args (cp_parser* parser, tree decl)
{
  tree arg_type = TYPE_ARG_TYPES (TREE_TYPE (decl));
  /* Walk the parameter-type-list; a non-NULL TREE_PURPOSE marks a
     parameter that carries a default argument.  */
  while (arg_type)
    {
      if (TREE_PURPOSE (arg_type))
        {
          /* Queue DECL (along with the class being defined) exactly
             once; one parameter with a default is enough.  */
          cp_default_arg_entry *entry
            = VEC_safe_push (cp_default_arg_entry, gc,
                             unparsed_funs_with_default_args, NULL);
          entry->class_type = current_class_type;
          entry->decl = decl;
          break;
        }
      arg_type = TREE_CHAIN (arg_type);
    }
}
/* DEFAULT_ARG contains the saved tokens for the initializer of DECL,
   which is either a FIELD_DECL or PARM_DECL.  Parse it and return
   the result.  For a PARM_DECL, PARMTYPE is the corresponding type
   from the parameter-type-list.  */
static tree
cp_parser_late_parse_one_default_arg (cp_parser *parser, tree decl,
                                      tree default_arg, tree parmtype)
{
  cp_token_cache *tokens;
  tree parsed_arg;
  bool dummy;
  if (default_arg == error_mark_node)
    return error_mark_node;
  /* Push the saved tokens for the default argument onto the parser's
     lexer stack.  */
  tokens = DEFARG_TOKENS (default_arg);
  cp_parser_push_lexer_for_tokens (parser, tokens);
  /* A lambda in the initializer needs its own scope.  */
  start_lambda_scope (decl);
  /* Parse the default argument.  */
  parsed_arg = cp_parser_initializer (parser, &dummy, &dummy);
  if (BRACE_ENCLOSED_INITIALIZER_P (parsed_arg))
    maybe_warn_cpp0x (CPP0X_INITIALIZER_LISTS);
  finish_lambda_scope ();
  if (!processing_template_decl)
    {
      /* In a non-template class, check conversions now.  In a template,
         we'll wait and instantiate these as needed.  */
      if (TREE_CODE (decl) == PARM_DECL)
        parsed_arg = check_default_argument (parmtype, parsed_arg);
      else
        {
          /* A braced direct-initializer uses normal lookup rules;
             everything else gets implicit-conversion lookup.  */
          int flags = LOOKUP_IMPLICIT;
          if (BRACE_ENCLOSED_INITIALIZER_P (parsed_arg)
              && CONSTRUCTOR_IS_DIRECT_INIT (parsed_arg))
            flags = LOOKUP_NORMAL;
          parsed_arg = digest_init_flags (TREE_TYPE (decl), parsed_arg, flags);
        }
    }
  /* If the token stream has not been completely used up, then
     there was extra junk after the end of the default
     argument.  */
  if (!cp_lexer_next_token_is (parser->lexer, CPP_EOF))
    {
      if (TREE_CODE (decl) == PARM_DECL)
        cp_parser_error (parser, "expected %<,%>");
      else
        cp_parser_error (parser, "expected %<;%>");
    }
  /* Revert to the main lexer.  */
  cp_parser_pop_lexer (parser);
  return parsed_arg;
}
/* FIELD is a non-static data member with an initializer which we saved for
   later; parse it now.  */
static void
cp_parser_late_parsing_nsdmi (cp_parser *parser, tree field)
{
  tree parsed_init;
  /* Isolate any functions encountered while parsing the initializer
     from those already in the queue.  */
  push_unparsed_function_queues (parser);
  parsed_init
    = cp_parser_late_parse_one_default_arg (parser, field,
                                            DECL_INITIAL (field),
                                            NULL_TREE);
  pop_unparsed_function_queues (parser);
  /* Replace the saved tokens with the parsed initializer.  */
  DECL_INITIAL (field) = parsed_init;
}
/* FN is a FUNCTION_DECL which may contains a parameter with an
   unparsed DEFAULT_ARG.  Parse the default args now.  This function
   assumes that the current scope is the scope in which the default
   argument should be processed.  */
static void
cp_parser_late_parsing_default_args (cp_parser *parser, tree fn)
{
  bool saved_local_variables_forbidden_p;
  tree parm, parmdecl;
  /* While we're parsing the default args, we might (due to the
     statement expression extension) encounter more classes.  We want
     to handle them right away, but we don't want them getting mixed
     up with default args that are currently in the queue.  */
  push_unparsed_function_queues (parser);
  /* Local variable names (and the `this' keyword) may not appear
     in a default argument.  */
  saved_local_variables_forbidden_p = parser->local_variables_forbidden_p;
  parser->local_variables_forbidden_p = true;
  push_defarg_context (fn);
  /* Walk the parameter-type-list and the PARM_DECL chain in
     lock-step; stop at void_list_node (end of a non-variadic list).  */
  for (parm = TYPE_ARG_TYPES (TREE_TYPE (fn)),
         parmdecl = DECL_ARGUMENTS (fn);
       parm && parm != void_list_node;
       parm = TREE_CHAIN (parm),
         parmdecl = DECL_CHAIN (parmdecl))
    {
      tree default_arg = TREE_PURPOSE (parm);
      tree parsed_arg;
      VEC(tree,gc) *insts;
      tree copy;
      unsigned ix;
      if (!default_arg)
        continue;
      if (TREE_CODE (default_arg) != DEFAULT_ARG)
        /* This can happen for a friend declaration for a function
           already declared with default arguments.  */
        continue;
      parsed_arg
        = cp_parser_late_parse_one_default_arg (parser, parmdecl,
                                                default_arg,
                                                TREE_VALUE (parm));
      if (parsed_arg == error_mark_node)
        {
          continue;
        }
      TREE_PURPOSE (parm) = parsed_arg;
      /* Update any instantiations we've already created.  */
      for (insts = DEFARG_INSTANTIATIONS (default_arg), ix = 0;
           VEC_iterate (tree, insts, ix, copy); ix++)
        TREE_PURPOSE (copy) = parsed_arg;
    }
  pop_defarg_context ();
  /* Make sure no default arg is missing.  */
  check_default_args (fn);
  /* Restore the state of local_variables_forbidden_p.  */
  parser->local_variables_forbidden_p = saved_local_variables_forbidden_p;
  /* Restore the queue.  */
  pop_unparsed_function_queues (parser);
}
/* Parse the operand of `sizeof' (or a similar operator).  Returns
   either a TYPE or an expression, depending on the form of the
   input.  The KEYWORD indicates which kind of expression we have
   encountered.  */
static tree
cp_parser_sizeof_operand (cp_parser* parser, enum rid keyword)
{
  tree expr = NULL_TREE;
  const char *saved_message;
  char *tmp;
  bool saved_integral_constant_expression_p;
  bool saved_non_integral_constant_expression_p;
  bool pack_expansion_p = false;
  /* Types cannot be defined in a `sizeof' expression.  Save away the
     old message.  */
  saved_message = parser->type_definition_forbidden_message;
  /* And create the new one.  The concat result is heap-allocated and
     freed at the end of this function.  */
  tmp = concat ("types may not be defined in %<",
                IDENTIFIER_POINTER (ridpointers[keyword]),
                "%> expressions", NULL);
  parser->type_definition_forbidden_message = tmp;
  /* The restrictions on constant-expressions do not apply inside
     sizeof expressions.  */
  saved_integral_constant_expression_p
    = parser->integral_constant_expression_p;
  saved_non_integral_constant_expression_p
    = parser->non_integral_constant_expression_p;
  parser->integral_constant_expression_p = false;
  /* If it's a `...', then we are computing the length of a parameter
     pack.  */
  if (keyword == RID_SIZEOF
      && cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS))
    {
      /* Consume the `...'.  */
      cp_lexer_consume_token (parser->lexer);
      maybe_warn_variadic_templates ();
      /* Note that this is an expansion.  */
      pack_expansion_p = true;
    }
  /* Do not actually evaluate the expression.  The decrements at the
     end of this function must pair with these increments.  */
  ++cp_unevaluated_operand;
  ++c_inhibit_evaluation_warnings;
  /* If it's a `(', then we might be looking at the type-id
     construction.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN))
    {
      tree type;
      bool saved_in_type_id_in_expr_p;
      /* We can't be sure yet whether we're looking at a type-id or an
         expression.  */
      cp_parser_parse_tentatively (parser);
      /* Consume the `('.  */
      cp_lexer_consume_token (parser->lexer);
      /* Parse the type-id.  */
      saved_in_type_id_in_expr_p = parser->in_type_id_in_expr_p;
      parser->in_type_id_in_expr_p = true;
      type = cp_parser_type_id (parser);
      parser->in_type_id_in_expr_p = saved_in_type_id_in_expr_p;
      /* Now, look for the trailing `)'.  */
      cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
      /* If all went well, then we're done.  */
      if (cp_parser_parse_definitely (parser))
        {
          cp_decl_specifier_seq decl_specs;
          /* Build a trivial decl-specifier-seq.  */
          clear_decl_specs (&decl_specs);
          decl_specs.type = type;
          /* Call grokdeclarator to figure out what type this is.  */
          expr = grokdeclarator (NULL,
                                 &decl_specs,
                                 TYPENAME,
                                 /*initialized=*/0,
                                 /*attrlist=*/NULL);
        }
    }
  /* If the type-id production did not work out, then we must be
     looking at the unary-expression production.  */
  if (!expr)
    expr = cp_parser_unary_expression (parser, /*address_p=*/false,
                                       /*cast_p=*/false, NULL);
  if (pack_expansion_p)
    /* Build a pack expansion.  */
    expr = make_pack_expansion (expr);
  /* Go back to evaluating expressions.  */
  --cp_unevaluated_operand;
  --c_inhibit_evaluation_warnings;
  /* Free the message we created.  */
  free (tmp);
  /* And restore the old one.  */
  parser->type_definition_forbidden_message = saved_message;
  parser->integral_constant_expression_p
    = saved_integral_constant_expression_p;
  parser->non_integral_constant_expression_p
    = saved_non_integral_constant_expression_p;
  return expr;
}
/* If the current declaration has no declarator, return true.  */
static bool
cp_parser_declares_only_class_p (cp_parser *parser)
{
  /* A `;' ends the declaration without a declarator; a `,' separates
     declarators, and at this point also implies none has appeared.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
    return true;
  return cp_lexer_next_token_is (parser->lexer, CPP_COMMA);
}
/* Update the DECL_SPECS to reflect the storage class indicated by
   KEYWORD, diagnosing invalid or conflicting uses.  */
static void
cp_parser_set_storage_class (cp_parser *parser,
                             cp_decl_specifier_seq *decl_specs,
                             enum rid keyword,
                             location_t location)
{
  cp_storage_class sc = sc_none;
  /* Storage classes may not appear in an unbraced
     linkage-specification.  */
  if (parser->in_unbraced_linkage_specification_p)
    {
      error_at (location, "invalid use of %qD in linkage specification",
                ridpointers[keyword]);
      return;
    }
  /* Only one storage class is allowed; record the conflict and let
     grokdeclarator report it later.  */
  if (decl_specs->storage_class != sc_none)
    {
      decl_specs->conflicting_specifiers_p = true;
      return;
    }
  /* `__thread' must follow `extern' or `static', not precede them;
     drop the `__thread' after diagnosing.  */
  if ((keyword == RID_EXTERN || keyword == RID_STATIC)
      && decl_specs->specs[(int) ds_thread])
    {
      error_at (location, "%<__thread%> before %qD", ridpointers[keyword]);
      decl_specs->specs[(int) ds_thread] = 0;
    }
  /* Map the keyword to the corresponding storage class.  */
  switch (keyword)
    {
    case RID_AUTO:
      sc = sc_auto;
      break;
    case RID_REGISTER:
      sc = sc_register;
      break;
    case RID_STATIC:
      sc = sc_static;
      break;
    case RID_EXTERN:
      sc = sc_extern;
      break;
    case RID_MUTABLE:
      sc = sc_mutable;
      break;
    default:
      gcc_unreachable ();
    }
  decl_specs->storage_class = sc;
  /* A storage class specifier cannot be applied alongside a typedef
     specifier.  If there is a typedef specifier present then set
     conflicting_specifiers_p which will trigger an error later
     on in grokdeclarator.  */
  if (decl_specs->specs[(int) ds_typedef])
    decl_specs->conflicting_specifiers_p = true;
}
/* Update the DECL_SPECS to reflect the TYPE_SPEC.  If TYPE_DEFINITION_P
   is true, the type is a class or enum definition.  */
static void
cp_parser_set_decl_spec_type (cp_decl_specifier_seq *decl_specs,
                              tree type_spec,
                              location_t location,
                              bool type_definition_p)
{
  decl_specs->any_specifiers_p = true;
  /* If the user tries to redeclare bool, char16_t, char32_t, or wchar_t
     (with, for example, in "typedef int wchar_t;") we remember that
     this is what happened.  In system headers, we ignore these
     declarations so that G++ can work with system headers that are not
     C++-safe.  The extra condition on an existing type or
     sign/length specifier distinguishes a redefinition from a plain
     reference to the builtin type.  */
  if (decl_specs->specs[(int) ds_typedef]
      && !type_definition_p
      && (type_spec == boolean_type_node
          || type_spec == char16_type_node
          || type_spec == char32_type_node
          || type_spec == wchar_type_node)
      && (decl_specs->type
          || decl_specs->specs[(int) ds_long]
          || decl_specs->specs[(int) ds_short]
          || decl_specs->specs[(int) ds_unsigned]
          || decl_specs->specs[(int) ds_signed]))
    {
      decl_specs->redefined_builtin_type = type_spec;
      /* Only record the type if no other type was seen first.  */
      if (!decl_specs->type)
        {
          decl_specs->type = type_spec;
          decl_specs->type_definition_p = false;
          decl_specs->type_location = location;
        }
    }
  else if (decl_specs->type)
    /* A second type specifier: remember the error; diagnosed later.  */
    decl_specs->multiple_types_p = true;
  else
    {
      decl_specs->type = type_spec;
      decl_specs->type_definition_p = type_definition_p;
      decl_specs->redefined_builtin_type = NULL_TREE;
      decl_specs->type_location = location;
    }
}
/* DECL_SPECIFIERS is the representation of a decl-specifier-seq.
   Returns TRUE iff `friend' appears among the DECL_SPECIFIERS.  */
static bool
cp_parser_friend_p (const cp_decl_specifier_seq *decl_specifiers)
{
  /* The ds_friend slot counts occurrences of `friend'; any non-zero
     count means the specifier was present.  */
  int friend_count = decl_specifiers->specs[(int) ds_friend];
  return friend_count != 0;
}
/* Issue an error message indicating that TOKEN_DESC was expected.
   If KEYWORD is true, it indicates this function is called by
   cp_parser_require_keyword and the required token can only be
   an indicated keyword.  */
static void
cp_parser_required_error (cp_parser *parser,
                          required_token token_desc,
                          bool keyword)
{
  /* Keyword-style tokens are handled first; these cases are valid
     whether or not KEYWORD is set.  */
  switch (token_desc)
    {
      case RT_NEW:
        cp_parser_error (parser, "expected %<new%>");
        return;
      case RT_DELETE:
        cp_parser_error (parser, "expected %<delete%>");
        return;
      case RT_RETURN:
        cp_parser_error (parser, "expected %<return%>");
        return;
      case RT_WHILE:
        cp_parser_error (parser, "expected %<while%>");
        return;
      case RT_EXTERN:
        cp_parser_error (parser, "expected %<extern%>");
        return;
      case RT_STATIC_ASSERT:
        cp_parser_error (parser, "expected %<static_assert%>");
        return;
      case RT_DECLTYPE:
        cp_parser_error (parser, "expected %<decltype%>");
        return;
      case RT_OPERATOR:
        cp_parser_error (parser, "expected %<operator%>");
        return;
      case RT_CLASS:
        cp_parser_error (parser, "expected %<class%>");
        return;
      case RT_TEMPLATE:
        cp_parser_error (parser, "expected %<template%>");
        return;
      case RT_NAMESPACE:
        cp_parser_error (parser, "expected %<namespace%>");
        return;
      case RT_USING:
        cp_parser_error (parser, "expected %<using%>");
        return;
      case RT_ASM:
        cp_parser_error (parser, "expected %<asm%>");
        return;
      case RT_TRY:
        cp_parser_error (parser, "expected %<try%>");
        return;
      case RT_CATCH:
        cp_parser_error (parser, "expected %<catch%>");
        return;
      case RT_THROW:
        cp_parser_error (parser, "expected %<throw%>");
        return;
      case RT_LABEL:
        cp_parser_error (parser, "expected %<__label__%>");
        return;
      case RT_AT_TRY:
        cp_parser_error (parser, "expected %<@try%>");
        return;
      case RT_AT_SYNCHRONIZED:
        cp_parser_error (parser, "expected %<@synchronized%>");
        return;
      case RT_AT_THROW:
        cp_parser_error (parser, "expected %<@throw%>");
        return;
      case RT_TRANSACTION_ATOMIC:
        cp_parser_error (parser, "expected %<__transaction_atomic%>");
        return;
      case RT_TRANSACTION_RELAXED:
        cp_parser_error (parser, "expected %<__transaction_relaxed%>");
        return;
      default:
        break;
    }
  /* Punctuation and other non-keyword tokens; reaching this point
     with KEYWORD set indicates a caller bug.  */
  if (!keyword)
    {
      switch (token_desc)
        {
          case RT_SEMICOLON:
            cp_parser_error (parser, "expected %<;%>");
            return;
          case RT_OPEN_PAREN:
            cp_parser_error (parser, "expected %<(%>");
            return;
          case RT_CLOSE_BRACE:
            cp_parser_error (parser, "expected %<}%>");
            return;
          case RT_OPEN_BRACE:
            cp_parser_error (parser, "expected %<{%>");
            return;
          case RT_CLOSE_SQUARE:
            cp_parser_error (parser, "expected %<]%>");
            return;
          case RT_OPEN_SQUARE:
            cp_parser_error (parser, "expected %<[%>");
            return;
          case RT_COMMA:
            cp_parser_error (parser, "expected %<,%>");
            return;
          case RT_SCOPE:
            cp_parser_error (parser, "expected %<::%>");
            return;
          case RT_LESS:
            cp_parser_error (parser, "expected %<<%>");
            return;
          case RT_GREATER:
            cp_parser_error (parser, "expected %<>%>");
            return;
          case RT_EQ:
            cp_parser_error (parser, "expected %<=%>");
            return;
          case RT_ELLIPSIS:
            cp_parser_error (parser, "expected %<...%>");
            return;
          case RT_MULT:
            cp_parser_error (parser, "expected %<*%>");
            return;
          case RT_COMPL:
            cp_parser_error (parser, "expected %<~%>");
            return;
          case RT_COLON:
            cp_parser_error (parser, "expected %<:%>");
            return;
          case RT_COLON_SCOPE:
            cp_parser_error (parser, "expected %<:%> or %<::%>");
            return;
          case RT_CLOSE_PAREN:
            cp_parser_error (parser, "expected %<)%>");
            return;
          case RT_COMMA_CLOSE_PAREN:
            cp_parser_error (parser, "expected %<,%> or %<)%>");
            return;
          case RT_PRAGMA_EOL:
            cp_parser_error (parser, "expected end of line");
            return;
          case RT_NAME:
            cp_parser_error (parser, "expected identifier");
            return;
          case RT_SELECT:
            cp_parser_error (parser, "expected selection-statement");
            return;
          case RT_INTERATION:
            cp_parser_error (parser, "expected iteration-statement");
            return;
          case RT_JUMP:
            cp_parser_error (parser, "expected jump-statement");
            return;
          case RT_CLASS_KEY:
            cp_parser_error (parser, "expected class-key");
            return;
          case RT_CLASS_TYPENAME_TEMPLATE:
            cp_parser_error (parser,
                             "expected %<class%>, %<typename%>, or %<template%>");
            return;
          default:
            gcc_unreachable ();
        }
    }
  else
    gcc_unreachable ();
}
/* If the next token is of the indicated TYPE, consume it.  Otherwise,
   issue an error message indicating that TOKEN_DESC was expected.
   Returns the token consumed, if the token had the appropriate type.
   Otherwise, returns NULL.  */
static cp_token *
cp_parser_require (cp_parser* parser,
                   enum cpp_ttype type,
                   required_token token_desc)
{
  if (!cp_lexer_next_token_is (parser->lexer, type))
    {
      /* Output the MESSAGE -- unless we're parsing tentatively.  */
      if (!cp_parser_simulate_error (parser))
        cp_parser_required_error (parser, token_desc, /*keyword=*/false);
      return NULL;
    }
  return cp_lexer_consume_token (parser->lexer);
}
/* An error message is produced if the next token is not '>'.
   All further tokens are skipped until the desired token is
   found or '{', '}', ';' or an unbalanced ')' or ']'.  */
static void
cp_parser_skip_to_end_of_template_parameter_list (cp_parser* parser)
{
  /* Current level of '< ... >'.  */
  unsigned level = 0;
  /* Ignore '<' and '>' nested inside '( ... )' or '[ ... ]'.  */
  unsigned nesting_depth = 0;
  /* Are we ready, yet?  If the next token is already '>', consume it
     and stop; otherwise cp_parser_require issues the error and we
     skip ahead below.  */
  if (cp_parser_require (parser, CPP_GREATER, RT_GREATER))
    return;
  /* Skip tokens until the desired token is found.  */
  while (true)
    {
      /* Peek at the next token.  */
      switch (cp_lexer_peek_token (parser->lexer)->type)
        {
        case CPP_LESS:
          if (!nesting_depth)
            ++level;
          break;
        case CPP_RSHIFT:
          if (cxx_dialect == cxx98)
            /* C++0x views the `>>' operator as two `>' tokens, but
               C++98 does not.  */
            break;
          else if (!nesting_depth && level-- == 0)
            {
              /* We've hit a `>>' where the first `>' closes the
                 template argument list, and the second `>' is
                 spurious.  Just consume the `>>' and stop; we've
                 already produced at least one error.  */
              cp_lexer_consume_token (parser->lexer);
              return;
            }
          /* Fall through for C++0x, so we handle the second `>' in
             the `>>'.  Note LEVEL was already decremented above, so
             the fall-through decrements it a second time: a `>>'
             closes two levels.  */
        case CPP_GREATER:
          if (!nesting_depth && level-- == 0)
            {
              /* We've reached the token we want, consume it and stop.  */
              cp_lexer_consume_token (parser->lexer);
              return;
            }
          break;
        case CPP_OPEN_PAREN:
        case CPP_OPEN_SQUARE:
          ++nesting_depth;
          break;
        case CPP_CLOSE_PAREN:
        case CPP_CLOSE_SQUARE:
          if (nesting_depth-- == 0)
            return;
          break;
        case CPP_EOF:
        case CPP_PRAGMA_EOL:
        case CPP_SEMICOLON:
        case CPP_OPEN_BRACE:
        case CPP_CLOSE_BRACE:
          /* The '>' was probably forgotten, don't look further.  */
          return;
        default:
          break;
        }
      /* Consume this token.  */
      cp_lexer_consume_token (parser->lexer);
    }
}
/* If the next token is the indicated keyword, consume it.  Otherwise,
   issue an error message indicating that TOKEN_DESC was expected.
   Returns the token consumed, if the token had the appropriate type.
   Otherwise, returns NULL.  */

static cp_token *
cp_parser_require_keyword (cp_parser* parser,
                           enum rid keyword,
                           required_token token_desc)
{
  cp_token *tok = cp_parser_require (parser, CPP_KEYWORD, token_desc);

  /* Either no keyword was present (cp_parser_require already
     complained) or it was the one we wanted.  */
  if (tok == NULL || tok->keyword == keyword)
    return tok;

  /* We got a keyword, but not the right one.  */
  cp_parser_required_error (parser, token_desc, /*keyword=*/true);
  return NULL;
}
/* Returns TRUE iff TOKEN is a token that can begin the body of a
   function-definition.  */

static bool
cp_parser_token_starts_function_definition_p (cp_token* token)
{
  /* An ordinary function-body begins with `{'; a ctor-initializer
     begins with `:'.  */
  if (token->type == CPP_OPEN_BRACE || token->type == CPP_COLON)
    return true;

  switch (token->keyword)
    {
    /* A function-try-block begins with `try'.  */
    case RID_TRY:
    /* A function-transaction-block begins with `__transaction_atomic'
       or `__transaction_relaxed'.  */
    case RID_TRANSACTION_ATOMIC:
    case RID_TRANSACTION_RELAXED:
    /* The named return value extension begins with `return'.  */
    case RID_RETURN:
      return true;
    default:
      return false;
    }
}
/* Returns TRUE iff the next token is the ":" or "{" beginning a class
   definition.  */

static bool
cp_parser_next_token_starts_class_definition_p (cp_parser *parser)
{
  cp_token *next = cp_lexer_peek_token (parser->lexer);
  return next->type == CPP_OPEN_BRACE || next->type == CPP_COLON;
}
/* Returns TRUE iff the next token is the "," or ">" (or `>>', in
   C++0x) ending a template-argument.  */

static bool
cp_parser_next_token_ends_template_argument_p (cp_parser *parser)
{
  cp_token *next = cp_lexer_peek_token (parser->lexer);

  switch (next->type)
    {
    case CPP_COMMA:
    case CPP_GREATER:
    case CPP_ELLIPSIS:
      return true;
    case CPP_RSHIFT:
      /* In C++0x a `>>' may close two nested template-argument lists;
         C++98 never splits it.  */
      return cxx_dialect != cxx98;
    default:
      return false;
    }
}
/* Returns TRUE iff the n-th token is a "<", or the n-th is a "[" and the
   (n+1)-th is a ":" (which is a possible digraph typo for "< ::").  */

static bool
cp_parser_nth_token_starts_template_argument_list_p (cp_parser * parser,
                                                     size_t n)
{
  cp_token *tok = cp_lexer_peek_nth_token (parser->lexer, n);

  if (tok->type == CPP_LESS)
    return true;

  /* `<::' in the original source lexes as the digraph `[' immediately
     followed by `:' with no intervening whitespace; recognize that
     sequence as a likely typo for `< ::'.  */
  if (tok->type == CPP_OPEN_SQUARE && (tok->flags & DIGRAPH))
    {
      cp_token *next = cp_lexer_peek_nth_token (parser->lexer, n+1);
      return next->type == CPP_COLON && !(next->flags & PREV_WHITE);
    }

  return false;
}
/* Returns the kind of tag indicated by TOKEN, if it is a class-key,
   or none_type otherwise.  */

static enum tag_types
cp_parser_token_is_class_key (cp_token* token)
{
  if (token->keyword == RID_CLASS)
    return class_type;
  if (token->keyword == RID_STRUCT)
    return record_type;
  if (token->keyword == RID_UNION)
    return union_type;
  /* Not a class-key at all.  */
  return none_type;
}
/* Issue an error message if the CLASS_KEY does not match the TYPE.  */

static void
cp_parser_check_class_key (enum tag_types class_key, tree type)
{
  const char *key_name;

  if (type == error_mark_node)
    return;

  /* Complain only when exactly one of "TYPE is a union" and "the key
     was `union'" holds.  */
  if ((TREE_CODE (type) == UNION_TYPE) == (class_key == union_type))
    return;

  if (class_key == union_type)
    key_name = "union";
  else if (class_key == record_type)
    key_name = "struct";
  else
    key_name = "class";

  permerror (input_location, "%qs tag used in naming %q#T", key_name, type);
  inform (DECL_SOURCE_LOCATION (TYPE_NAME (type)),
          "%q#T was previously declared here", type);
}
/* Issue an error message if DECL is redeclared with different
   access than its original declaration [class.access.spec/3].
   This applies to nested classes and nested class templates.
   [class.mem/1].  */

static void
cp_parser_check_access_in_redeclaration (tree decl, location_t location)
{
  bool was_private, was_protected;

  if (!decl || !CLASS_TYPE_P (TREE_TYPE (decl)))
    return;

  was_private = TREE_PRIVATE (decl);
  was_protected = TREE_PROTECTED (decl);

  /* The redeclaration's access is given by the current access
     specifier; compare it with the flags recorded on DECL.  */
  if (was_private != (current_access_specifier == access_private_node)
      || was_protected != (current_access_specifier == access_protected_node))
    error_at (location, "%qD redeclared with different access", decl);
}
/* Look for the `template' keyword, as a syntactic disambiguator.
   Return TRUE iff it is present, in which case it will be
   consumed.  */

static bool
cp_parser_optional_template_keyword (cp_parser *parser)
{
  cp_token *token;

  if (!cp_lexer_next_token_is_keyword (parser->lexer, RID_TEMPLATE))
    return false;

  if (processing_template_decl)
    {
      /* Consume the `template' keyword.  */
      cp_lexer_consume_token (parser->lexer);
      return true;
    }

  /* Outside templates the parser can always figure out what is a
     template and what is not, so the disambiguator is disallowed.  */
  token = cp_lexer_peek_token (parser->lexer);
  error_at (token->location,
            "%<template%> (as a disambiguator) is only allowed "
            "within templates");
  /* If this part of the token stream is rescanned, the same error
     message would be generated again.  So, purge the token from the
     stream.  */
  cp_lexer_purge_token (parser->lexer);
  return false;
}
/* The next token is a CPP_NESTED_NAME_SPECIFIER.  Consume the token,
   set PARSER->SCOPE, and perform other related actions.  The token's
   tree_check_value carries both the pre-computed scope and any access
   checks that were deferred when the specifier was first parsed.  */

static void
cp_parser_pre_parsed_nested_name_specifier (cp_parser *parser)
{
  int i;
  struct tree_check *check_value;
  deferred_access_check *chk;
  VEC (deferred_access_check,gc) *checks;

  /* Get the stored value.  */
  check_value = cp_lexer_consume_token (parser->lexer)->u.tree_check_value;
  /* Perform any access checks that were deferred.  */
  checks = check_value->checks;
  if (checks)
    {
      FOR_EACH_VEC_ELT (deferred_access_check, checks, i, chk)
        perform_or_defer_access_check (chk->binfo,
                                       chk->decl,
                                       chk->diag_decl);
    }
  /* Set the scope from the stored value.  */
  parser->scope = check_value->value;
  parser->qualifying_scope = check_value->qualifying_scope;
  /* A pre-parsed nested-name-specifier never names an object scope.  */
  parser->object_scope = NULL_TREE;
}
/* Consume tokens up through a non-nested END token.  Returns TRUE if we
   encounter the end of a block before what we were looking for.
   DEPTH is the current nesting level of the recursion; the outermost
   caller passes 0.  Nested `{', `(' and pragma groups are cached by
   recursive calls.  */

static bool
cp_parser_cache_group (cp_parser *parser,
                       enum cpp_ttype end,
                       unsigned depth)
{
  while (true)
    {
      cp_token *token = cp_lexer_peek_token (parser->lexer);

      /* Abort a parenthesized expression if we encounter a semicolon.  */
      if ((end == CPP_CLOSE_PAREN || depth == 0)
          && token->type == CPP_SEMICOLON)
        return true;
      /* If we've reached the end of the file, stop.  A pragma-EOL also
         stops us unless we are explicitly caching a pragma group.  */
      if (token->type == CPP_EOF
          || (end != CPP_PRAGMA_EOL
              && token->type == CPP_PRAGMA_EOL))
        return true;
      if (token->type == CPP_CLOSE_BRACE && depth == 0)
        /* We've hit the end of an enclosing block, so there's been some
           kind of syntax error.  */
        return true;

      /* Consume the token.  */
      cp_lexer_consume_token (parser->lexer);
      /* See if it starts a new group.  */
      if (token->type == CPP_OPEN_BRACE)
        {
          cp_parser_cache_group (parser, CPP_CLOSE_BRACE, depth + 1);
          /* In theory this should probably check end == '}', but
             cp_parser_save_member_function_body needs it to exit
             after either '}' or ')' when called with ')'.  */
          if (depth == 0)
            return false;
        }
      else if (token->type == CPP_OPEN_PAREN)
        {
          cp_parser_cache_group (parser, CPP_CLOSE_PAREN, depth + 1);
          if (depth == 0 && end == CPP_CLOSE_PAREN)
            return false;
        }
      else if (token->type == CPP_PRAGMA)
        cp_parser_cache_group (parser, CPP_PRAGMA_EOL, depth + 1);
      else if (token->type == end)
        return false;
    }
}
/* Like above, for caching a default argument or NSDMI.  Both of these are
   terminated by a non-nested comma, but it can be unclear whether or not a
   comma is nested in a template argument list unless we do more parsing.
   In order to handle this ambiguity, when we encounter a ',' after a '<'
   we try to parse what follows as a parameter-declaration-list (in the
   case of a default argument) or a member-declarator (in the case of an
   NSDMI).  If that succeeds, then we stop caching.  NSDMI selects the
   member-declarator interpretation.  Returns a DEFAULT_ARG tree holding
   the raw token range for later parsing.  */

static tree
cp_parser_cache_defarg (cp_parser *parser, bool nsdmi)
{
  unsigned depth = 0;
  /* Count of '<' tokens seen at depth 0 that might have opened a
     template-argument-list.  */
  int maybe_template_id = 0;
  cp_token *first_token;
  cp_token *token;
  tree default_argument;

  /* Add tokens until we have processed the entire default
     argument.  We add the range [first_token, token).  */
  first_token = cp_lexer_peek_token (parser->lexer);
  if (first_token->type == CPP_OPEN_BRACE)
    {
      /* For list-initialization, this is straightforward.  */
      cp_parser_cache_group (parser, CPP_CLOSE_BRACE, /*depth=*/0);
      token = cp_lexer_peek_token (parser->lexer);
    }
  else while (true)
    {
      bool done = false;

      /* Peek at the next token.  */
      token = cp_lexer_peek_token (parser->lexer);
      /* What we do depends on what token we have.  */
      switch (token->type)
        {
          /* In valid code, a default argument must be
             immediately followed by a `,' `)', or `...'.  */
        case CPP_COMMA:
          if (depth == 0 && maybe_template_id)
            {
              /* If we've seen a '<', we might be in a
                 template-argument-list.  Until Core issue 325 is
                 resolved, we don't know how this situation ought
                 to be handled, so try to DTRT.  We check whether
                 what comes after the comma is a valid parameter
                 declaration list.  If it is, then the comma ends
                 the default argument; otherwise the default
                 argument continues.  */
              bool error = false;
              tree t;

              /* Set ITALP so cp_parser_parameter_declaration_list
                 doesn't decide to commit to this parse.  */
              bool saved_italp = parser->in_template_argument_list_p;
              parser->in_template_argument_list_p = true;

              cp_parser_parse_tentatively (parser);
              cp_lexer_consume_token (parser->lexer);

              if (nsdmi)
                {
                  int ctor_dtor_or_conv_p;
                  cp_parser_declarator (parser, CP_PARSER_DECLARATOR_NAMED,
                                        &ctor_dtor_or_conv_p,
                                        /*parenthesized_p=*/NULL,
                                        /*member_p=*/true);
                }
              else
                {
                  /* Trial-parse a parameter list in its own scope, then
                     undo the bindings it created.  */
                  begin_scope (sk_function_parms, NULL_TREE);
                  cp_parser_parameter_declaration_list (parser, &error);
                  for (t = current_binding_level->names; t; t = DECL_CHAIN (t))
                    pop_binding (DECL_NAME (t), t);
                  leave_scope ();
                }
              if (!cp_parser_error_occurred (parser) && !error)
                done = true;
              cp_parser_abort_tentative_parse (parser);

              parser->in_template_argument_list_p = saved_italp;
              break;
            }
          /* Otherwise fall through: at depth 0 with no pending
             template-id, the `,' ends the default argument just like
             `)' or `...' below.  */
        case CPP_CLOSE_PAREN:
        case CPP_ELLIPSIS:
          /* If we run into a non-nested `;', `}', or `]',
             then the code is invalid -- but the default
             argument is certainly over.  */
        case CPP_SEMICOLON:
        case CPP_CLOSE_BRACE:
        case CPP_CLOSE_SQUARE:
          if (depth == 0)
            done = true;
          /* Update DEPTH, if necessary.  */
          else if (token->type == CPP_CLOSE_PAREN
                   || token->type == CPP_CLOSE_BRACE
                   || token->type == CPP_CLOSE_SQUARE)
            --depth;
          break;

        case CPP_OPEN_PAREN:
        case CPP_OPEN_SQUARE:
        case CPP_OPEN_BRACE:
          ++depth;
          break;

        case CPP_LESS:
          if (depth == 0)
            /* This might be the comparison operator, or it might
               start a template argument list.  */
            ++maybe_template_id;
          break;

        case CPP_RSHIFT:
          if (cxx_dialect == cxx98)
            break;
          /* Fall through for C++0x, which treats the `>>'
             operator like two `>' tokens in certain
             cases.  */
        case CPP_GREATER:
          if (depth == 0)
            {
              /* This might be an operator, or it might close a
                 template argument list.  But if a previous '<'
                 started a template argument list, this will have
                 closed it, so we can't be in one anymore.  */
              maybe_template_id -= 1 + (token->type == CPP_RSHIFT);
              if (maybe_template_id < 0)
                maybe_template_id = 0;
            }
          break;

          /* If we run out of tokens, issue an error message.  */
        case CPP_EOF:
        case CPP_PRAGMA_EOL:
          error_at (token->location, "file ends in default argument");
          done = true;
          break;

        case CPP_NAME:
        case CPP_SCOPE:
          /* In these cases, we should look for template-ids.
             For example, if the default argument is
             `X<int, double>()', we need to do name lookup to
             figure out whether or not `X' is a template; if
             so, the `,' does not end the default argument.

             That is not yet done.  */
          break;

        default:
          break;
        }

      /* If we've reached the end, stop.  */
      if (done)
        break;

      /* Add the token to the token block.  */
      token = cp_lexer_consume_token (parser->lexer);
    }

  /* Create a DEFAULT_ARG to represent the unparsed default
     argument.  */
  default_argument = make_node (DEFAULT_ARG);
  DEFARG_TOKENS (default_argument)
    = cp_token_cache_new (first_token, token);
  DEFARG_INSTANTIATIONS (default_argument) = NULL;

  return default_argument;
}
/* Begin parsing tentatively.  We always save tokens while parsing
   tentatively so that if the tentative parsing fails we can restore the
   tokens.  */

static void
cp_parser_parse_tentatively (cp_parser* parser)
{
  /* Push a fresh parsing context onto the stack.  */
  parser->context = cp_parser_context_new (parser->context);
  /* Record tokens so a failed tentative parse can be rolled back.  */
  cp_lexer_save_tokens (parser->lexer);
  /* Queue up access checks until we know whether this parse sticks,
     to avoid emitting the same access diagnostic repeatedly.  */
  push_deferring_access_checks (dk_deferred);
}
/* Commit to the currently active tentative parse.  Marks every
   enclosing tentative context (down to, but not including, the
   outermost non-context sentinel) as committed and releases the saved
   tokens each of them was holding.  */

static void
cp_parser_commit_to_tentative_parse (cp_parser* parser)
{
  cp_parser_context *context;
  cp_lexer *lexer;

  /* Mark all of the levels as committed.  */
  lexer = parser->lexer;
  for (context = parser->context; context->next; context = context->next)
    {
      /* Contexts below an already-committed one were committed when
         that one was; no need to go further.  */
      if (context->status == CP_PARSER_STATUS_KIND_COMMITTED)
        break;
      context->status = CP_PARSER_STATUS_KIND_COMMITTED;
      /* Each context saved its tokens in some lexer on the chain;
         advance to the nearest lexer still saving tokens and commit
         those tokens.  */
      while (!cp_lexer_saving_tokens (lexer))
        lexer = lexer->next;
      cp_lexer_commit_tokens (lexer);
    }
}
/* Abort the currently active tentative parse.  All consumed tokens
   will be rolled back, and no diagnostics will be issued.  */

static void
cp_parser_abort_tentative_parse (cp_parser* parser)
{
  gcc_assert (parser->context->status != CP_PARSER_STATUS_KIND_COMMITTED
              || errorcount > 0);
  /* Force an error into the topmost context...  */
  cp_parser_simulate_error (parser);
  /* ...then resolve the tentative parse; the simulated error makes it
     roll back.  */
  cp_parser_parse_definitely (parser);
}
/* Stop parsing tentatively.  If a parse error has occurred, restore the
   token stream.  Otherwise, commit to the tokens we have consumed.
   Returns true if no error occurred; false otherwise.  */

static bool
cp_parser_parse_definitely (cp_parser* parser)
{
  bool error_occurred;
  cp_parser_context *context;

  /* Remember whether or not an error occurred, since we are about to
     destroy that information.  */
  error_occurred = cp_parser_error_occurred (parser);
  /* Remove the topmost context from the stack.  */
  context = parser->context;
  parser->context = context->next;
  /* If no parse errors occurred, commit to the tentative parse.  */
  if (!error_occurred)
    {
      /* Commit to the tokens read tentatively, unless that was
         already done.  */
      if (context->status != CP_PARSER_STATUS_KIND_COMMITTED)
        cp_lexer_commit_tokens (parser->lexer);

      /* Hand the deferred access checks up to the enclosing context
         rather than discarding them.  */
      pop_to_parent_deferring_access_checks ();
    }
  /* Otherwise, if errors occurred, roll back our state so that things
     are just as they were before we began the tentative parse.  */
  else
    {
      cp_lexer_rollback_tokens (parser->lexer);
      pop_deferring_access_checks ();
    }
  /* Add the context to the front of the free list for reuse by the
     next tentative parse.  */
  context->next = cp_parser_context_free_list;
  cp_parser_context_free_list = context;

  return !error_occurred;
}
/* Returns true if we are parsing tentatively and are not committed to
   this tentative parse.  */

static bool
cp_parser_uncommitted_to_tentative_parse_p (cp_parser* parser)
{
  if (!cp_parser_parsing_tentatively (parser))
    return false;
  return parser->context->status != CP_PARSER_STATUS_KIND_COMMITTED;
}
/* Returns nonzero iff an error has occurred during the most recent
   tentative parse.  */

static bool
cp_parser_error_occurred (cp_parser* parser)
{
  if (!cp_parser_parsing_tentatively (parser))
    return false;
  return parser->context->status == CP_PARSER_STATUS_KIND_ERROR;
}
/* Returns nonzero if GNU extensions are allowed.  */

static bool
cp_parser_allow_gnu_extensions_p (cp_parser* parser)
{
  /* The flag is recorded on the parser object when it is created.  */
  return parser->allow_gnu_extensions_p;
}
/* Objective-C++ Productions */
/* Parse an Objective-C expression, which feeds into a primary-expression
   above.

   objc-expression:
     objc-message-expression
     objc-string-literal
     objc-encode-expression
     objc-protocol-expression
     objc-selector-expression

  Returns a tree representation of the expression.  */

static tree
cp_parser_objc_expression (cp_parser* parser)
{
  /* Try to figure out what kind of declaration is present.  */
  cp_token *kwd = cp_lexer_peek_token (parser->lexer);

  switch (kwd->type)
    {
    case CPP_OPEN_SQUARE:
      return cp_parser_objc_message_expression (parser);

    case CPP_OBJC_STRING:
      kwd = cp_lexer_consume_token (parser->lexer);
      return objc_build_string_object (kwd->u.value);

    case CPP_KEYWORD:
      switch (kwd->keyword)
        {
        case RID_AT_ENCODE:
          return cp_parser_objc_encode_expression (parser);

        case RID_AT_PROTOCOL:
          return cp_parser_objc_protocol_expression (parser);

        case RID_AT_SELECTOR:
          return cp_parser_objc_selector_expression (parser);

        default:
          break;
        }
      /* An unrecognized `@' keyword deliberately falls through to the
         diagnostic in the outer default below.  */
    default:
      error_at (kwd->location,
                "misplaced %<@%D%> Objective-C++ construct",
                kwd->u.value);
      cp_parser_skip_to_end_of_block_or_statement (parser);
    }

  return error_mark_node;
}
/* Parse an Objective-C message expression.

   objc-message-expression:
     [ objc-message-receiver objc-message-args ]

   Returns a representation of an Objective-C message.  */

static tree
cp_parser_objc_message_expression (cp_parser* parser)
{
  tree rcv, args;

  /* Consume the opening `['.  */
  cp_lexer_consume_token (parser->lexer);
  rcv = cp_parser_objc_message_receiver (parser);
  args = cp_parser_objc_message_args (parser);
  cp_parser_require (parser, CPP_CLOSE_SQUARE, RT_CLOSE_SQUARE);
  return objc_build_message_expr (rcv, args);
}
/* Parse an objc-message-receiver.

   objc-message-receiver:
     expression
     simple-type-specifier

  Returns a representation of the type or expression.  */

static tree
cp_parser_objc_message_receiver (cp_parser* parser)
{
  tree receiver;

  /* An Objective-C message receiver may be either (1) a type
     or (2) an expression.  Try the expression reading first.  */
  cp_parser_parse_tentatively (parser);
  receiver = cp_parser_expression (parser, false, NULL);
  if (cp_parser_parse_definitely (parser))
    return receiver;

  /* Otherwise it must name a class type.  */
  receiver = cp_parser_simple_type_specifier (parser,
                                              /*decl_specs=*/NULL,
                                              CP_PARSER_FLAGS_NONE);
  return objc_get_class_reference (receiver);
}
/* Parse the arguments and selectors comprising an Objective-C message.

   objc-message-args:
     objc-selector
     objc-selector-args
     objc-selector-args , objc-comma-args

   objc-selector-args:
     objc-selector [opt] : assignment-expression
     objc-selector-args objc-selector [opt] : assignment-expression

   objc-comma-args:
     assignment-expression
     objc-comma-args , assignment-expression

   Returns a TREE_LIST, with TREE_PURPOSE containing a list of
   selector arguments and TREE_VALUE containing a list of comma
   arguments.  */

static tree
cp_parser_objc_message_args (cp_parser* parser)
{
  tree keyword_args = NULL_TREE, comma_args = NULL_TREE;
  bool first_selector_p = true;
  cp_token *tok = cp_lexer_peek_token (parser->lexer);

  /* Gather the selector/argument pairs.  */
  while (cp_parser_objc_selector_p (tok->type) || tok->type == CPP_COLON)
    {
      tree sel = NULL_TREE;
      tree expr;

      if (tok->type != CPP_COLON)
        sel = cp_parser_objc_selector (parser);

      /* A unary selector has no `:' and takes no arguments.  */
      if (first_selector_p
          && cp_lexer_next_token_is_not (parser->lexer, CPP_COLON))
        return build_tree_list (sel, NULL_TREE);

      first_selector_p = false;
      cp_parser_require (parser, CPP_COLON, RT_COLON);
      expr = cp_parser_assignment_expression (parser, false, NULL);

      keyword_args = chainon (keyword_args, build_tree_list (sel, expr));
      tok = cp_lexer_peek_token (parser->lexer);
    }

  /* Handle non-selector arguments, if any.  */
  while (tok->type == CPP_COMMA)
    {
      tree expr;

      cp_lexer_consume_token (parser->lexer);
      expr = cp_parser_assignment_expression (parser, false, NULL);

      comma_args = chainon (comma_args, build_tree_list (NULL_TREE, expr));
      tok = cp_lexer_peek_token (parser->lexer);
    }

  if (keyword_args == NULL_TREE && comma_args == NULL_TREE)
    {
      cp_parser_error (parser, "objective-c++ message argument(s) are expected");
      return build_tree_list (error_mark_node, error_mark_node);
    }

  return build_tree_list (keyword_args, comma_args);
}
/* Parse an Objective-C encode expression.

   objc-encode-expression:
     @encode objc-typename

   Returns an encoded representation of the type argument.  */

static tree
cp_parser_objc_encode_expression (cp_parser* parser)
{
  tree type;
  cp_token *token;

  cp_lexer_consume_token (parser->lexer); /* Eat '@encode'.  */
  cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);
  /* Remember where the type started, for diagnostics.  */
  token = cp_lexer_peek_token (parser->lexer);
  type = complete_type (cp_parser_type_id (parser));
  cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);

  if (!type)
    {
      error_at (token->location,
                "%<@encode%> must specify a type as an argument");
      return error_mark_node;
    }

  /* This happens if we find @encode(T) (where T is a template
     typename or something dependent on a template typename) when
     parsing a template.  In that case, we can't compile it
     immediately, but we rather create an AT_ENCODE_EXPR which will
     need to be instantiated when the template is used.  */
  if (dependent_type_p (type))
    {
      tree result = build_min (AT_ENCODE_EXPR, size_type_node, type);
      TREE_READONLY (result) = 1;
      return result;
    }

  return objc_build_encode_expr (type);
}
/* Parse an Objective-C @defs expression.  */

static tree
cp_parser_objc_defs_expression (cp_parser *parser)
{
  tree class_name;

  cp_lexer_consume_token (parser->lexer); /* Eat '@defs'.  */
  cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);
  class_name = cp_parser_identifier (parser);
  cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
  return objc_get_class_ivars (class_name);
}
/* Parse an Objective-C protocol expression.

  objc-protocol-expression:
    @protocol ( identifier )

  Returns a representation of the protocol expression.  */

static tree
cp_parser_objc_protocol_expression (cp_parser* parser)
{
  tree protocol_name;

  cp_lexer_consume_token (parser->lexer); /* Eat '@protocol'.  */
  cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);
  protocol_name = cp_parser_identifier (parser);
  cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
  return objc_build_protocol_expr (protocol_name);
}
/* Parse an Objective-C selector expression.

   objc-selector-expression:
     @selector ( objc-method-signature )

   objc-method-signature:
     objc-selector
     objc-selector-seq

   objc-selector-seq:
     objc-selector :
     objc-selector-seq objc-selector :

  Returns a representation of the method selector.  */

static tree
cp_parser_objc_selector_expression (cp_parser* parser)
{
  tree sel_seq = NULL_TREE;
  bool maybe_unary_selector_p = true;
  cp_token *token;
  location_t loc = cp_lexer_peek_token (parser->lexer)->location;

  cp_lexer_consume_token (parser->lexer); /* Eat '@selector'.  */
  cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);
  token = cp_lexer_peek_token (parser->lexer);

  while (cp_parser_objc_selector_p (token->type) || token->type == CPP_COLON
         || token->type == CPP_SCOPE)
    {
      tree selector = NULL_TREE;

      /* NOTE(review): the `|| token->type == CPP_SCOPE' operand is
         redundant -- when the token is CPP_SCOPE the first operand is
         already true -- so this condition reduces to
         `token->type != CPP_COLON'.  Possibly `&&' was intended;
         confirm against the C front end before changing.  */
      if (token->type != CPP_COLON
          || token->type == CPP_SCOPE)
        selector = cp_parser_objc_selector (parser);

      if (cp_lexer_next_token_is_not (parser->lexer, CPP_COLON)
          && cp_lexer_next_token_is_not (parser->lexer, CPP_SCOPE))
        {
          /* Detect if we have a unary selector.  */
          if (maybe_unary_selector_p)
            {
              sel_seq = selector;
              goto finish_selector;
            }
          else
            {
              cp_parser_error (parser, "expected %<:%>");
            }
        }
      maybe_unary_selector_p = false;
      token = cp_lexer_consume_token (parser->lexer);

      /* A `::' counts as two consecutive colons, i.e. two empty
         selector slots.  */
      if (token->type == CPP_SCOPE)
        {
          sel_seq
            = chainon (sel_seq,
                       build_tree_list (selector, NULL_TREE));
          sel_seq
            = chainon (sel_seq,
                       build_tree_list (NULL_TREE, NULL_TREE));
        }
      else
        sel_seq
          = chainon (sel_seq,
                     build_tree_list (selector, NULL_TREE));

      token = cp_lexer_peek_token (parser->lexer);
    }

 finish_selector:
  cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);

  return objc_build_selector_expr (loc, sel_seq);
}
/* Parse a list of identifiers.

   objc-identifier-list:
     identifier
     objc-identifier-list , identifier

   Returns a TREE_LIST of identifier nodes.  */

static tree
cp_parser_objc_identifier_list (cp_parser* parser)
{
  tree list, id;

  id = cp_parser_identifier (parser);
  if (id == error_mark_node)
    return error_mark_node;

  list = build_tree_list (NULL_TREE, id);
  while (cp_lexer_next_token_is (parser->lexer, CPP_COMMA))
    {
      cp_lexer_consume_token (parser->lexer); /* Eat ','.  */
      id = cp_parser_identifier (parser);
      /* On a bad identifier, return what we have so far.  */
      if (id == error_mark_node)
        return list;
      list = chainon (list, build_tree_list (NULL_TREE, id));
    }

  return list;
}
/* Parse an Objective-C alias declaration.

  objc-alias-declaration:
    @compatibility_alias identifier identifier ;

  This function registers the alias mapping with the Objective-C front end.
  It returns nothing.  */

static void
cp_parser_objc_alias_declaration (cp_parser* parser)
{
  tree new_name, old_name;

  cp_lexer_consume_token (parser->lexer); /* Eat '@compatibility_alias'.  */
  new_name = cp_parser_identifier (parser);
  old_name = cp_parser_identifier (parser);
  objc_declare_alias (new_name, old_name);
  cp_parser_consume_semicolon_at_end_of_statement (parser);
}
/* Parse an Objective-C class forward-declaration.

   objc-class-declaration:
     @class objc-identifier-list ;

   The function registers the forward declarations with the Objective-C
   front end.  It returns nothing.  */

static void
cp_parser_objc_class_declaration (cp_parser* parser)
{
  cp_lexer_consume_token (parser->lexer); /* Eat '@class'.  */

  while (true)
    {
      tree id = cp_parser_identifier (parser);
      if (id == error_mark_node)
        break;

      objc_declare_class (id);

      /* A ',' continues the list; anything else ends it.  */
      if (!cp_lexer_next_token_is (parser->lexer, CPP_COMMA))
        break;
      cp_lexer_consume_token (parser->lexer);
    }

  cp_parser_consume_semicolon_at_end_of_statement (parser);
}
/* Parse a list of Objective-C protocol references.

   objc-protocol-refs-opt:
     objc-protocol-refs [opt]

   objc-protocol-refs:
     < objc-identifier-list >

   Returns a TREE_LIST of identifiers, if any.  */

static tree
cp_parser_objc_protocol_refs_opt (cp_parser* parser)
{
  tree refs = NULL_TREE;

  /* The reference list is optional; it is introduced by `<'.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_LESS))
    {
      cp_lexer_consume_token (parser->lexer); /* Eat '<'.  */
      refs = cp_parser_objc_identifier_list (parser);
      cp_parser_require (parser, CPP_GREATER, RT_GREATER);
    }

  return refs;
}
/* Parse a Objective-C visibility specification.  */

static void
cp_parser_objc_visibility_spec (cp_parser* parser)
{
  cp_token *vis = cp_lexer_peek_token (parser->lexer);

  if (vis->keyword == RID_AT_PRIVATE)
    objc_set_visibility (OBJC_IVAR_VIS_PRIVATE);
  else if (vis->keyword == RID_AT_PROTECTED)
    objc_set_visibility (OBJC_IVAR_VIS_PROTECTED);
  else if (vis->keyword == RID_AT_PUBLIC)
    objc_set_visibility (OBJC_IVAR_VIS_PUBLIC);
  else if (vis->keyword == RID_AT_PACKAGE)
    objc_set_visibility (OBJC_IVAR_VIS_PACKAGE);
  else
    /* Not a visibility keyword; leave the token alone.  */
    return;

  /* Eat '@private'/'@protected'/'@public'.  */
  cp_lexer_consume_token (parser->lexer);
}
/* Parse an Objective-C method type.  Return 'true' if it is a class
   (+) method, and 'false' if it is an instance (-) method.  */

static inline bool
cp_parser_objc_method_type (cp_parser* parser)
{
  /* A `+' introduces a class method; anything else (i.e. `-') an
     instance method.  */
  return cp_lexer_consume_token (parser->lexer)->type == CPP_PLUS;
}
/* Parse an Objective-C protocol qualifier.  */

static tree
cp_parser_objc_protocol_qualifiers (cp_parser* parser)
{
  tree quals = NULL_TREE;

  while (true)
    {
      cp_token *tok = cp_lexer_peek_token (parser->lexer);
      tree id = tok->u.value;

      /* Stop as soon as the token is not one of the six protocol
         qualifier keywords.  */
      if (!id || TREE_CODE (id) != IDENTIFIER_NODE)
        break;
      if (id != ridpointers [(int) RID_IN]
          && id != ridpointers [(int) RID_OUT]
          && id != ridpointers [(int) RID_INOUT]
          && id != ridpointers [(int) RID_BYCOPY]
          && id != ridpointers [(int) RID_BYREF]
          && id != ridpointers [(int) RID_ONEWAY])
        break;

      quals = tree_cons (NULL_TREE, id, quals);
      cp_lexer_consume_token (parser->lexer);
    }

  return quals;
}
/* Parse an Objective-C typename.  Returns NULL_TREE when no
   parenthesized type is present; otherwise a TREE_LIST whose
   TREE_PURPOSE holds the protocol qualifiers and whose TREE_VALUE
   holds the type (NULL_TREE meaning the default type 'id').  */

static tree
cp_parser_objc_typename (cp_parser* parser)
{
  tree result = NULL_TREE;

  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN))
    {
      tree quals, type = NULL_TREE;

      cp_lexer_consume_token (parser->lexer); /* Eat '('.  */
      quals = cp_parser_objc_protocol_qualifiers (parser);

      /* An ObjC type name may consist of just protocol qualifiers, in
         which case the type shall default to 'id'.  */
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_CLOSE_PAREN))
        {
          type = cp_parser_type_id (parser);
          if (type == error_mark_node)
            {
              /* An error has already been emitted; recover as if no
                 type had been given, which will use the default type
                 'id'.  We must skip to the closing parenthesis
                 ourselves, as cp_parser_type_id() does not seem to do
                 it for us.  */
              type = NULL_TREE;
              cp_parser_skip_to_closing_parenthesis (parser,
                                                     /*recovering=*/true,
                                                     /*or_comma=*/false,
                                                     /*consume_paren=*/false);
            }
        }

      cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
      result = build_tree_list (quals, type);
    }

  return result;
}
/* Check to see if TYPE refers to an Objective-C selector name. */
static bool
cp_parser_objc_selector_p (enum cpp_ttype type)
{
return (type == CPP_NAME || type == CPP_KEYWORD
|| type == CPP_AND_AND || type == CPP_AND_EQ || type == CPP_AND
|| type == CPP_OR || type == CPP_COMPL || type == CPP_NOT
|| type == CPP_NOT_EQ || type == CPP_OR_OR || type == CPP_OR_EQ
|| type == CPP_XOR || type == CPP_XOR_EQ);
}
/* Parse an Objective-C selector.  */

static tree
cp_parser_objc_selector (cp_parser* parser)
{
  cp_token *tok = cp_lexer_consume_token (parser->lexer);

  if (!cp_parser_objc_selector_p (tok->type))
    {
      error_at (tok->location, "invalid Objective-C++ selector name");
      return error_mark_node;
    }

  /* C++ operator names are allowed to appear in ObjC selectors; map
     each operator token back to its alternative spelling.  */
  switch (tok->type)
    {
    case CPP_AND_AND:
      return get_identifier ("and");
    case CPP_AND_EQ:
      return get_identifier ("and_eq");
    case CPP_AND:
      return get_identifier ("bitand");
    case CPP_OR:
      return get_identifier ("bitor");
    case CPP_COMPL:
      return get_identifier ("compl");
    case CPP_NOT:
      return get_identifier ("not");
    case CPP_NOT_EQ:
      return get_identifier ("not_eq");
    case CPP_OR_OR:
      return get_identifier ("or");
    case CPP_OR_EQ:
      return get_identifier ("or_eq");
    case CPP_XOR:
      return get_identifier ("xor");
    case CPP_XOR_EQ:
      return get_identifier ("xor_eq");
    default:
      /* A plain identifier or keyword: its value is the selector.  */
      return tok->u.value;
    }
}
/* Parse an Objective-C params list (the keyword-selector portion of a
   method declaration).  ATTRIBUTES, if non-NULL, receives any trailing
   method attributes.  Returns either a single identifier (for a unary
   selector), a chain of keyword declarations, or error_mark_node on a
   malformed declaration.

   Fix: the original ended with a second `if (params == NULL_TREE)'
   check that was unreachable -- an identical check already returns
   earlier, so PARAMS is known non-NULL by that point.  The dead
   duplicate has been removed; behavior is unchanged.  */

static tree
cp_parser_objc_method_keyword_params (cp_parser* parser, tree* attributes)
{
  tree params = NULL_TREE;
  bool maybe_unary_selector_p = true;
  cp_token *token = cp_lexer_peek_token (parser->lexer);

  while (cp_parser_objc_selector_p (token->type) || token->type == CPP_COLON)
    {
      tree selector = NULL_TREE, type_name, identifier;
      tree parm_attr = NULL_TREE;

      /* An `__attribute__' here belongs to the whole method, not to a
         parameter; leave it for the tail-attribute handling below.  */
      if (token->keyword == RID_ATTRIBUTE)
        break;

      if (token->type != CPP_COLON)
        selector = cp_parser_objc_selector (parser);

      /* Detect if we have a unary selector.  */
      if (maybe_unary_selector_p
          && cp_lexer_next_token_is_not (parser->lexer, CPP_COLON))
        {
          params = selector; /* Might be followed by attributes.  */
          break;
        }

      maybe_unary_selector_p = false;
      if (!cp_parser_require (parser, CPP_COLON, RT_COLON))
        {
          /* Something went quite wrong.  There should be a colon
             here, but there is not.  Stop parsing parameters.  */
          break;
        }
      type_name = cp_parser_objc_typename (parser);
      /* New ObjC allows attributes on parameters too.  */
      if (cp_lexer_next_token_is_keyword (parser->lexer, RID_ATTRIBUTE))
        parm_attr = cp_parser_attributes_opt (parser);
      identifier = cp_parser_identifier (parser);

      params
        = chainon (params,
                   objc_build_keyword_decl (selector,
                                            type_name,
                                            identifier,
                                            parm_attr));

      token = cp_lexer_peek_token (parser->lexer);
    }

  if (params == NULL_TREE)
    {
      cp_parser_error (parser, "objective-c++ method declaration is expected");
      return error_mark_node;
    }

  /* We allow tail attributes for the method.  */
  if (token->keyword == RID_ATTRIBUTE)
    {
      *attributes = cp_parser_attributes_opt (parser);
      if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON)
          || cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
        return params;
      cp_parser_error (parser,
                       "method attributes must be specified at the end");
      return error_mark_node;
    }

  return params;
}
/* Parse the non-keyword Objective-C params: the optional
   ", parm, parm, ..." (possibly ending in "...") that may follow the
   keyword parameters of a method declaration.

   Returns a TREE_LIST of the extra parameters (the list head node
   itself is an empty placeholder).  Sets *ELLIPSISP if a literal '...'
   terminated the list.  Trailing method attributes, if seen here, are
   stored in *ATTRIBUTES; a misplaced attribute list yields
   error_mark_node.  */
static tree
cp_parser_objc_method_tail_params_opt (cp_parser* parser, bool *ellipsisp,
				       tree* attributes)
{
  tree params = make_node (TREE_LIST);
  cp_token *token = cp_lexer_peek_token (parser->lexer);
  *ellipsisp = false; /* Initially, assume no ellipsis.  */

  while (token->type == CPP_COMMA)
    {
      cp_parameter_declarator *parmdecl;
      tree parm;

      cp_lexer_consume_token (parser->lexer);  /* Eat ','.  */
      token = cp_lexer_peek_token (parser->lexer);

      if (token->type == CPP_ELLIPSIS)
	{
	  cp_lexer_consume_token (parser->lexer);  /* Eat '...'.  */
	  *ellipsisp = true;
	  token = cp_lexer_peek_token (parser->lexer);
	  break;
	}

      /* TODO: parse attributes for tail parameters.  */
      parmdecl = cp_parser_parameter_declaration (parser, false, NULL);
      parm = grokdeclarator (parmdecl->declarator,
			     &parmdecl->decl_specifiers,
			     PARM, /*initialized=*/0,
			     /*attrlist=*/NULL);

      chainon (params, build_tree_list (NULL_TREE, parm));
      token = cp_lexer_peek_token (parser->lexer);
    }

  /* We allow tail attributes for the method.  */
  if (token->keyword == RID_ATTRIBUTE)
    {
      if (*attributes == NULL_TREE)
	{
	  *attributes = cp_parser_attributes_opt (parser);
	  if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON)
	      || cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
	    return params;
	}
      else
	/* We have an error, but parse the attributes, so that we can
	   carry on.  */
	*attributes = cp_parser_attributes_opt (parser);

      cp_parser_error (parser,
		       "method attributes must be specified at the end");
      return error_mark_node;
    }

  return params;
}
/* Parse a linkage specification, a pragma, an extra semicolon or a block.

   Handles the non-ObjC constructs that may legally appear between
   Objective-C++ method declarations/definitions, dispatching on the
   next token.  Anything unrecognized is handed to
   cp_parser_block_declaration as ordinary C++.  */
static void
cp_parser_objc_interstitial_code (cp_parser* parser)
{
  cp_token *token = cp_lexer_peek_token (parser->lexer);

  /* If the next token is `extern' and the following token is a string
     literal, then we have a linkage specification.  */
  if (token->keyword == RID_EXTERN
      && cp_parser_is_pure_string_literal
	 (cp_lexer_peek_nth_token (parser->lexer, 2)))
    cp_parser_linkage_specification (parser);
  /* Handle #pragma, if any.  */
  else if (token->type == CPP_PRAGMA)
    cp_parser_pragma (parser, pragma_external);
  /* Allow stray semicolons.  */
  else if (token->type == CPP_SEMICOLON)
    cp_lexer_consume_token (parser->lexer);
  /* Mark methods as optional or required, when building protocols.  */
  else if (token->keyword == RID_AT_OPTIONAL)
    {
      cp_lexer_consume_token (parser->lexer);
      objc_set_method_opt (true);
    }
  else if (token->keyword == RID_AT_REQUIRED)
    {
      cp_lexer_consume_token (parser->lexer);
      objc_set_method_opt (false);
    }
  else if (token->keyword == RID_NAMESPACE)
    cp_parser_namespace_definition (parser);
  /* Other stray characters must generate errors.  */
  else if (token->type == CPP_OPEN_BRACE || token->type == CPP_CLOSE_BRACE)
    {
      cp_lexer_consume_token (parser->lexer);
      error ("stray %qs between Objective-C++ methods",
	     token->type == CPP_OPEN_BRACE ? "{" : "}");
    }
  /* Finally, try to parse a block-declaration, or a function-definition.  */
  else
    cp_parser_block_declaration (parser, /*statement_p=*/false);
}
/* Parse a method signature: the '+'/'-' method-type marker, the
   optional return typename, the keyword parameters and any tail
   parameters.

   Returns the built method signature tree, or error_mark_node if
   either parameter list failed to parse.  Attributes found after the
   parameters are stored in *ATTRIBUTES (NULL_TREE if none).  */
static tree
cp_parser_objc_method_signature (cp_parser* parser, tree* attributes)
{
  tree rettype, kwdparms, optparms;
  bool ellipsis = false;
  bool is_class_method;

  is_class_method = cp_parser_objc_method_type (parser);
  rettype = cp_parser_objc_typename (parser);
  *attributes = NULL_TREE;
  kwdparms = cp_parser_objc_method_keyword_params (parser, attributes);
  if (kwdparms == error_mark_node)
    return error_mark_node;
  optparms = cp_parser_objc_method_tail_params_opt (parser, &ellipsis, attributes);
  if (optparms == error_mark_node)
    return error_mark_node;
  return objc_build_method_signature (is_class_method, rettype, kwdparms, optparms, ellipsis);
}
/* Having seen an __attribute__ where a method list is expected, decide
   whether it is a (disallowed) prefix attribute on a method: parse the
   attributes and return true if a '+' or '-' method introducer follows.
   Otherwise roll the lexer back so the attributes can be re-parsed as
   part of interstitial C++ code, and return false.

   NOTE(review): in the "true" case the saved tokens are neither
   committed nor rolled back here; presumably the caller's subsequent
   parse makes that harmless -- confirm against cp_lexer semantics.  */
static bool
cp_parser_objc_method_maybe_bad_prefix_attributes (cp_parser* parser)
{
  tree tattr;

  cp_lexer_save_tokens (parser->lexer);
  tattr = cp_parser_attributes_opt (parser);
  gcc_assert (tattr) ;

  /* If the attributes are followed by a method introducer, this is not
     allowed.  Dump the attributes and flag the situation.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_PLUS)
      || cp_lexer_next_token_is (parser->lexer, CPP_MINUS))
    return true;

  /* Otherwise, the attributes introduce some interstitial code,
     possibly; rewind to allow that check.  */
  cp_lexer_rollback_tokens (parser->lexer);
  return false;
}
/* Parse an Objective-C method prototype list: the body of an
   @interface or @protocol, up to (and consuming) the '@end'.

   Method declarations ('+'/'-' signatures), @property declarations,
   stray prefix attributes and interstitial C++ code are each handled;
   finishes by calling objc_finish_interface ().  */
static void
cp_parser_objc_method_prototype_list (cp_parser* parser)
{
  cp_token *token = cp_lexer_peek_token (parser->lexer);

  while (token->keyword != RID_AT_END && token->type != CPP_EOF)
    {
      if (token->type == CPP_PLUS || token->type == CPP_MINUS)
	{
	  tree attributes, sig;
	  bool is_class_method;
	  if (token->type == CPP_PLUS)
	    is_class_method = true;
	  else
	    is_class_method = false;
	  sig = cp_parser_objc_method_signature (parser, &attributes);
	  if (sig == error_mark_node)
	    {
	      /* Error recovery: skip the broken declaration and keep
		 scanning for more methods.  */
	      cp_parser_skip_to_end_of_block_or_statement (parser);
	      token = cp_lexer_peek_token (parser->lexer);
	      continue;
	    }
	  objc_add_method_declaration (is_class_method, sig, attributes);
	  cp_parser_consume_semicolon_at_end_of_statement (parser);
	}
      else if (token->keyword == RID_AT_PROPERTY)
	cp_parser_objc_at_property_declaration (parser);
      else if (token->keyword == RID_ATTRIBUTE
	       && cp_parser_objc_method_maybe_bad_prefix_attributes(parser))
	warning_at (cp_lexer_peek_token (parser->lexer)->location,
		    OPT_Wattributes,
		    "prefix attributes are ignored for methods");
      else
	/* Allow for interspersed non-ObjC++ code.  */
	cp_parser_objc_interstitial_code (parser);

      token = cp_lexer_peek_token (parser->lexer);
    }

  if (token->type != CPP_EOF)
    cp_lexer_consume_token (parser->lexer); /* Eat '@end'.  */
  else
    cp_parser_error (parser, "expected %<@end%>");

  objc_finish_interface ();
}
/* Parse an Objective-C method definition list: the body of an
   @implementation, up to (and consuming) the '@end'.

   Like the prototype list, but method signatures may be followed by
   function bodies.  Access checks are deferred across the signature
   parse (push_deferring_access_checks/pop_deferring_access_checks) so
   they are performed in the right context when a body follows.
   Finishes by calling objc_finish_implementation ().  */
static void
cp_parser_objc_method_definition_list (cp_parser* parser)
{
  cp_token *token = cp_lexer_peek_token (parser->lexer);

  while (token->keyword != RID_AT_END && token->type != CPP_EOF)
    {
      tree meth;

      if (token->type == CPP_PLUS || token->type == CPP_MINUS)
	{
	  cp_token *ptk;
	  tree sig, attribute;
	  bool is_class_method;
	  if (token->type == CPP_PLUS)
	    is_class_method = true;
	  else
	    is_class_method = false;
	  push_deferring_access_checks (dk_deferred);
	  sig = cp_parser_objc_method_signature (parser, &attribute);
	  if (sig == error_mark_node)
	    {
	      /* Error recovery: skip the broken definition.  */
	      cp_parser_skip_to_end_of_block_or_statement (parser);
	      token = cp_lexer_peek_token (parser->lexer);
	      continue;
	    }
	  objc_start_method_definition (is_class_method, sig, attribute,
					NULL_TREE);

	  /* For historical reasons, we accept an optional semicolon.  */
	  if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
	    cp_lexer_consume_token (parser->lexer);

	  ptk = cp_lexer_peek_token (parser->lexer);
	  /* Only parse a body if the next token does not start another
	     method (or end the implementation/file).  */
	  if (!(ptk->type == CPP_PLUS || ptk->type == CPP_MINUS
		|| ptk->type == CPP_EOF || ptk->keyword == RID_AT_END))
	    {
	      perform_deferred_access_checks ();
	      stop_deferring_access_checks ();
	      meth = cp_parser_function_definition_after_declarator (parser,
								     false);
	      pop_deferring_access_checks ();
	      objc_finish_method_definition (meth);
	    }
	}
      /* The following case will be removed once @synthesize is
	 completely implemented.  */
      else if (token->keyword == RID_AT_PROPERTY)
	cp_parser_objc_at_property_declaration (parser);
      else if (token->keyword == RID_AT_SYNTHESIZE)
	cp_parser_objc_at_synthesize_declaration (parser);
      else if (token->keyword == RID_AT_DYNAMIC)
	cp_parser_objc_at_dynamic_declaration (parser);
      else if (token->keyword == RID_ATTRIBUTE
	       && cp_parser_objc_method_maybe_bad_prefix_attributes(parser))
	warning_at (token->location, OPT_Wattributes,
		    "prefix attributes are ignored for methods");
      else
	/* Allow for interspersed non-ObjC++ code.  */
	cp_parser_objc_interstitial_code (parser);

      token = cp_lexer_peek_token (parser->lexer);
    }

  if (token->type != CPP_EOF)
    cp_lexer_consume_token (parser->lexer); /* Eat '@end'.  */
  else
    cp_parser_error (parser, "expected %<@end%>");

  objc_finish_implementation ();
}
/* Parse Objective-C ivars: the optional brace-enclosed instance
   variable block of an @interface/@implementation.

   Returns immediately if the next token is not '{'.  Each declaration
   may carry a visibility spec, decl-specifiers, and a comma-separated
   list of declarators (possibly bitfields).  Storage classes,
   __thread and typedef are rejected for ivars.  The closing '}' is
   eaten here, but a terminating '@end' is deliberately left in the
   stream for the caller.  */
static void
cp_parser_objc_class_ivars (cp_parser* parser)
{
  cp_token *token = cp_lexer_peek_token (parser->lexer);

  if (token->type != CPP_OPEN_BRACE)
    return;	/* No ivars specified.  */

  cp_lexer_consume_token (parser->lexer); /* Eat '{'.  */
  token = cp_lexer_peek_token (parser->lexer);

  while (token->type != CPP_CLOSE_BRACE
	&& token->keyword != RID_AT_END && token->type != CPP_EOF)
    {
      cp_decl_specifier_seq declspecs;
      int decl_class_or_enum_p;
      tree prefix_attributes;

      cp_parser_objc_visibility_spec (parser);

      if (cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_BRACE))
	break;

      cp_parser_decl_specifier_seq (parser,
				    CP_PARSER_FLAGS_OPTIONAL,
				    &declspecs,
				    &decl_class_or_enum_p);

      /* auto, register, static, extern, mutable.  */
      if (declspecs.storage_class != sc_none)
	{
	  cp_parser_error (parser, "invalid type for instance variable");
	  declspecs.storage_class = sc_none;
	}

      /* __thread.  */
      if (declspecs.specs[(int) ds_thread])
	{
	  cp_parser_error (parser, "invalid type for instance variable");
	  declspecs.specs[(int) ds_thread] = 0;
	}

      /* typedef.  */
      if (declspecs.specs[(int) ds_typedef])
	{
	  cp_parser_error (parser, "invalid type for instance variable");
	  declspecs.specs[(int) ds_typedef] = 0;
	}

      /* Attributes from the decl-specifiers apply to every declarator
	 in the list; they are re-prefixed per declarator below.  */
      prefix_attributes = declspecs.attributes;
      declspecs.attributes = NULL_TREE;

      /* Keep going until we hit the `;' at the end of the
	 declaration.  */
      while (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
	{
	  tree width = NULL_TREE, attributes, first_attribute, decl;
	  cp_declarator *declarator = NULL;
	  int ctor_dtor_or_conv_p;

	  /* Check for a (possibly unnamed) bitfield declaration.  */
	  token = cp_lexer_peek_token (parser->lexer);
	  if (token->type == CPP_COLON)
	    goto eat_colon;

	  if (token->type == CPP_NAME
	      && (cp_lexer_peek_nth_token (parser->lexer, 2)->type
		  == CPP_COLON))
	    {
	      /* Get the name of the bitfield.  */
	      declarator = make_id_declarator (NULL_TREE,
					       cp_parser_identifier (parser),
					       sfk_none);

	     eat_colon:
	      cp_lexer_consume_token (parser->lexer); /* Eat ':'.  */
	      /* Get the width of the bitfield.  */
	      width
		= cp_parser_constant_expression (parser,
						 /*allow_non_constant=*/false,
						 NULL);
	    }
	  else
	    {
	      /* Parse the declarator.  */
	      declarator
		= cp_parser_declarator (parser, CP_PARSER_DECLARATOR_NAMED,
					&ctor_dtor_or_conv_p,
					/*parenthesized_p=*/NULL,
					/*member_p=*/false);
	    }

	  /* Look for attributes that apply to the ivar.  */
	  attributes = cp_parser_attributes_opt (parser);
	  /* Remember which attributes are prefix attributes and
	     which are not.  */
	  first_attribute = attributes;
	  /* Combine the attributes.  */
	  attributes = chainon (prefix_attributes, attributes);

	  if (width)
	    /* Create the bitfield declaration.  */
	    decl = grokbitfield (declarator, &declspecs,
				 width,
				 attributes);
	  else
	    decl = grokfield (declarator, &declspecs,
			      NULL_TREE, /*init_const_expr_p=*/false,
			      NULL_TREE, attributes);

	  /* Add the instance variable.  */
	  if (decl != error_mark_node && decl != NULL_TREE)
	    objc_add_instance_variable (decl);

	  /* Reset PREFIX_ATTRIBUTES: unhook this declarator's own
	     attributes from the shared prefix chain so the prefix can
	     be reused for the next declarator.  */
	  while (attributes && TREE_CHAIN (attributes) != first_attribute)
	    attributes = TREE_CHAIN (attributes);
	  if (attributes)
	    TREE_CHAIN (attributes) = NULL_TREE;

	  token = cp_lexer_peek_token (parser->lexer);

	  if (token->type == CPP_COMMA)
	    {
	      cp_lexer_consume_token (parser->lexer);  /* Eat ','.  */
	      continue;
	    }
	  break;
	}

      cp_parser_consume_semicolon_at_end_of_statement (parser);
      token = cp_lexer_peek_token (parser->lexer);
    }

  if (token->keyword == RID_AT_END)
    cp_parser_error (parser, "expected %<}%>");

  /* Do not consume the RID_AT_END, so it will be read again as terminating
     the @interface of @implementation.  */
  if (token->keyword != RID_AT_END && token->type != CPP_EOF)
    cp_lexer_consume_token (parser->lexer); /* Eat '}'.  */

  /* For historical reasons, we accept an optional semicolon.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
    cp_lexer_consume_token (parser->lexer);
}
/* Parse an Objective-C protocol declaration, either a forward
   declaration ("@protocol A, B;") or a full definition
   ("@protocol A <refs> ... @end").  ATTRIBUTES are prefix attributes
   already parsed by the caller.  */
static void
cp_parser_objc_protocol_declaration (cp_parser* parser, tree attributes)
{
  tree proto, protorefs;
  cp_token *tok;

  cp_lexer_consume_token (parser->lexer);  /* Eat '@protocol'.  */
  if (cp_lexer_next_token_is_not (parser->lexer, CPP_NAME))
    {
      tok = cp_lexer_peek_token (parser->lexer);
      error_at (tok->location, "identifier expected after %<@protocol%>");
      cp_parser_consume_semicolon_at_end_of_statement (parser);
      return;
    }

  /* See if we have a forward declaration or a definition.  */
  tok = cp_lexer_peek_nth_token (parser->lexer, 2);

  /* Try a forward declaration first.  */
  if (tok->type == CPP_COMMA || tok->type == CPP_SEMICOLON)
    {
      while (true)
	{
	  tree id;

	  id = cp_parser_identifier (parser);
	  if (id == error_mark_node)
	    break;

	  objc_declare_protocol (id, attributes);

	  if(cp_lexer_next_token_is (parser->lexer, CPP_COMMA))
	    cp_lexer_consume_token (parser->lexer);
	  else
	    break;
	}
      cp_parser_consume_semicolon_at_end_of_statement (parser);
    }

  /* Ok, we got a full-fledged definition (or at least should).  */
  else
    {
      proto = cp_parser_identifier (parser);
      protorefs = cp_parser_objc_protocol_refs_opt (parser);
      objc_start_protocol (proto, protorefs, attributes);
      cp_parser_objc_method_prototype_list (parser);
    }
}
/* Parse an optional Objective-C superclass (": Name") or category
   ("( Name )") clause after a class name.

   IFACE_P is true when parsing an @interface (where an empty category
   "( )" denotes a class extension).  On return, exactly one of *SUPER
   and *CATEG may be set to the parsed identifier (both NULL_TREE when
   neither clause is present), and *IS_CLASS_EXTENSION records the
   empty-category case.  */
static void
cp_parser_objc_superclass_or_category (cp_parser *parser,
				       bool iface_p,
				       tree *super,
				       tree *categ, bool *is_class_extension)
{
  cp_token *tok = cp_lexer_peek_token (parser->lexer);

  *super = NULL_TREE;
  *categ = NULL_TREE;
  *is_class_extension = false;

  if (tok->type == CPP_COLON)
    {
      /* ": SuperName".  */
      cp_lexer_consume_token (parser->lexer);  /* Eat ':'.  */
      *super = cp_parser_identifier (parser);
      return;
    }

  if (tok->type != CPP_OPEN_PAREN)
    return;

  /* "( [CategoryName] )".  */
  cp_lexer_consume_token (parser->lexer);  /* Eat '('.  */

  /* If there is no category name, and this is an @interface, we
     have a class extension.  */
  if (iface_p && cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_PAREN))
    *is_class_extension = true;
  else
    *categ = cp_parser_identifier (parser);

  cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
}
/* Parse an Objective-C class interface: "@interface Name ... @end".

   ATTRIBUTES are prefix attributes already parsed by the caller.
   Depending on what follows the name, this starts either a category
   interface (including class extensions) or a class interface with
   optional ivars, then parses the method prototype list.  */
static void
cp_parser_objc_class_interface (cp_parser* parser, tree attributes)
{
  tree name, super, categ, protos;
  bool is_class_extension;

  cp_lexer_consume_token (parser->lexer);  /* Eat '@interface'.  */
  name = cp_parser_identifier (parser);
  if (name == error_mark_node)
    {
      /* It's hard to recover because even if valid @interface stuff
	 is to follow, we can't compile it (or validate it) if we
	 don't even know which class it refers to.  Let's assume this
	 was a stray '@interface' token in the stream and skip it.
      */
      return;
    }
  cp_parser_objc_superclass_or_category (parser, true, &super, &categ,
					 &is_class_extension);
  protos = cp_parser_objc_protocol_refs_opt (parser);

  /* We have either a class or a category on our hands.  */
  if (categ || is_class_extension)
    objc_start_category_interface (name, categ, protos, attributes);
  else
    {
      objc_start_class_interface (name, super, protos, attributes);
      /* Handle instance variable declarations, if any.  */
      cp_parser_objc_class_ivars (parser);
      objc_continue_interface ();
    }

  cp_parser_objc_method_prototype_list (parser);
}
/* Parse an Objective-C class implementation:
   "@implementation Name ... @end".

   Mirrors cp_parser_objc_class_interface, but prefix attributes are
   not accepted and a class extension is not possible (is_class_extension
   is parsed but unused here).  */
static void
cp_parser_objc_class_implementation (cp_parser* parser)
{
  tree name, super, categ;
  bool is_class_extension;

  cp_lexer_consume_token (parser->lexer);  /* Eat '@implementation'.  */
  name = cp_parser_identifier (parser);
  if (name == error_mark_node)
    {
      /* It's hard to recover because even if valid @implementation
	 stuff is to follow, we can't compile it (or validate it) if
	 we don't even know which class it refers to.  Let's assume
	 this was a stray '@implementation' token in the stream and
	 skip it.
      */
      return;
    }
  cp_parser_objc_superclass_or_category (parser, false, &super, &categ,
					 &is_class_extension);

  /* We have either a class or a category on our hands.  */
  if (categ)
    objc_start_category_implementation (name, categ);
  else
    {
      objc_start_class_implementation (name, super);
      /* Handle instance variable declarations, if any.  */
      cp_parser_objc_class_ivars (parser);
      objc_continue_implementation ();
    }

  cp_parser_objc_method_definition_list (parser);
}
/* Consume the @end token and finish off the implementation.  Used when
   a stray '@end' is seen at declaration scope.  */
static void
cp_parser_objc_end_implementation (cp_parser* parser)
{
  cp_lexer_consume_token (parser->lexer);  /* Eat '@end'.  */
  objc_finish_implementation ();
}
/* Parse an Objective-C declaration: dispatch on the '@' keyword to the
   appropriate @alias/@class/@protocol/@interface/@implementation/@end
   parser.  ATTRIBUTES are prefix attributes (rejected or ignored for
   the keywords that cannot take them).

   Fix: the RID_AT_IMPLEMENTATION case previously fell through into
   'default:' without a break or fall-through annotation; an explicit
   break is added (behavior unchanged, since default only breaks).  */
static void
cp_parser_objc_declaration (cp_parser* parser, tree attributes)
{
  /* Try to figure out what kind of declaration is present.  */
  cp_token *kwd = cp_lexer_peek_token (parser->lexer);

  if (attributes)
    switch (kwd->keyword)
      {
	case RID_AT_ALIAS:
	case RID_AT_CLASS:
	case RID_AT_END:
	  error_at (kwd->location, "attributes may not be specified before"
		    " the %<@%D%> Objective-C++ keyword",
		    kwd->u.value);
	  attributes = NULL;
	  break;
	case RID_AT_IMPLEMENTATION:
	  warning_at (kwd->location, OPT_Wattributes,
		      "prefix attributes are ignored before %<@%D%>",
		      kwd->u.value);
	  attributes = NULL;
	  break;
	default:
	  break;
      }

  switch (kwd->keyword)
    {
    case RID_AT_ALIAS:
      cp_parser_objc_alias_declaration (parser);
      break;
    case RID_AT_CLASS:
      cp_parser_objc_class_declaration (parser);
      break;
    case RID_AT_PROTOCOL:
      cp_parser_objc_protocol_declaration (parser, attributes);
      break;
    case RID_AT_INTERFACE:
      cp_parser_objc_class_interface (parser, attributes);
      break;
    case RID_AT_IMPLEMENTATION:
      cp_parser_objc_class_implementation (parser);
      break;
    case RID_AT_END:
      cp_parser_objc_end_implementation (parser);
      break;
    default:
      error_at (kwd->location, "misplaced %<@%D%> Objective-C++ construct",
		kwd->u.value);
      cp_parser_skip_to_end_of_block_or_statement (parser);
    }
}
/* Parse an Objective-C try-catch-finally statement.

   objc-try-catch-finally-stmt:
     @try compound-statement objc-catch-clause-seq [opt]
       objc-finally-clause [opt]

   objc-catch-clause-seq:
     objc-catch-clause objc-catch-clause-seq [opt]

   objc-catch-clause:
     @catch ( objc-exception-declaration ) compound-statement

   objc-finally-clause:
     @finally compound-statement

   objc-exception-declaration:
     parameter-declaration
     '...'

   where '...' is to be interpreted literally, that is, it means CPP_ELLIPSIS.

   Returns NULL_TREE.

   PS: This function is identical to c_parser_objc_try_catch_finally_statement
   for C.  Keep them in sync.  */
static tree
cp_parser_objc_try_catch_finally_statement (cp_parser *parser)
{
  location_t location;
  tree stmt;

  cp_parser_require_keyword (parser, RID_AT_TRY, RT_AT_TRY);
  location = cp_lexer_peek_token (parser->lexer)->location;
  objc_maybe_warn_exceptions (location);
  /* NB: The @try block needs to be wrapped in its own STATEMENT_LIST
     node, lest it get absorbed into the surrounding block.  */
  stmt = push_stmt_list ();
  cp_parser_compound_statement (parser, NULL, false, false);
  objc_begin_try_stmt (location, pop_stmt_list (stmt));

  while (cp_lexer_next_token_is_keyword (parser->lexer, RID_AT_CATCH))
    {
      cp_parameter_declarator *parm;
      tree parameter_declaration = error_mark_node;
      bool seen_open_paren = false;

      cp_lexer_consume_token (parser->lexer);
      if (cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN))
	seen_open_paren = true;
      if (cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS))
	{
	  /* We have "@catch (...)" (where the '...' are literally
	     what is in the code).  Skip the '...'.
	     parameter_declaration is set to NULL_TREE, and
	     objc_being_catch_clauses() knows that that means
	     '...'.  */
	  cp_lexer_consume_token (parser->lexer);
	  parameter_declaration = NULL_TREE;
	}
      else
	{
	  /* We have "@catch (NSException *exception)" or something
	     like that.  Parse the parameter declaration.  */
	  parm = cp_parser_parameter_declaration (parser, false, NULL);
	  if (parm == NULL)
	    parameter_declaration = error_mark_node;
	  else
	    parameter_declaration = grokdeclarator (parm->declarator,
						    &parm->decl_specifiers,
						    PARM, /*initialized=*/0,
						    /*attrlist=*/NULL);
	}
      if (seen_open_paren)
	cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
      else
	{
	  /* If there was no open parenthesis, we are recovering from
	     an error, and we are trying to figure out what mistake
	     the user has made.  */

	  /* If there is an immediate closing parenthesis, the user
	     probably forgot the opening one (ie, they typed "@catch
	     NSException *e)".  Parse the closing parenthesis and keep
	     going.  */
	  if (cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_PAREN))
	    cp_lexer_consume_token (parser->lexer);

	  /* If these is no immediate closing parenthesis, the user
	     probably doesn't know that parenthesis are required at
	     all (ie, they typed "@catch NSException *e").  So, just
	     forget about the closing parenthesis and keep going.  */
	}
      objc_begin_catch_clause (parameter_declaration);
      cp_parser_compound_statement (parser, NULL, false, false);
      objc_finish_catch_clause ();
    }

  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_AT_FINALLY))
    {
      cp_lexer_consume_token (parser->lexer);
      location = cp_lexer_peek_token (parser->lexer)->location;
      /* NB: The @finally block needs to be wrapped in its own STATEMENT_LIST
	 node, lest it get absorbed into the surrounding block.  */
      stmt = push_stmt_list ();
      cp_parser_compound_statement (parser, NULL, false, false);
      objc_build_finally_clause (location, pop_stmt_list (stmt));
    }

  return objc_finish_try_stmt ();
}
/* Parse an Objective-C synchronized statement.

   objc-synchronized-stmt:
     @synchronized ( expression ) compound-statement

   Returns NULL_TREE.  */
static tree
cp_parser_objc_synchronized_statement (cp_parser *parser)
{
  location_t location;
  tree lock, stmt;

  cp_parser_require_keyword (parser, RID_AT_SYNCHRONIZED, RT_AT_SYNCHRONIZED);

  location = cp_lexer_peek_token (parser->lexer)->location;
  objc_maybe_warn_exceptions (location);
  cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);
  lock = cp_parser_expression (parser, false, NULL);
  cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);

  /* NB: The @synchronized block needs to be wrapped in its own STATEMENT_LIST
     node, lest it get absorbed into the surrounding block.  */
  stmt = push_stmt_list ();
  cp_parser_compound_statement (parser, NULL, false, false);

  return objc_build_synchronized (location, lock, pop_stmt_list (stmt));
}
/* Parse an Objective-C throw statement.

   objc-throw-stmt:
     @throw assignment-expression [opt] ;

   Returns a constructed '@throw' statement.  A bare "@throw;" (used to
   rethrow inside an @catch) is represented by a NULL_TREE expression.  */
static tree
cp_parser_objc_throw_statement (cp_parser *parser)
{
  location_t throw_loc = cp_lexer_peek_token (parser->lexer)->location;
  tree thrown = NULL_TREE;

  cp_parser_require_keyword (parser, RID_AT_THROW, RT_AT_THROW);

  /* Parse the optional expression, unless the ';' follows directly.  */
  if (!cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
    thrown = cp_parser_expression (parser, /*cast_p=*/false, NULL);

  cp_parser_consume_semicolon_at_end_of_statement (parser);

  return objc_build_throw_stmt (throw_loc, thrown);
}
/* Parse an Objective-C statement: dispatch an '@' keyword seen in
   statement context to @try/@synchronized/@throw; anything else is a
   misplaced construct and is skipped with an error.  */
static tree
cp_parser_objc_statement (cp_parser * parser)
{
  /* Try to figure out what kind of declaration is present.  */
  cp_token *kwd = cp_lexer_peek_token (parser->lexer);

  switch (kwd->keyword)
    {
    case RID_AT_TRY:
      return cp_parser_objc_try_catch_finally_statement (parser);
    case RID_AT_SYNCHRONIZED:
      return cp_parser_objc_synchronized_statement (parser);
    case RID_AT_THROW:
      return cp_parser_objc_throw_statement (parser);
    default:
      error_at (kwd->location, "misplaced %<@%D%> Objective-C++ construct",
		kwd->u.value);
      cp_parser_skip_to_end_of_block_or_statement (parser);
    }

  return error_mark_node;
}
/* If we are compiling ObjC++ and we see an __attribute__ we neeed to
   look ahead to see if an objc keyword follows the attributes.  This
   is to detect the use of prefix attributes on ObjC @interface and
   @protocol.

   Returns true (with the lexer committed past the attributes and
   *ATTRIB set) if an '@' keyword follows; otherwise rolls the lexer
   back and returns false so the attributes can be re-parsed as part of
   an ordinary declaration.  */
static bool
cp_parser_objc_valid_prefix_attributes (cp_parser* parser, tree *attrib)
{
  cp_lexer_save_tokens (parser->lexer);
  *attrib = cp_parser_attributes_opt (parser);
  gcc_assert (*attrib);
  if (OBJC_IS_AT_KEYWORD (cp_lexer_peek_token (parser->lexer)->keyword))
    {
      cp_lexer_commit_tokens (parser->lexer);
      return true;
    }
  cp_lexer_rollback_tokens (parser->lexer);
  return false;
}
/* This routine is a minimal replacement for
   c_parser_struct_declaration () used when parsing the list of
   types/names or ObjC++ properties.  For example, when parsing the
   code

   @property (readonly) int a, b, c;

   this function is responsible for parsing "int a, int b, int c" and
   returning the declarations as CHAIN of DECLs.

   Returns the DECL_CHAIN of declarations in reverse source order,
   NULL_TREE if nothing was declared, or error_mark_node on any parse
   error.  Storage classes, __thread and typedef are rejected.

   TODO: Share this code with cp_parser_objc_class_ivars.  It's very
   similar parsing.  */
static tree
cp_parser_objc_struct_declaration (cp_parser *parser)
{
  tree decls = NULL_TREE;
  cp_decl_specifier_seq declspecs;
  int decl_class_or_enum_p;
  tree prefix_attributes;

  cp_parser_decl_specifier_seq (parser,
				CP_PARSER_FLAGS_NONE,
				&declspecs,
				&decl_class_or_enum_p);

  if (declspecs.type == error_mark_node)
    return error_mark_node;

  /* auto, register, static, extern, mutable.  */
  if (declspecs.storage_class != sc_none)
    {
      cp_parser_error (parser, "invalid type for property");
      declspecs.storage_class = sc_none;
    }

  /* __thread.  */
  if (declspecs.specs[(int) ds_thread])
    {
      cp_parser_error (parser, "invalid type for property");
      declspecs.specs[(int) ds_thread] = 0;
    }

  /* typedef.  */
  if (declspecs.specs[(int) ds_typedef])
    {
      cp_parser_error (parser, "invalid type for property");
      declspecs.specs[(int) ds_typedef] = 0;
    }

  /* The decl-specifier attributes are re-prefixed onto each declarator
     in the comma-separated list.  */
  prefix_attributes = declspecs.attributes;
  declspecs.attributes = NULL_TREE;

  /* Keep going until we hit the `;' at the end of the declaration.  */
  while (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
    {
      tree attributes, first_attribute, decl;
      cp_declarator *declarator;
      cp_token *token;

      /* Parse the declarator.  */
      declarator = cp_parser_declarator (parser, CP_PARSER_DECLARATOR_NAMED,
					 NULL, NULL, false);

      /* Look for attributes that apply to the ivar.  */
      attributes = cp_parser_attributes_opt (parser);
      /* Remember which attributes are prefix attributes and
	 which are not.  */
      first_attribute = attributes;
      /* Combine the attributes.  */
      attributes = chainon (prefix_attributes, attributes);

      decl = grokfield (declarator, &declspecs,
			NULL_TREE, /*init_const_expr_p=*/false,
			NULL_TREE, attributes);

      if (decl == error_mark_node || decl == NULL_TREE)
	return error_mark_node;

      /* Reset PREFIX_ATTRIBUTES: detach this declarator's own
	 attributes from the shared prefix chain for reuse.  */
      while (attributes && TREE_CHAIN (attributes) != first_attribute)
	attributes = TREE_CHAIN (attributes);
      if (attributes)
	TREE_CHAIN (attributes) = NULL_TREE;

      DECL_CHAIN (decl) = decls;
      decls = decl;

      token = cp_lexer_peek_token (parser->lexer);
      if (token->type == CPP_COMMA)
	{
	  cp_lexer_consume_token (parser->lexer);  /* Eat ','.  */
	  continue;
	}
      else
	break;
    }
  return decls;
}
/* Parse an Objective-C @property declaration.  The syntax is:

   objc-property-declaration:
     '@property' objc-property-attributes[opt] struct-declaration ;

   objc-property-attributes:
    '(' objc-property-attribute-list ')'

   objc-property-attribute-list:
     objc-property-attribute
     objc-property-attribute-list, objc-property-attribute

   objc-property-attribute
     'getter' = identifier
     'setter' = identifier
     'readonly'
     'readwrite'
     'assign'
     'retain'
     'copy'
     'nonatomic'

  For example:
    @property NSString *name;
    @property (readonly) id object;
    @property (retain, nonatomic, getter=getTheName) id name;
    @property int a, b, c;

   PS: This function is identical to
   c_parser_objc_at_property_declaration for C.  Keep them in sync.  */
static void
cp_parser_objc_at_property_declaration (cp_parser *parser)
{
  /* The following variables hold the attributes of the properties as
     parsed.  They are 'false' or 'NULL_TREE' if the attribute was not
     seen.  When we see an attribute, we set them to 'true' (if they
     are boolean properties) or to the identifier (if they have an
     argument, ie, for getter and setter).  Note that here we only
     parse the list of attributes, check the syntax and accumulate the
     attributes that we find.  objc_add_property_declaration() will
     then process the information.  */
  bool property_assign = false;
  bool property_copy = false;
  tree property_getter_ident = NULL_TREE;
  bool property_nonatomic = false;
  bool property_readonly = false;
  bool property_readwrite = false;
  bool property_retain = false;
  tree property_setter_ident = NULL_TREE;

  /* 'properties' is the list of properties that we read.  Usually a
     single one, but maybe more (eg, in "@property int a, b, c;" there
     are three).  */
  tree properties;
  location_t loc;

  loc = cp_lexer_peek_token (parser->lexer)->location;

  cp_lexer_consume_token (parser->lexer);  /* Eat '@property'.  */

  /* Parse the optional attribute list...  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN))
    {
      /* Eat the '('.  */
      cp_lexer_consume_token (parser->lexer);

      while (true)
	{
	  bool syntax_error = false;
	  cp_token *token = cp_lexer_peek_token (parser->lexer);
	  enum rid keyword;

	  if (token->type != CPP_NAME)
	    {
	      cp_parser_error (parser, "expected identifier");
	      break;
	    }
	  keyword = C_RID_CODE (token->u.value);
	  cp_lexer_consume_token (parser->lexer);
	  switch (keyword)
	    {
	    case RID_ASSIGN:    property_assign = true;    break;
	    case RID_COPY:      property_copy = true;      break;
	    case RID_NONATOMIC: property_nonatomic = true; break;
	    case RID_READONLY:  property_readonly = true;  break;
	    case RID_READWRITE: property_readwrite = true; break;
	    case RID_RETAIN:    property_retain = true;    break;

	    case RID_GETTER:
	    case RID_SETTER:
	      /* Both take the form "getter = name" / "setter = name:".  */
	      if (cp_lexer_next_token_is_not (parser->lexer, CPP_EQ))
		{
		  if (keyword == RID_GETTER)
		    cp_parser_error (parser,
				     "missing %<=%> (after %<getter%> attribute)");
		  else
		    cp_parser_error (parser,
				     "missing %<=%> (after %<setter%> attribute)");
		  syntax_error = true;
		  break;
		}
	      cp_lexer_consume_token (parser->lexer); /* eat the = */
	      if (!cp_parser_objc_selector_p (cp_lexer_peek_token (parser->lexer)->type))
		{
		  cp_parser_error (parser, "expected identifier");
		  syntax_error = true;
		  break;
		}
	      if (keyword == RID_SETTER)
		{
		  if (property_setter_ident != NULL_TREE)
		    {
		      cp_parser_error (parser, "the %<setter%> attribute may only be specified once");
		      cp_lexer_consume_token (parser->lexer);
		    }
		  else
		    property_setter_ident = cp_parser_objc_selector (parser);
		  /* A setter selector must be a full keyword selector,
		     ending in ':'.  */
		  if (cp_lexer_next_token_is_not (parser->lexer, CPP_COLON))
		    cp_parser_error (parser, "setter name must terminate with %<:%>");
		  else
		    cp_lexer_consume_token (parser->lexer);
		}
	      else
		{
		  if (property_getter_ident != NULL_TREE)
		    {
		      cp_parser_error (parser, "the %<getter%> attribute may only be specified once");
		      cp_lexer_consume_token (parser->lexer);
		    }
		  else
		    property_getter_ident = cp_parser_objc_selector (parser);
		}
	      break;
	    default:
	      cp_parser_error (parser, "unknown property attribute");
	      syntax_error = true;
	      break;
	    }

	  if (syntax_error)
	    break;

	  if (cp_lexer_next_token_is (parser->lexer, CPP_COMMA))
	    cp_lexer_consume_token (parser->lexer);
	  else
	    break;
	}

      /* FIXME: "@property (setter, assign);" will generate a spurious
	 "error: expected ‘)’ before ‘,’ token".  This is because
	 cp_parser_require, unlike the C counterpart, will produce an
	 error even if we are in error recovery.  */
      if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN))
	{
	  cp_parser_skip_to_closing_parenthesis (parser,
						 /*recovering=*/true,
						 /*or_comma=*/false,
						 /*consume_paren=*/true);
	}
    }

  /* ... and the property declaration(s).  */
  properties = cp_parser_objc_struct_declaration (parser);

  if (properties == error_mark_node)
    {
      cp_parser_skip_to_end_of_statement (parser);
      /* If the next token is now a `;', consume it.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
	cp_lexer_consume_token (parser->lexer);
      return;
    }

  if (properties == NULL_TREE)
    cp_parser_error (parser, "expected identifier");
  else
    {
      /* Comma-separated properties are chained together in
	 reverse order; add them one by one.  */
      properties = nreverse (properties);

      for (; properties; properties = TREE_CHAIN (properties))
	objc_add_property_declaration (loc, copy_node (properties),
				       property_readonly, property_readwrite,
				       property_assign, property_retain,
				       property_copy, property_nonatomic,
				       property_getter_ident, property_setter_ident);
    }

  cp_parser_consume_semicolon_at_end_of_statement (parser);
}
/* Parse an Objective-C++ @synthesize declaration. The syntax is:
objc-synthesize-declaration:
@synthesize objc-synthesize-identifier-list ;
objc-synthesize-identifier-list:
objc-synthesize-identifier
objc-synthesize-identifier-list, objc-synthesize-identifier
objc-synthesize-identifier
identifier
identifier = identifier
For example:
@synthesize MyProperty;
@synthesize OneProperty, AnotherProperty=MyIvar, YetAnotherProperty;
PS: This function is identical to c_parser_objc_at_synthesize_declaration
for C. Keep them in sync.
*/
static void
cp_parser_objc_at_synthesize_declaration (cp_parser *parser)
{
  /* TREE_LIST of (ivar . property) pairs; ivar is NULL_TREE when only
     the property name was given.  */
  tree list = NULL_TREE;
  location_t loc;
  loc = cp_lexer_peek_token (parser->lexer)->location;

  cp_lexer_consume_token (parser->lexer);  /* Eat '@synthesize'.  */
  while (true)
    {
      tree property, ivar;
      property = cp_parser_identifier (parser);
      if (property == error_mark_node)
	{
	  /* Error recovery: give up on the whole declaration.  */
	  cp_parser_consume_semicolon_at_end_of_statement (parser);
	  return;
	}
      if (cp_lexer_next_token_is (parser->lexer, CPP_EQ))
	{
	  /* "property = ivar" form.  */
	  cp_lexer_consume_token (parser->lexer);
	  ivar = cp_parser_identifier (parser);
	  if (ivar == error_mark_node)
	    {
	      cp_parser_consume_semicolon_at_end_of_statement (parser);
	      return;
	    }
	}
      else
	ivar = NULL_TREE;
      list = chainon (list, build_tree_list (ivar, property));
      /* A comma continues the identifier list; anything else ends it.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_COMMA))
	cp_lexer_consume_token (parser->lexer);
      else
	break;
    }
  cp_parser_consume_semicolon_at_end_of_statement (parser);
  objc_add_synthesize_declaration (loc, list);
}
/* Parse an Objective-C++ @dynamic declaration. The syntax is:
objc-dynamic-declaration:
@dynamic identifier-list ;
For example:
@dynamic MyProperty;
@dynamic MyProperty, AnotherProperty;
PS: This function is identical to c_parser_objc_at_dynamic_declaration
for C. Keep them in sync.
*/
static void
cp_parser_objc_at_dynamic_declaration (cp_parser *parser)
{
  /* TREE_LIST of the property identifiers seen, in source order.  */
  tree list = NULL_TREE;
  location_t loc;
  loc = cp_lexer_peek_token (parser->lexer)->location;

  cp_lexer_consume_token (parser->lexer);  /* Eat '@dynamic'.  */
  while (true)
    {
      tree property;
      property = cp_parser_identifier (parser);
      if (property == error_mark_node)
	{
	  /* Error recovery: abandon the declaration.  */
	  cp_parser_consume_semicolon_at_end_of_statement (parser);
	  return;
	}
      list = chainon (list, build_tree_list (NULL, property));
      /* A comma continues the identifier list; anything else ends it.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_COMMA))
	cp_lexer_consume_token (parser->lexer);
      else
	break;
    }
  cp_parser_consume_semicolon_at_end_of_statement (parser);
  objc_add_dynamic_declaration (loc, list);
}
/* OpenMP 2.5 parsing routines. */
/* Returns name of the next clause.
If the clause is not recognized PRAGMA_OMP_CLAUSE_NONE is returned and
the token is not consumed. Otherwise appropriate pragma_omp_clause is
returned and the token is consumed. */
static pragma_omp_clause
cp_parser_omp_clause_name (cp_parser *parser)
{
  pragma_omp_clause result = PRAGMA_OMP_CLAUSE_NONE;

  /* "if", "default" and "private" are C++ keywords, so they never
     arrive as CPP_NAME tokens and must be recognized explicitly.  */
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_IF))
    result = PRAGMA_OMP_CLAUSE_IF;
  else if (cp_lexer_next_token_is_keyword (parser->lexer, RID_DEFAULT))
    result = PRAGMA_OMP_CLAUSE_DEFAULT;
  else if (cp_lexer_next_token_is_keyword (parser->lexer, RID_PRIVATE))
    result = PRAGMA_OMP_CLAUSE_PRIVATE;
  else if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
    {
      /* All remaining clause names are ordinary identifiers.  */
      const char *p
	= IDENTIFIER_POINTER (cp_lexer_peek_token (parser->lexer)->u.value);

      if (!strcmp (p, "collapse"))
	result = PRAGMA_OMP_CLAUSE_COLLAPSE;
      else if (!strcmp (p, "copyin"))
	result = PRAGMA_OMP_CLAUSE_COPYIN;
      else if (!strcmp (p, "copyprivate"))
	result = PRAGMA_OMP_CLAUSE_COPYPRIVATE;
      else if (!strcmp (p, "final"))
	result = PRAGMA_OMP_CLAUSE_FINAL;
      else if (!strcmp (p, "firstprivate"))
	result = PRAGMA_OMP_CLAUSE_FIRSTPRIVATE;
      else if (!strcmp (p, "lastprivate"))
	result = PRAGMA_OMP_CLAUSE_LASTPRIVATE;
      else if (!strcmp (p, "mergeable"))
	result = PRAGMA_OMP_CLAUSE_MERGEABLE;
      else if (!strcmp (p, "nowait"))
	result = PRAGMA_OMP_CLAUSE_NOWAIT;
      else if (!strcmp (p, "num_threads"))
	result = PRAGMA_OMP_CLAUSE_NUM_THREADS;
      else if (!strcmp (p, "ordered"))
	result = PRAGMA_OMP_CLAUSE_ORDERED;
      else if (!strcmp (p, "reduction"))
	result = PRAGMA_OMP_CLAUSE_REDUCTION;
      else if (!strcmp (p, "schedule"))
	result = PRAGMA_OMP_CLAUSE_SCHEDULE;
      else if (!strcmp (p, "shared"))
	result = PRAGMA_OMP_CLAUSE_SHARED;
      else if (!strcmp (p, "untied"))
	result = PRAGMA_OMP_CLAUSE_UNTIED;
    }

  /* Consume the token only on a successful match, as documented.  */
  if (result != PRAGMA_OMP_CLAUSE_NONE)
    cp_lexer_consume_token (parser->lexer);

  return result;
}
/* Validate that a clause of the given type does not already exist. */
static void
check_no_duplicate_clause (tree clauses, enum omp_clause_code code,
			   const char *name, location_t location)
{
  tree probe = clauses;

  /* Walk the chain; the first clause of kind CODE already present
     makes any new one a duplicate.  */
  while (probe != NULL_TREE)
    {
      if (OMP_CLAUSE_CODE (probe) == code)
	{
	  error_at (location, "too many %qs clauses", name);
	  return;
	}
      probe = OMP_CLAUSE_CHAIN (probe);
    }
}
/* OpenMP 2.5:
variable-list:
identifier
variable-list , identifier
In addition, we match a closing parenthesis. An opening parenthesis
will have been consumed by the caller.
If KIND is nonzero, create the appropriate node and install the decl
in OMP_CLAUSE_DECL and add the node to the head of the list.
If KIND is zero, create a TREE_LIST with the decl in TREE_PURPOSE;
return the list created. */
static tree
cp_parser_omp_var_list_no_open (cp_parser *parser, enum omp_clause_code kind,
				tree list)
{
  cp_token *token;
  while (1)
    {
      tree name, decl;

      token = cp_lexer_peek_token (parser->lexer);
      name = cp_parser_id_expression (parser, /*template_p=*/false,
				      /*check_dependency_p=*/true,
				      /*template_p=*/NULL,
				      /*declarator_p=*/false,
				      /*optional_p=*/false);
      if (name == error_mark_node)
	goto skip_comma;

      decl = cp_parser_lookup_name_simple (parser, name, token->location);
      if (decl == error_mark_node)
	cp_parser_name_lookup_error (parser, name, decl, NLE_NULL,
				     token->location);
      else if (kind != 0)
	{
	  /* Prepend a new clause of the requested kind wrapping DECL.  */
	  tree u = build_omp_clause (token->location, kind);
	  OMP_CLAUSE_DECL (u) = decl;
	  OMP_CLAUSE_CHAIN (u) = list;
	  list = u;
	}
      else
	/* KIND == 0: collect a plain TREE_LIST, per the comment above.  */
	list = tree_cons (decl, NULL_TREE, list);

    get_comma:
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA))
	break;
      cp_lexer_consume_token (parser->lexer);
    }

  if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN))
    {
      int ending;

      /* Try to resync to an unnested comma.  Copied from
	 cp_parser_parenthesized_expression_list.  */
    skip_comma:
      /* NOTE: this label is jumped to from inside the loop above when an
	 id-expression fails to parse; skipping stops at an unnested comma
	 (ending < 0), in which case we re-enter the loop at get_comma to
	 try the next variable.  */
      ending = cp_parser_skip_to_closing_parenthesis (parser,
						      /*recovering=*/true,
						      /*or_comma=*/true,
						      /*consume_paren=*/true);
      if (ending < 0)
	goto get_comma;
    }

  return list;
}
/* Similarly, but expect leading and trailing parenthesis. This is a very
common case for omp clauses. */
static tree
cp_parser_omp_var_list (cp_parser *parser, enum omp_clause_code kind, tree list)
{
  /* Without the opening parenthesis there is no list to parse; return
     the clause chain unchanged.  */
  if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN))
    return list;

  return cp_parser_omp_var_list_no_open (parser, kind, list);
}
/* OpenMP 3.0:
collapse ( constant-expression ) */
static tree
cp_parser_omp_clause_collapse (cp_parser *parser, tree list, location_t location)
{
  tree c, num;
  location_t loc;
  HOST_WIDE_INT n;

  loc = cp_lexer_peek_token (parser->lexer)->location;
  if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN))
    return list;

  num = cp_parser_constant_expression (parser, false, NULL);

  if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN))
    cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true,
					   /*or_comma=*/false,
					   /*consume_paren=*/true);

  if (num == error_mark_node)
    return list;
  num = fold_non_dependent_expr (num);
  /* The argument must be a positive constant integer that also fits in
     an int (the (int) n != n check rejects values truncated by the
     narrowing).  */
  if (!INTEGRAL_TYPE_P (TREE_TYPE (num))
      || !host_integerp (num, 0)
      || (n = tree_low_cst (num, 0)) <= 0
      || (int) n != n)
    {
      error_at (loc, "collapse argument needs positive constant integer expression");
      return list;
    }

  check_no_duplicate_clause (list, OMP_CLAUSE_COLLAPSE, "collapse", location);
  c = build_omp_clause (loc, OMP_CLAUSE_COLLAPSE);
  OMP_CLAUSE_CHAIN (c) = list;
  OMP_CLAUSE_COLLAPSE_EXPR (c) = num;
  return c;
}
/* OpenMP 2.5:
default ( shared | none ) */
static tree
cp_parser_omp_clause_default (cp_parser *parser, tree list, location_t location)
{
  enum omp_clause_default_kind kind = OMP_CLAUSE_DEFAULT_UNSPECIFIED;
  tree c;

  if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN))
    return list;
  if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
    {
      tree id = cp_lexer_peek_token (parser->lexer)->u.value;
      const char *p = IDENTIFIER_POINTER (id);

      /* Dispatch on the first character, then verify the full
	 spelling.  */
      switch (p[0])
	{
	case 'n':
	  if (strcmp ("none", p) != 0)
	    goto invalid_kind;
	  kind = OMP_CLAUSE_DEFAULT_NONE;
	  break;

	case 's':
	  if (strcmp ("shared", p) != 0)
	    goto invalid_kind;
	  kind = OMP_CLAUSE_DEFAULT_SHARED;
	  break;

	default:
	  goto invalid_kind;
	}

      cp_lexer_consume_token (parser->lexer);
    }
  else
    {
    invalid_kind:
      /* KIND stays UNSPECIFIED here, so after resyncing below we
	 return LIST unchanged.  */
      cp_parser_error (parser, "expected %<none%> or %<shared%>");
    }

  if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN))
    cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true,
					   /*or_comma=*/false,
					   /*consume_paren=*/true);

  if (kind == OMP_CLAUSE_DEFAULT_UNSPECIFIED)
    return list;

  check_no_duplicate_clause (list, OMP_CLAUSE_DEFAULT, "default", location);
  c = build_omp_clause (location, OMP_CLAUSE_DEFAULT);
  OMP_CLAUSE_CHAIN (c) = list;
  OMP_CLAUSE_DEFAULT_KIND (c) = kind;
  return c;
}
/* OpenMP 3.1:
final ( expression ) */
static tree
cp_parser_omp_clause_final (cp_parser *parser, tree list, location_t location)
{
  tree expr, clause;

  if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN))
    return list;

  expr = cp_parser_condition (parser);

  /* On a bad expression or a missing ')', resynchronize past the
     closing parenthesis; the clause is still built below.  */
  if (expr == error_mark_node
      || !cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN))
    cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true,
					   /*or_comma=*/false,
					   /*consume_paren=*/true);

  check_no_duplicate_clause (list, OMP_CLAUSE_FINAL, "final", location);

  clause = build_omp_clause (location, OMP_CLAUSE_FINAL);
  OMP_CLAUSE_FINAL_EXPR (clause) = expr;
  OMP_CLAUSE_CHAIN (clause) = list;
  return clause;
}
/* OpenMP 2.5:
if ( expression ) */
static tree
cp_parser_omp_clause_if (cp_parser *parser, tree list, location_t location)
{
  tree expr, clause;

  if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN))
    return list;

  expr = cp_parser_condition (parser);

  /* On a bad expression or a missing ')', resynchronize past the
     closing parenthesis; the clause is still built below.  */
  if (expr == error_mark_node
      || !cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN))
    cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true,
					   /*or_comma=*/false,
					   /*consume_paren=*/true);

  check_no_duplicate_clause (list, OMP_CLAUSE_IF, "if", location);

  clause = build_omp_clause (location, OMP_CLAUSE_IF);
  OMP_CLAUSE_IF_EXPR (clause) = expr;
  OMP_CLAUSE_CHAIN (clause) = list;
  return clause;
}
/* OpenMP 3.1:
mergeable */
static tree
cp_parser_omp_clause_mergeable (cp_parser *parser ATTRIBUTE_UNUSED,
				tree list, location_t location)
{
  tree clause;

  /* "mergeable" takes no argument; only guard against repetition.  */
  check_no_duplicate_clause (list, OMP_CLAUSE_MERGEABLE, "mergeable",
			     location);

  clause = build_omp_clause (location, OMP_CLAUSE_MERGEABLE);
  OMP_CLAUSE_CHAIN (clause) = list;
  return clause;
}
/* OpenMP 2.5:
nowait */
static tree
cp_parser_omp_clause_nowait (cp_parser *parser ATTRIBUTE_UNUSED,
			     tree list, location_t location)
{
  tree clause;

  /* "nowait" takes no argument; only guard against repetition.  */
  check_no_duplicate_clause (list, OMP_CLAUSE_NOWAIT, "nowait", location);

  clause = build_omp_clause (location, OMP_CLAUSE_NOWAIT);
  OMP_CLAUSE_CHAIN (clause) = list;
  return clause;
}
/* OpenMP 2.5:
num_threads ( expression ) */
static tree
cp_parser_omp_clause_num_threads (cp_parser *parser, tree list,
				  location_t location)
{
  tree expr, clause;

  if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN))
    return list;

  expr = cp_parser_expression (parser, false, NULL);

  /* On a bad expression or a missing ')', resynchronize past the
     closing parenthesis; the clause is still built below.  */
  if (expr == error_mark_node
      || !cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN))
    cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true,
					   /*or_comma=*/false,
					   /*consume_paren=*/true);

  check_no_duplicate_clause (list, OMP_CLAUSE_NUM_THREADS,
			     "num_threads", location);

  clause = build_omp_clause (location, OMP_CLAUSE_NUM_THREADS);
  OMP_CLAUSE_NUM_THREADS_EXPR (clause) = expr;
  OMP_CLAUSE_CHAIN (clause) = list;
  return clause;
}
/* OpenMP 2.5:
ordered */
static tree
cp_parser_omp_clause_ordered (cp_parser *parser ATTRIBUTE_UNUSED,
			      tree list, location_t location)
{
  tree clause;

  /* "ordered" takes no argument; only guard against repetition.  */
  check_no_duplicate_clause (list, OMP_CLAUSE_ORDERED,
			     "ordered", location);

  clause = build_omp_clause (location, OMP_CLAUSE_ORDERED);
  OMP_CLAUSE_CHAIN (clause) = list;
  return clause;
}
/* OpenMP 2.5:
reduction ( reduction-operator : variable-list )
reduction-operator:
One of: + * - & ^ | && ||
OpenMP 3.1:
reduction-operator:
One of: + * - & ^ | && || min max */
static tree
cp_parser_omp_clause_reduction (cp_parser *parser, tree list)
{
  enum tree_code code;
  tree nlist, c;

  if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN))
    return list;

  /* Map the reduction-operator token to the corresponding tree code.  */
  switch (cp_lexer_peek_token (parser->lexer)->type)
    {
    case CPP_PLUS:
      code = PLUS_EXPR;
      break;
    case CPP_MULT:
      code = MULT_EXPR;
      break;
    case CPP_MINUS:
      code = MINUS_EXPR;
      break;
    case CPP_AND:
      code = BIT_AND_EXPR;
      break;
    case CPP_XOR:
      code = BIT_XOR_EXPR;
      break;
    case CPP_OR:
      code = BIT_IOR_EXPR;
      break;
    case CPP_AND_AND:
      code = TRUTH_ANDIF_EXPR;
      break;
    case CPP_OR_OR:
      code = TRUTH_ORIF_EXPR;
      break;
    case CPP_NAME:
      {
	/* OpenMP 3.1 also allows the identifiers "min" and "max".  */
	tree id = cp_lexer_peek_token (parser->lexer)->u.value;
	const char *p = IDENTIFIER_POINTER (id);

	if (strcmp (p, "min") == 0)
	  {
	    code = MIN_EXPR;
	    break;
	  }
	if (strcmp (p, "max") == 0)
	  {
	    code = MAX_EXPR;
	    break;
	  }
      }
      /* FALLTHROUGH */
    default:
      cp_parser_error (parser, "expected %<+%>, %<*%>, %<-%>, %<&%>, %<^%>, "
			       "%<|%>, %<&&%>, %<||%>, %<min%> or %<max%>");
    resync_fail:
      /* Shared recovery point: skip to the closing ')' and return the
	 list unchanged (also jumped to when the ':' is missing).  */
      cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true,
					     /*or_comma=*/false,
					     /*consume_paren=*/true);
      return list;
    }
  cp_lexer_consume_token (parser->lexer);

  if (!cp_parser_require (parser, CPP_COLON, RT_COLON))
    goto resync_fail;

  nlist = cp_parser_omp_var_list_no_open (parser, OMP_CLAUSE_REDUCTION, list);
  /* The new clauses were prepended to LIST; stamp each of them with the
     reduction operator.  */
  for (c = nlist; c != list; c = OMP_CLAUSE_CHAIN (c))
    OMP_CLAUSE_REDUCTION_CODE (c) = code;

  return nlist;
}
/* OpenMP 2.5:
schedule ( schedule-kind )
schedule ( schedule-kind , expression )
schedule-kind:
static | dynamic | guided | runtime | auto */
static tree
cp_parser_omp_clause_schedule (cp_parser *parser, tree list, location_t location)
{
  tree c, t;

  if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN))
    return list;

  c = build_omp_clause (location, OMP_CLAUSE_SCHEDULE);

  if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
    {
      tree id = cp_lexer_peek_token (parser->lexer)->u.value;
      const char *p = IDENTIFIER_POINTER (id);

      /* Dispatch on the first character, then verify the full
	 spelling.  */
      switch (p[0])
	{
	case 'd':
	  if (strcmp ("dynamic", p) != 0)
	    goto invalid_kind;
	  OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_DYNAMIC;
	  break;

	case 'g':
	  if (strcmp ("guided", p) != 0)
	    goto invalid_kind;
	  OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_GUIDED;
	  break;

	case 'r':
	  if (strcmp ("runtime", p) != 0)
	    goto invalid_kind;
	  OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_RUNTIME;
	  break;

	default:
	  goto invalid_kind;
	}
    }
  /* "static" and "auto" are keywords, so they never arrive as
     CPP_NAME tokens.  */
  else if (cp_lexer_next_token_is_keyword (parser->lexer, RID_STATIC))
    OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_STATIC;
  else if (cp_lexer_next_token_is_keyword (parser->lexer, RID_AUTO))
    OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_AUTO;
  else
    goto invalid_kind;
  cp_lexer_consume_token (parser->lexer);

  if (cp_lexer_next_token_is (parser->lexer, CPP_COMMA))
    {
      cp_token *token;
      /* Optional chunk-size expression after the comma.  */
      cp_lexer_consume_token (parser->lexer);

      token = cp_lexer_peek_token (parser->lexer);
      t = cp_parser_assignment_expression (parser, false, NULL);

      if (t == error_mark_node)
	goto resync_fail;
      else if (OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_RUNTIME)
	error_at (token->location, "schedule %<runtime%> does not take "
		  "a %<chunk_size%> parameter");
      else if (OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_AUTO)
	error_at (token->location, "schedule %<auto%> does not take "
		  "a %<chunk_size%> parameter");
      else
	OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (c) = t;

      if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN))
	goto resync_fail;
    }
  else if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_COMMA_CLOSE_PAREN))
    goto resync_fail;

  check_no_duplicate_clause (list, OMP_CLAUSE_SCHEDULE, "schedule", location);
  OMP_CLAUSE_CHAIN (c) = list;
  return c;

 invalid_kind:
  cp_parser_error (parser, "invalid schedule kind");
 resync_fail:
  /* Recovery: skip to the closing ')' and discard the clause.  */
  cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true,
					 /*or_comma=*/false,
					 /*consume_paren=*/true);
  return list;
}
/* OpenMP 3.0:
untied */
static tree
cp_parser_omp_clause_untied (cp_parser *parser ATTRIBUTE_UNUSED,
			     tree list, location_t location)
{
  tree clause;

  /* "untied" takes no argument; only guard against repetition.  */
  check_no_duplicate_clause (list, OMP_CLAUSE_UNTIED, "untied", location);

  clause = build_omp_clause (location, OMP_CLAUSE_UNTIED);
  OMP_CLAUSE_CHAIN (clause) = list;
  return clause;
}
/* Parse all OpenMP clauses.  The set clauses allowed by the directive
   is a bitmask in MASK.  Return the list of clauses found; WHERE names
   the directive for diagnostics about clauses it does not permit.  */
static tree
cp_parser_omp_all_clauses (cp_parser *parser, unsigned int mask,
			   const char *where, cp_token *pragma_tok)
{
  tree clauses = NULL;
  bool first = true;
  cp_token *token = NULL;

  while (cp_lexer_next_token_is_not (parser->lexer, CPP_PRAGMA_EOL))
    {
      pragma_omp_clause c_kind;
      const char *c_name;
      /* Remember the chain head so an invalid clause can be dropped.  */
      tree prev = clauses;

      /* Clauses may be separated by an optional comma (not before the
	 first clause).  */
      if (!first && cp_lexer_next_token_is (parser->lexer, CPP_COMMA))
	cp_lexer_consume_token (parser->lexer);

      token = cp_lexer_peek_token (parser->lexer);
      c_kind = cp_parser_omp_clause_name (parser);
      first = false;

      switch (c_kind)
	{
	case PRAGMA_OMP_CLAUSE_COLLAPSE:
	  clauses = cp_parser_omp_clause_collapse (parser, clauses,
						   token->location);
	  c_name = "collapse";
	  break;
	case PRAGMA_OMP_CLAUSE_COPYIN:
	  clauses = cp_parser_omp_var_list (parser, OMP_CLAUSE_COPYIN, clauses);
	  c_name = "copyin";
	  break;
	case PRAGMA_OMP_CLAUSE_COPYPRIVATE:
	  clauses = cp_parser_omp_var_list (parser, OMP_CLAUSE_COPYPRIVATE,
					    clauses);
	  c_name = "copyprivate";
	  break;
	case PRAGMA_OMP_CLAUSE_DEFAULT:
	  clauses = cp_parser_omp_clause_default (parser, clauses,
						  token->location);
	  c_name = "default";
	  break;
	case PRAGMA_OMP_CLAUSE_FINAL:
	  clauses = cp_parser_omp_clause_final (parser, clauses, token->location);
	  c_name = "final";
	  break;
	case PRAGMA_OMP_CLAUSE_FIRSTPRIVATE:
	  clauses = cp_parser_omp_var_list (parser, OMP_CLAUSE_FIRSTPRIVATE,
					    clauses);
	  c_name = "firstprivate";
	  break;
	case PRAGMA_OMP_CLAUSE_IF:
	  clauses = cp_parser_omp_clause_if (parser, clauses, token->location);
	  c_name = "if";
	  break;
	case PRAGMA_OMP_CLAUSE_LASTPRIVATE:
	  clauses = cp_parser_omp_var_list (parser, OMP_CLAUSE_LASTPRIVATE,
					    clauses);
	  c_name = "lastprivate";
	  break;
	case PRAGMA_OMP_CLAUSE_MERGEABLE:
	  clauses = cp_parser_omp_clause_mergeable (parser, clauses,
						    token->location);
	  c_name = "mergeable";
	  break;
	case PRAGMA_OMP_CLAUSE_NOWAIT:
	  clauses = cp_parser_omp_clause_nowait (parser, clauses, token->location);
	  c_name = "nowait";
	  break;
	case PRAGMA_OMP_CLAUSE_NUM_THREADS:
	  clauses = cp_parser_omp_clause_num_threads (parser, clauses,
						      token->location);
	  c_name = "num_threads";
	  break;
	case PRAGMA_OMP_CLAUSE_ORDERED:
	  clauses = cp_parser_omp_clause_ordered (parser, clauses,
						  token->location);
	  c_name = "ordered";
	  break;
	case PRAGMA_OMP_CLAUSE_PRIVATE:
	  clauses = cp_parser_omp_var_list (parser, OMP_CLAUSE_PRIVATE,
					    clauses);
	  c_name = "private";
	  break;
	case PRAGMA_OMP_CLAUSE_REDUCTION:
	  clauses = cp_parser_omp_clause_reduction (parser, clauses);
	  c_name = "reduction";
	  break;
	case PRAGMA_OMP_CLAUSE_SCHEDULE:
	  clauses = cp_parser_omp_clause_schedule (parser, clauses,
						   token->location);
	  c_name = "schedule";
	  break;
	case PRAGMA_OMP_CLAUSE_SHARED:
	  clauses = cp_parser_omp_var_list (parser, OMP_CLAUSE_SHARED,
					    clauses);
	  c_name = "shared";
	  break;
	case PRAGMA_OMP_CLAUSE_UNTIED:
	  clauses = cp_parser_omp_clause_untied (parser, clauses,
						 token->location);
	  /* Fixed: this previously said "nowait", which made the
	     "%qs is not valid for %qs" diagnostic below name the wrong
	     clause when "untied" appeared on a directive that does not
	     permit it.  */
	  c_name = "untied";
	  break;
	default:
	  cp_parser_error (parser, "expected %<#pragma omp%> clause");
	  goto saw_error;
	}

      if (((mask >> c_kind) & 1) == 0)
	{
	  /* Remove the invalid clause(s) from the list to avoid
	     confusing the rest of the compiler.  */
	  clauses = prev;
	  error_at (token->location, "%qs is not valid for %qs", c_name, where);
	}
    }
 saw_error:
  cp_parser_skip_to_pragma_eol (parser, pragma_tok);
  return finish_omp_clauses (clauses);
}
/* OpenMP 2.5:
structured-block:
statement
In practice, we're also interested in adding the statement to an
outer node. So it is convenient if we work around the fact that
cp_parser_statement calls add_stmt. */
static unsigned
cp_parser_begin_omp_structured_block (cp_parser *parser)
{
  /* Save the caller's statement context so it can be restored by
     cp_parser_end_omp_structured_block.  */
  unsigned save = parser->in_statement;

  /* Only move the values to IN_OMP_BLOCK if they weren't false.
     This preserves the "not within loop or switch" style error messages
     for nonsense cases like
       void foo() {
       #pragma omp single
	 break;
       }
  */
  if (parser->in_statement)
    parser->in_statement = IN_OMP_BLOCK;

  return save;
}
/* Restore the statement context saved by
   cp_parser_begin_omp_structured_block.  */
static void
cp_parser_end_omp_structured_block (cp_parser *parser, unsigned save)
{
  parser->in_statement = save;
}
/* Parse the single statement that forms an OpenMP structured block,
   wrapped in its own statement list.  */
static tree
cp_parser_omp_structured_block (cp_parser *parser)
{
  tree block = begin_omp_structured_block ();
  unsigned int saved_state = cp_parser_begin_omp_structured_block (parser);

  cp_parser_statement (parser, NULL_TREE, false, NULL);

  cp_parser_end_omp_structured_block (parser, saved_state);
  return finish_omp_structured_block (block);
}
/* OpenMP 2.5:
# pragma omp atomic new-line
expression-stmt
expression-stmt:
x binop= expr | x++ | ++x | x-- | --x
binop:
+, *, -, /, &, ^, |, <<, >>
where x is an lvalue expression with scalar type.
OpenMP 3.1:
# pragma omp atomic new-line
update-stmt
# pragma omp atomic read new-line
read-stmt
# pragma omp atomic write new-line
write-stmt
# pragma omp atomic update new-line
update-stmt
# pragma omp atomic capture new-line
capture-stmt
# pragma omp atomic capture new-line
capture-block
read-stmt:
v = x
write-stmt:
x = expr
update-stmt:
expression-stmt | x = x binop expr
capture-stmt:
v = x binop= expr | v = x++ | v = ++x | v = x-- | v = --x
capture-block:
{ v = x; update-stmt; } | { update-stmt; v = x; }
where x and v are lvalue expressions with scalar type. */
static void
cp_parser_omp_atomic (cp_parser *parser, cp_token *pragma_tok)
{
  tree lhs = NULL_TREE, rhs = NULL_TREE, v = NULL_TREE, lhs1 = NULL_TREE;
  tree rhs1 = NULL_TREE, orig_lhs;
  enum tree_code code = OMP_ATOMIC, opcode = NOP_EXPR;
  bool structured_block = false;

  /* Parse the optional OpenMP 3.1 kind: read, write, update or capture.
     The default (no kind) is an update, OMP_ATOMIC.  "write" is
     temporarily encoded as NOP_EXPR and converted to OMP_ATOMIC with a
     NOP_EXPR opcode below.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
    {
      tree id = cp_lexer_peek_token (parser->lexer)->u.value;
      const char *p = IDENTIFIER_POINTER (id);

      if (!strcmp (p, "read"))
	code = OMP_ATOMIC_READ;
      else if (!strcmp (p, "write"))
	code = NOP_EXPR;
      else if (!strcmp (p, "update"))
	code = OMP_ATOMIC;
      else if (!strcmp (p, "capture"))
	code = OMP_ATOMIC_CAPTURE_NEW;
      else
	p = NULL;
      if (p)
	cp_lexer_consume_token (parser->lexer);
    }
  cp_parser_require_pragma_eol (parser, pragma_tok);

  switch (code)
    {
    case OMP_ATOMIC_READ:
    case NOP_EXPR: /* atomic write */
      /* Both the read form (v = x) and the write form (x = expr) start
	 with a unary expression followed by '='.  */
      v = cp_parser_unary_expression (parser, /*address_p=*/false,
				      /*cast_p=*/false, NULL);
      if (v == error_mark_node)
	goto saw_error;
      if (!cp_parser_require (parser, CPP_EQ, RT_EQ))
	goto saw_error;
      if (code == NOP_EXPR)
	lhs = cp_parser_expression (parser, /*cast_p=*/false, NULL);
      else
	lhs = cp_parser_unary_expression (parser, /*address_p=*/false,
					  /*cast_p=*/false, NULL);
      if (lhs == error_mark_node)
	goto saw_error;
      if (code == NOP_EXPR)
	{
	  /* atomic write is represented by OMP_ATOMIC with NOP_EXPR
	     opcode.  */
	  code = OMP_ATOMIC;
	  rhs = lhs;
	  lhs = v;
	  v = NULL_TREE;
	}
      goto done;
    case OMP_ATOMIC_CAPTURE_NEW:
      if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
	{
	  /* Capture-block form: { v = x; update; } or { update; v = x; }.  */
	  cp_lexer_consume_token (parser->lexer);
	  structured_block = true;
	}
      else
	{
	  /* Capture-statement form: v = <update-expression>.  */
	  v = cp_parser_unary_expression (parser, /*address_p=*/false,
					  /*cast_p=*/false, NULL);
	  if (v == error_mark_node)
	    goto saw_error;
	  if (!cp_parser_require (parser, CPP_EQ, RT_EQ))
	    goto saw_error;
	}
    default:
      break;
    }

  /* Parse an update statement: x binop= expr, x++, ++x, x--, --x or
     x = x binop expr.  Re-entered (goto below) for the second statement
     of the { v = x; update; } capture-block form.  */
restart:
  lhs = cp_parser_unary_expression (parser, /*address_p=*/false,
				    /*cast_p=*/false, NULL);
  orig_lhs = lhs;
  switch (TREE_CODE (lhs))
    {
    case ERROR_MARK:
      goto saw_error;
    case POSTINCREMENT_EXPR:
      /* "v = x++" captures the old value of x.  */
      if (code == OMP_ATOMIC_CAPTURE_NEW && !structured_block)
	code = OMP_ATOMIC_CAPTURE_OLD;
      /* FALLTHROUGH */
    case PREINCREMENT_EXPR:
      lhs = TREE_OPERAND (lhs, 0);
      opcode = PLUS_EXPR;
      rhs = integer_one_node;
      break;
    case POSTDECREMENT_EXPR:
      /* "v = x--" captures the old value of x.  */
      if (code == OMP_ATOMIC_CAPTURE_NEW && !structured_block)
	code = OMP_ATOMIC_CAPTURE_OLD;
      /* FALLTHROUGH */
    case PREDECREMENT_EXPR:
      lhs = TREE_OPERAND (lhs, 0);
      opcode = MINUS_EXPR;
      rhs = integer_one_node;
      break;
    case COMPOUND_EXPR:
      if (TREE_CODE (TREE_OPERAND (lhs, 0)) == SAVE_EXPR
	  && TREE_CODE (TREE_OPERAND (lhs, 1)) == COMPOUND_EXPR
	  && TREE_CODE (TREE_OPERAND (TREE_OPERAND (lhs, 1), 0)) == MODIFY_EXPR
	  && TREE_OPERAND (TREE_OPERAND (lhs, 1), 1) == TREE_OPERAND (lhs, 0)
	  && TREE_CODE (TREE_TYPE (TREE_OPERAND (TREE_OPERAND
					 (TREE_OPERAND (lhs, 1), 0), 0)))
	     == BOOLEAN_TYPE)
	/* Undo effects of boolean_increment for post {in,de}crement.  */
	lhs = TREE_OPERAND (TREE_OPERAND (lhs, 1), 0);
      /* FALLTHRU */
    case MODIFY_EXPR:
      if (TREE_CODE (lhs) == MODIFY_EXPR
	  && TREE_CODE (TREE_TYPE (TREE_OPERAND (lhs, 0))) == BOOLEAN_TYPE)
	{
	  /* Undo effects of boolean_increment.  */
	  if (integer_onep (TREE_OPERAND (lhs, 1)))
	    {
	      /* This is pre or post increment.  */
	      rhs = TREE_OPERAND (lhs, 1);
	      lhs = TREE_OPERAND (lhs, 0);
	      opcode = NOP_EXPR;
	      if (code == OMP_ATOMIC_CAPTURE_NEW
		  && !structured_block
		  && TREE_CODE (orig_lhs) == COMPOUND_EXPR)
		code = OMP_ATOMIC_CAPTURE_OLD;
	      break;
	    }
	}
      /* FALLTHRU */
    default:
      /* Look at the operator after the lhs to classify the statement.  */
      switch (cp_lexer_peek_token (parser->lexer)->type)
	{
	case CPP_MULT_EQ:
	  opcode = MULT_EXPR;
	  break;
	case CPP_DIV_EQ:
	  opcode = TRUNC_DIV_EXPR;
	  break;
	case CPP_PLUS_EQ:
	  opcode = PLUS_EXPR;
	  break;
	case CPP_MINUS_EQ:
	  opcode = MINUS_EXPR;
	  break;
	case CPP_LSHIFT_EQ:
	  opcode = LSHIFT_EXPR;
	  break;
	case CPP_RSHIFT_EQ:
	  opcode = RSHIFT_EXPR;
	  break;
	case CPP_AND_EQ:
	  opcode = BIT_AND_EXPR;
	  break;
	case CPP_OR_EQ:
	  opcode = BIT_IOR_EXPR;
	  break;
	case CPP_XOR_EQ:
	  opcode = BIT_XOR_EXPR;
	  break;
	case CPP_EQ:
	  /* "x = x binop expr" form (or, for capture, possibly the
	     leading "v = x;" of a capture block).  */
	  if (structured_block || code == OMP_ATOMIC)
	    {
	      enum cp_parser_prec oprec;
	      cp_token *token;
	      cp_lexer_consume_token (parser->lexer);
	      rhs1 = cp_parser_unary_expression (parser, /*address_p=*/false,
						 /*cast_p=*/false, NULL);
	      if (rhs1 == error_mark_node)
		goto saw_error;
	      token = cp_lexer_peek_token (parser->lexer);
	      switch (token->type)
		{
		case CPP_SEMICOLON:
		  if (code == OMP_ATOMIC_CAPTURE_NEW)
		    {
		      /* We actually parsed "v = x;": record the capture
			 and go parse the update statement that follows;
			 the captured value is the old one.  */
		      code = OMP_ATOMIC_CAPTURE_OLD;
		      v = lhs;
		      lhs = NULL_TREE;
		      lhs1 = rhs1;
		      rhs1 = NULL_TREE;
		      cp_lexer_consume_token (parser->lexer);
		      goto restart;
		    }
		  cp_parser_error (parser,
				   "invalid form of %<#pragma omp atomic%>");
		  goto saw_error;
		case CPP_MULT:
		  opcode = MULT_EXPR;
		  break;
		case CPP_DIV:
		  opcode = TRUNC_DIV_EXPR;
		  break;
		case CPP_PLUS:
		  opcode = PLUS_EXPR;
		  break;
		case CPP_MINUS:
		  opcode = MINUS_EXPR;
		  break;
		case CPP_LSHIFT:
		  opcode = LSHIFT_EXPR;
		  break;
		case CPP_RSHIFT:
		  opcode = RSHIFT_EXPR;
		  break;
		case CPP_AND:
		  opcode = BIT_AND_EXPR;
		  break;
		case CPP_OR:
		  opcode = BIT_IOR_EXPR;
		  break;
		case CPP_XOR:
		  opcode = BIT_XOR_EXPR;
		  break;
		default:
		  cp_parser_error (parser,
				   "invalid operator for %<#pragma omp atomic%>");
		  goto saw_error;
		}
	      /* Parse the rest of the rhs at a precedence just above the
		 operator (just at it for commutative operators), so the
		 leading "x binop" is not folded into a larger expression.  */
	      oprec = TOKEN_PRECEDENCE (token);
	      gcc_assert (oprec != PREC_NOT_OPERATOR);
	      if (commutative_tree_code (opcode))
		oprec = (enum cp_parser_prec) (oprec - 1);
	      cp_lexer_consume_token (parser->lexer);
	      rhs = cp_parser_binary_expression (parser, false, false,
						 oprec, NULL);
	      if (rhs == error_mark_node)
		goto saw_error;
	      goto stmt_done;
	    }
	  /* FALLTHROUGH */
	default:
	  cp_parser_error (parser,
			   "invalid operator for %<#pragma omp atomic%>");
	  goto saw_error;
	}
      cp_lexer_consume_token (parser->lexer);

      rhs = cp_parser_expression (parser, false, NULL);
      if (rhs == error_mark_node)
	goto saw_error;
      break;
    }
stmt_done:
  /* For the { update; v = x; } capture-block form, parse the trailing
     "v = x" statement.  */
  if (structured_block && code == OMP_ATOMIC_CAPTURE_NEW)
    {
      if (!cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON))
	goto saw_error;
      v = cp_parser_unary_expression (parser, /*address_p=*/false,
				      /*cast_p=*/false, NULL);
      if (v == error_mark_node)
	goto saw_error;
      if (!cp_parser_require (parser, CPP_EQ, RT_EQ))
	goto saw_error;
      lhs1 = cp_parser_unary_expression (parser, /*address_p=*/false,
					 /*cast_p=*/false, NULL);
      if (lhs1 == error_mark_node)
	goto saw_error;
    }
  if (structured_block)
    {
      cp_parser_consume_semicolon_at_end_of_statement (parser);
      cp_parser_require (parser, CPP_CLOSE_BRACE, RT_CLOSE_BRACE);
    }
done:
  finish_omp_atomic (code, opcode, lhs, rhs, v, lhs1, rhs1);
  if (!structured_block)
    cp_parser_consume_semicolon_at_end_of_statement (parser);
  return;

 saw_error:
  /* Error recovery: skip the malformed statement, and for the
     capture-block form also try to consume the closing brace.  */
  cp_parser_skip_to_end_of_block_or_statement (parser);
  if (structured_block)
    {
      if (cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_BRACE))
	cp_lexer_consume_token (parser->lexer);
      else if (code == OMP_ATOMIC_CAPTURE_NEW)
	{
	  cp_parser_skip_to_end_of_block_or_statement (parser);
	  if (cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_BRACE))
	    cp_lexer_consume_token (parser->lexer);
	}
    }
}
/* OpenMP 2.5:
# pragma omp barrier new-line */
static void
cp_parser_omp_barrier (cp_parser *parser, cp_token *pragma_tok)
{
  /* The barrier directive takes no clauses; require end of pragma and
     emit the barrier.  */
  cp_parser_require_pragma_eol (parser, pragma_tok);
  finish_omp_barrier ();
}
/* OpenMP 2.5:
# pragma omp critical [(name)] new-line
structured-block */
static tree
cp_parser_omp_critical (cp_parser *parser, cp_token *pragma_tok)
{
  tree stmt, name = NULL;

  /* Parse the optional parenthesized region name.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN))
    {
      cp_lexer_consume_token (parser->lexer);

      name = cp_parser_identifier (parser);

      if (name == error_mark_node
	  || !cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN))
	cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true,
					       /*or_comma=*/false,
					       /*consume_paren=*/true);
      if (name == error_mark_node)
	/* Degrade gracefully to the anonymous critical region.  */
	name = NULL;
    }
  cp_parser_require_pragma_eol (parser, pragma_tok);

  stmt = cp_parser_omp_structured_block (parser);
  return c_finish_omp_critical (input_location, stmt, name);
}
/* OpenMP 2.5:
# pragma omp flush flush-vars[opt] new-line
flush-vars:
( variable-list ) */
static void
cp_parser_omp_flush (cp_parser *parser, cp_token *pragma_tok)
{
  /* The optional variable list is parsed only for its syntax; its
     result is discarded (the flush affects all visible variables).  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN))
    (void) cp_parser_omp_var_list (parser, OMP_CLAUSE_ERROR, NULL);
  cp_parser_require_pragma_eol (parser, pragma_tok);

  finish_omp_flush ();
}
/* Helper function, to parse the omp for condition expression.  */
static tree
cp_parser_omp_for_cond (cp_parser *parser, tree decl)
{
  tree cond = cp_parser_binary_expression (parser, false, true,
					   PREC_NOT_OPERATOR, NULL);
  if (cond == error_mark_node
      || cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
    {
      cp_parser_skip_to_end_of_statement (parser);
      return error_mark_node;
    }

  /* Only relational comparisons are valid OpenMP loop conditions.  */
  switch (TREE_CODE (cond))
    {
    case GT_EXPR:
    case GE_EXPR:
    case LT_EXPR:
    case LE_EXPR:
      break;
    default:
      return error_mark_node;
    }

  /* If decl is an iterator, preserve LHS and RHS of the relational
     expr until finish_omp_for.  */
  if (decl
      && (type_dependent_expression_p (decl)
	  || CLASS_TYPE_P (TREE_TYPE (decl))))
    return cond;

  return build_x_binary_op (TREE_CODE (cond),
			    TREE_OPERAND (cond, 0), ERROR_MARK,
			    TREE_OPERAND (cond, 1), ERROR_MARK,
			    /*overload=*/NULL, tf_warning_or_error);
}
/* Helper function, to parse omp for increment expression. */
static tree
cp_parser_omp_for_incr (cp_parser *parser, tree decl)
{
  cp_token *token = cp_lexer_peek_token (parser->lexer);
  enum tree_code op;
  tree lhs, rhs;
  cp_id_kind idk;
  bool decl_first;

  /* ++decl / --decl forms.  */
  if (token->type == CPP_PLUS_PLUS || token->type == CPP_MINUS_MINUS)
    {
      op = (token->type == CPP_PLUS_PLUS
	    ? PREINCREMENT_EXPR : PREDECREMENT_EXPR);
      cp_lexer_consume_token (parser->lexer);
      lhs = cp_parser_cast_expression (parser, false, false, NULL);
      if (lhs != decl)
	return error_mark_node;
      return build2 (op, TREE_TYPE (decl), decl, NULL_TREE);
    }

  /* All remaining forms begin with the iteration variable itself.  */
  lhs = cp_parser_primary_expression (parser, false, false, false, &idk);
  if (lhs != decl)
    return error_mark_node;

  /* decl++ / decl-- forms.  */
  token = cp_lexer_peek_token (parser->lexer);
  if (token->type == CPP_PLUS_PLUS || token->type == CPP_MINUS_MINUS)
    {
      op = (token->type == CPP_PLUS_PLUS
	    ? POSTINCREMENT_EXPR : POSTDECREMENT_EXPR);
      cp_lexer_consume_token (parser->lexer);
      return build2 (op, TREE_TYPE (decl), decl, NULL_TREE);
    }

  op = cp_parser_assignment_operator_opt (parser);
  if (op == ERROR_MARK)
    return error_mark_node;

  if (op != NOP_EXPR)
    {
      /* Compound assignment: decl op= rhs becomes decl = decl op rhs.  */
      rhs = cp_parser_assignment_expression (parser, false, NULL);
      rhs = build2 (op, TREE_TYPE (decl), decl, rhs);
      return build2 (MODIFY_EXPR, TREE_TYPE (decl), decl, rhs);
    }

  /* Plain assignment: decl = <additive expression>, where decl must
     appear as the first or last term of a +/- chain.  Fold everything
     other than decl into LHS as we scan the chain.  */
  lhs = cp_parser_binary_expression (parser, false, false,
				     PREC_ADDITIVE_EXPRESSION, NULL);
  token = cp_lexer_peek_token (parser->lexer);
  decl_first = lhs == decl;
  if (decl_first)
    lhs = NULL_TREE;
  if (token->type != CPP_PLUS
      && token->type != CPP_MINUS)
    return error_mark_node;

  do
    {
      op = token->type == CPP_PLUS ? PLUS_EXPR : MINUS_EXPR;
      cp_lexer_consume_token (parser->lexer);
      rhs = cp_parser_binary_expression (parser, false, false,
					 PREC_ADDITIVE_EXPRESSION, NULL);
      token = cp_lexer_peek_token (parser->lexer);
      if (token->type == CPP_PLUS || token->type == CPP_MINUS || decl_first)
	{
	  /* RHS is not the final term (or decl already came first), so
	     accumulate it into LHS.  */
	  if (lhs == NULL_TREE)
	    {
	      if (op == PLUS_EXPR)
		lhs = rhs;
	      else
		lhs = build_x_unary_op (NEGATE_EXPR, rhs, tf_warning_or_error);
	    }
	  else
	    lhs = build_x_binary_op (op, lhs, ERROR_MARK, rhs, ERROR_MARK,
				     NULL, tf_warning_or_error);
	}
    }
  while (token->type == CPP_PLUS || token->type == CPP_MINUS);

  if (!decl_first)
    {
      /* decl must be the last term, and "expr - decl" is not a valid
	 increment.  */
      if (rhs != decl || op == MINUS_EXPR)
	return error_mark_node;
      rhs = build2 (op, TREE_TYPE (decl), lhs, decl);
    }
  else
    rhs = build2 (PLUS_EXPR, TREE_TYPE (decl), decl, lhs);

  return build2 (MODIFY_EXPR, TREE_TYPE (decl), decl, rhs);
}
/* Parse the restricted form of the for statement allowed by OpenMP.
   CLAUSES is the clause chain already parsed for the directive; it is
   consulted for collapse(N) and extended with implicit private /
   lastprivate clauses for the iteration variables.  PAR_CLAUSES, when
   non-NULL, points to the clause chain of the enclosing combined
   "parallel for" so clauses naming an iteration variable can be moved
   between the two constructs or diagnosed.  Returns the tree built by
   finish_omp_for, or NULL/NULL_TREE after a parse error.  */
static tree
cp_parser_omp_for_loop (cp_parser *parser, tree clauses, tree *par_clauses)
{
  tree init, cond, incr, body, decl, pre_body = NULL_TREE, ret;
  tree real_decl, initv, condv, incrv, declv;
  tree this_pre_body, cl;
  location_t loc_first;
  bool collapse_err = false;
  int i, collapse = 1, nbraces = 0;
  VEC(tree,gc) *for_block = make_tree_vector ();

  /* collapse (N) tells us how many perfectly nested for loops to
     consume below.  */
  for (cl = clauses; cl; cl = OMP_CLAUSE_CHAIN (cl))
    if (OMP_CLAUSE_CODE (cl) == OMP_CLAUSE_COLLAPSE)
      collapse = tree_low_cst (OMP_CLAUSE_COLLAPSE_EXPR (cl), 0);
  gcc_assert (collapse >= 1);

  /* One TREE_VEC slot per collapsed loop for its decl/init/cond/incr.  */
  declv = make_tree_vec (collapse);
  initv = make_tree_vec (collapse);
  condv = make_tree_vec (collapse);
  incrv = make_tree_vec (collapse);

  loc_first = cp_lexer_peek_token (parser->lexer)->location;

  for (i = 0; i < collapse; i++)
    {
      int bracecount = 0;
      bool add_private_clause = false;
      location_t loc;

      if (!cp_lexer_next_token_is_keyword (parser->lexer, RID_FOR))
	{
	  cp_parser_error (parser, "for statement expected");
	  return NULL;
	}
      loc = cp_lexer_consume_token (parser->lexer)->location;

      if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN))
	return NULL;

      init = decl = real_decl = NULL;
      /* Statements produced while parsing the init-expr (e.g. a class
	 iterator's construction) are collected as PRE_BODY.  */
      this_pre_body = push_stmt_list ();
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
	{
	  /* See 2.5.1 (in OpenMP 3.0, similar wording is in 2.5 standard too):
	     init-expr:
	     var = lb
	     integer-type var = lb
	     random-access-iterator-type var = lb
	     pointer-type var = lb
	  */
	  cp_decl_specifier_seq type_specifiers;

	  /* First, try to parse as an initialized declaration.  See
	     cp_parser_condition, from whence the bulk of this is copied.  */
	  cp_parser_parse_tentatively (parser);
	  cp_parser_type_specifier_seq (parser, /*is_declaration=*/true,
					/*is_trailing_return=*/false,
					&type_specifiers);
	  if (cp_parser_parse_definitely (parser))
	    {
	      /* If parsing a type specifier seq succeeded, then this
		 MUST be a initialized declaration.  */
	      tree asm_specification, attributes;
	      cp_declarator *declarator;

	      declarator = cp_parser_declarator (parser,
						 CP_PARSER_DECLARATOR_NAMED,
						 /*ctor_dtor_or_conv_p=*/NULL,
						 /*parenthesized_p=*/NULL,
						 /*member_p=*/false);
	      attributes = cp_parser_attributes_opt (parser);
	      asm_specification = cp_parser_asm_specification_opt (parser);

	      if (declarator == cp_error_declarator)
		cp_parser_skip_to_end_of_statement (parser);
	      else
		{
		  tree pushed_scope, auto_node;

		  decl = start_decl (declarator, &type_specifiers,
				     SD_INITIALIZED, attributes,
				     /*prefix_attributes=*/NULL_TREE,
				     &pushed_scope);

		  auto_node = type_uses_auto (TREE_TYPE (decl));
		  if (cp_lexer_next_token_is_not (parser->lexer, CPP_EQ))
		    {
		      /* Only "var = lb" is allowed here; direct
			 (parenthesized) initialization is rejected.  */
		      if (cp_lexer_next_token_is (parser->lexer,
						  CPP_OPEN_PAREN))
			error ("parenthesized initialization is not allowed in "
			       "OpenMP %<for%> loop");
		      else
			/* Trigger an error.  */
			cp_parser_require (parser, CPP_EQ, RT_EQ);

		      init = error_mark_node;
		      cp_parser_skip_to_end_of_statement (parser);
		    }
		  else if (CLASS_TYPE_P (TREE_TYPE (decl))
			   || type_dependent_expression_p (decl)
			   || auto_node)
		    {
		      /* Class-type / dependent / auto iteration variables
			 need a real initializer parse plus auto
			 deduction.  */
		      bool is_direct_init, is_non_constant_init;

		      init = cp_parser_initializer (parser,
						    &is_direct_init,
						    &is_non_constant_init);

		      if (auto_node)
			{
			  TREE_TYPE (decl)
			    = do_auto_deduction (TREE_TYPE (decl), init,
						 auto_node);

			  if (!CLASS_TYPE_P (TREE_TYPE (decl))
			      && !type_dependent_expression_p (decl))
			    /* Deduction produced a plain scalar; finish
			       it on the non-class path below.  */
			    goto non_class;
			}

		      cp_finish_decl (decl, init, !is_non_constant_init,
				      asm_specification,
				      LOOKUP_ONLYCONVERTING);
		      if (CLASS_TYPE_P (TREE_TYPE (decl)))
			{
			  /* Save the construction statements; they are
			     emitted around the whole OMP_FOR at the end
			     of this function.  */
			  VEC_safe_push (tree, gc, for_block, this_pre_body);
			  init = NULL_TREE;
			}
		      else
			init = pop_stmt_list (this_pre_body);
		      this_pre_body = NULL_TREE;
		    }
		  else
		    {
		      /* Consume '='.  */
		      cp_lexer_consume_token (parser->lexer);
		      init = cp_parser_assignment_expression (parser, false, NULL);

		    non_class:
		      if (TREE_CODE (TREE_TYPE (decl)) == REFERENCE_TYPE)
			init = error_mark_node;
		      else
			cp_finish_decl (decl, NULL_TREE,
					/*init_const_expr_p=*/false,
					asm_specification,
					LOOKUP_ONLYCONVERTING);
		    }

		  if (pushed_scope)
		    pop_scope (pushed_scope);
		}
	    }
	  else
	    {
	      cp_id_kind idk;
	      /* If parsing a type specifier sequence failed, then
		 this MUST be a simple expression.  */
	      cp_parser_parse_tentatively (parser);
	      decl = cp_parser_primary_expression (parser, false, false,
						   false, &idk);
	      if (!cp_parser_error_occurred (parser)
		  && decl
		  && DECL_P (decl)
		  && CLASS_TYPE_P (TREE_TYPE (decl)))
		{
		  /* "classvar = rhs": emit the assignment now and
		     remember to privatize the variable below.  */
		  tree rhs;

		  cp_parser_parse_definitely (parser);
		  cp_parser_require (parser, CPP_EQ, RT_EQ);
		  rhs = cp_parser_assignment_expression (parser, false, NULL);
		  finish_expr_stmt (build_x_modify_expr (decl, NOP_EXPR,
							 rhs,
							 tf_warning_or_error));
		  add_private_clause = true;
		}
	      else
		{
		  decl = NULL;
		  cp_parser_abort_tentative_parse (parser);
		  init = cp_parser_expression (parser, false, NULL);
		  if (init)
		    {
		      if (TREE_CODE (init) == MODIFY_EXPR
			  || TREE_CODE (init) == MODOP_EXPR)
			real_decl = TREE_OPERAND (init, 0);
		    }
		}
	    }
	}
      cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);
      if (this_pre_body)
	{
	  /* Chain this loop's pre-body statements after those of any
	     outer collapsed loop.  */
	  this_pre_body = pop_stmt_list (this_pre_body);
	  if (pre_body)
	    {
	      tree t = pre_body;
	      pre_body = push_stmt_list ();
	      add_stmt (t);
	      add_stmt (this_pre_body);
	      pre_body = pop_stmt_list (pre_body);
	    }
	  else
	    pre_body = this_pre_body;
	}

      if (decl)
	real_decl = decl;
      /* For combined "parallel for", rewrite clauses on the parallel
	 that name the iteration variable: firstprivate is an error,
	 lastprivate moves to the for (becoming shared on the
	 parallel).  */
      if (par_clauses != NULL && real_decl != NULL_TREE)
	{
	  tree *c;
	  for (c = par_clauses; *c ; )
	    if (OMP_CLAUSE_CODE (*c) == OMP_CLAUSE_FIRSTPRIVATE
		&& OMP_CLAUSE_DECL (*c) == real_decl)
	      {
		error_at (loc, "iteration variable %qD"
			  " should not be firstprivate", real_decl);
		*c = OMP_CLAUSE_CHAIN (*c);
	      }
	    else if (OMP_CLAUSE_CODE (*c) == OMP_CLAUSE_LASTPRIVATE
		     && OMP_CLAUSE_DECL (*c) == real_decl)
	      {
		/* Add lastprivate (decl) clause to OMP_FOR_CLAUSES,
		   change it to shared (decl) in OMP_PARALLEL_CLAUSES.  */
		tree l = build_omp_clause (loc, OMP_CLAUSE_LASTPRIVATE);
		OMP_CLAUSE_DECL (l) = real_decl;
		OMP_CLAUSE_CHAIN (l) = clauses;
		CP_OMP_CLAUSE_INFO (l) = CP_OMP_CLAUSE_INFO (*c);
		clauses = l;
		OMP_CLAUSE_SET_CODE (*c, OMP_CLAUSE_SHARED);
		CP_OMP_CLAUSE_INFO (*c) = NULL;
		add_private_clause = false;
	      }
	    else
	      {
		if (OMP_CLAUSE_CODE (*c) == OMP_CLAUSE_PRIVATE
		    && OMP_CLAUSE_DECL (*c) == real_decl)
		  add_private_clause = false;
		c = &OMP_CLAUSE_CHAIN (*c);
	      }
	}

      /* A class-type iteration variable assigned in the init-expr must
	 be privatized unless an explicit clause already covers it.  */
      if (add_private_clause)
	{
	  tree c;
	  for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
	    {
	      if ((OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
		   || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
		  && OMP_CLAUSE_DECL (c) == decl)
		break;
	      else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE
		       && OMP_CLAUSE_DECL (c) == decl)
		error_at (loc, "iteration variable %qD "
			  "should not be firstprivate",
			  decl);
	      else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
		       && OMP_CLAUSE_DECL (c) == decl)
		error_at (loc, "iteration variable %qD should not be reduction",
			  decl);
	    }
	  if (c == NULL)
	    {
	      c = build_omp_clause (loc, OMP_CLAUSE_PRIVATE);
	      OMP_CLAUSE_DECL (c) = decl;
	      c = finish_omp_clauses (c);
	      if (c)
		{
		  OMP_CLAUSE_CHAIN (c) = clauses;
		  clauses = c;
		}
	    }
	}

      cond = NULL;
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
	cond = cp_parser_omp_for_cond (parser, decl);
      cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);

      incr = NULL;
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_CLOSE_PAREN))
	{
	  /* If decl is an iterator, preserve the operator on decl
	     until finish_omp_for.  */
	  if (real_decl
	      && ((processing_template_decl
		   && !POINTER_TYPE_P (TREE_TYPE (real_decl)))
		  || CLASS_TYPE_P (TREE_TYPE (real_decl))))
	    incr = cp_parser_omp_for_incr (parser, real_decl);
	  else
	    incr = cp_parser_expression (parser, false, NULL);
	}

      if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN))
	cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true,
					       /*or_comma=*/false,
					       /*consume_paren=*/true);

      TREE_VEC_ELT (declv, i) = decl;
      TREE_VEC_ELT (initv, i) = init;
      TREE_VEC_ELT (condv, i) = cond;
      TREE_VEC_ELT (incrv, i) = incr;

      if (i == collapse - 1)
	break;

      /* FIXME: OpenMP 3.0 draft isn't very clear on what exactly is allowed
	 in between the collapsed for loops to be still considered perfectly
	 nested.  Hopefully the final version clarifies this.
	 For now handle (multiple) {'s and empty statements.  */
      cp_parser_parse_tentatively (parser);
      do
	{
	  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_FOR))
	    break;
	  else if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
	    {
	      cp_lexer_consume_token (parser->lexer);
	      bracecount++;
	    }
	  else if (bracecount
		   && cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
	    cp_lexer_consume_token (parser->lexer);
	  else
	    {
	      loc = cp_lexer_peek_token (parser->lexer)->location;
	      error_at (loc, "not enough collapsed for loops");
	      collapse_err = true;
	      cp_parser_abort_tentative_parse (parser);
	      /* NULL declv signals "give up building the OMP_FOR".  */
	      declv = NULL_TREE;
	      break;
	    }
	}
      while (1);

      if (declv)
	{
	  cp_parser_parse_definitely (parser);
	  nbraces += bracecount;
	}
    }

  /* Note that we saved the original contents of this flag when we entered
     the structured block, and so we don't need to re-save it here.  */
  parser->in_statement = IN_OMP_FOR;

  /* Note that the grammar doesn't call for a structured block here,
     though the loop as a whole is a structured block.  */
  body = push_stmt_list ();
  cp_parser_statement (parser, NULL_TREE, false, NULL);
  body = pop_stmt_list (body);

  if (declv == NULL_TREE)
    ret = NULL_TREE;
  else
    ret = finish_omp_for (loc_first, declv, initv, condv, incrv, body,
			  pre_body, clauses);

  /* Consume the closing braces (and stray semicolons) matching the
     opening braces seen between collapsed loops.  */
  while (nbraces)
    {
      if (cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_BRACE))
	{
	  cp_lexer_consume_token (parser->lexer);
	  nbraces--;
	}
      else if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
	cp_lexer_consume_token (parser->lexer);
      else
	{
	  if (!collapse_err)
	    {
	      error_at (cp_lexer_peek_token (parser->lexer)->location,
			"collapsed loops not perfectly nested");
	    }
	  collapse_err = true;
	  cp_parser_statement_seq_opt (parser, NULL);
	  if (cp_lexer_next_token_is (parser->lexer, CPP_EOF))
	    break;
	}
    }

  /* Emit the saved class-iterator construction statement lists.  */
  while (!VEC_empty (tree, for_block))
    add_stmt (pop_stmt_list (VEC_pop (tree, for_block)));
  release_tree_vector (for_block);

  return ret;
}
/* OpenMP 2.5:
   #pragma omp for for-clause[optseq] new-line
     for-loop  */

#define OMP_FOR_CLAUSE_MASK				\
	( (1u << PRAGMA_OMP_CLAUSE_PRIVATE)		\
	| (1u << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE)	\
	| (1u << PRAGMA_OMP_CLAUSE_LASTPRIVATE)		\
	| (1u << PRAGMA_OMP_CLAUSE_REDUCTION)		\
	| (1u << PRAGMA_OMP_CLAUSE_ORDERED)		\
	| (1u << PRAGMA_OMP_CLAUSE_SCHEDULE)		\
	| (1u << PRAGMA_OMP_CLAUSE_NOWAIT)		\
	| (1u << PRAGMA_OMP_CLAUSE_COLLAPSE))

/* Parse a #pragma omp for directive: first its clauses, then the
   restricted loop nest, wrapped in an OMP structured block.  */
static tree
cp_parser_omp_for (cp_parser *parser, cp_token *pragma_tok)
{
  tree cl, sblock, stmt;
  unsigned int saved;

  /* The clauses end with the pragma's end-of-line token.  */
  cl = cp_parser_omp_all_clauses (parser, OMP_FOR_CLAUSE_MASK,
				  "#pragma omp for", pragma_tok);

  /* Parse the loop nest inside its own structured block.  */
  sblock = begin_omp_structured_block ();
  saved = cp_parser_begin_omp_structured_block (parser);
  stmt = cp_parser_omp_for_loop (parser, cl, NULL);
  cp_parser_end_omp_structured_block (parser, saved);
  add_stmt (finish_omp_structured_block (sblock));

  return stmt;
}
/* OpenMP 2.5:
   # pragma omp master new-line
     structured-block

   No clauses are permitted; the pragma line must end immediately.  */
static tree
cp_parser_omp_master (cp_parser *parser, cp_token *pragma_tok)
{
  cp_parser_require_pragma_eol (parser, pragma_tok);
  /* Parse the structured block and wrap it via c_finish_omp_master.  */
  return c_finish_omp_master (input_location,
			      cp_parser_omp_structured_block (parser));
}
/* OpenMP 2.5:
   # pragma omp ordered new-line
     structured-block

   The directive takes no clauses; the location of the pragma itself is
   attached to the resulting statement.  */
static tree
cp_parser_omp_ordered (cp_parser *parser, cp_token *pragma_tok)
{
  location_t pragma_loc;
  tree body;

  /* Remember where the directive was before consuming its line.  */
  pragma_loc = cp_lexer_peek_token (parser->lexer)->location;
  cp_parser_require_pragma_eol (parser, pragma_tok);
  body = cp_parser_omp_structured_block (parser);
  return c_finish_omp_ordered (pragma_loc, body);
}
/* OpenMP 2.5:
   section-scope:
     { section-sequence }

   section-sequence:
     section-directive[opt] structured-block
     section-sequence section-directive structured-block

   Parse the braced body of a sections construct and return an
   OMP_SECTIONS node whose body is a list of OMP_SECTION statements
   (the caller attaches the clauses), or NULL_TREE on error.  */
static tree
cp_parser_omp_sections_scope (cp_parser *parser)
{
  tree stmt, substmt;
  bool error_suppress = false;
  cp_token *tok;

  if (!cp_parser_require (parser, CPP_OPEN_BRACE, RT_OPEN_BRACE))
    return NULL_TREE;

  stmt = push_stmt_list ();

  /* The first section-directive is optional: statements before the
     first #pragma omp section form an implicit leading section.  */
  if (cp_lexer_peek_token (parser->lexer)->pragma_kind != PRAGMA_OMP_SECTION)
    {
      unsigned save;

      substmt = begin_omp_structured_block ();
      save = cp_parser_begin_omp_structured_block (parser);

      while (1)
	{
	  /* Accumulate statements until the next section directive,
	     the closing brace, or end of file.  */
	  cp_parser_statement (parser, NULL_TREE, false, NULL);

	  tok = cp_lexer_peek_token (parser->lexer);
	  if (tok->pragma_kind == PRAGMA_OMP_SECTION)
	    break;
	  if (tok->type == CPP_CLOSE_BRACE)
	    break;
	  if (tok->type == CPP_EOF)
	    break;
	}

      cp_parser_end_omp_structured_block (parser, save);
      substmt = finish_omp_structured_block (substmt);
      substmt = build1 (OMP_SECTION, void_type_node, substmt);
      add_stmt (substmt);
    }

  /* Each remaining section starts with #pragma omp section.  */
  while (1)
    {
      tok = cp_lexer_peek_token (parser->lexer);
      if (tok->type == CPP_CLOSE_BRACE)
	break;
      if (tok->type == CPP_EOF)
	break;

      if (tok->pragma_kind == PRAGMA_OMP_SECTION)
	{
	  cp_lexer_consume_token (parser->lexer);
	  cp_parser_require_pragma_eol (parser, tok);
	  error_suppress = false;
	}
      else if (!error_suppress)
	{
	  /* Complain once, then suppress repeats until the next valid
	     section directive to avoid an error cascade.  */
	  cp_parser_error (parser, "expected %<#pragma omp section%> or %<}%>");
	  error_suppress = true;
	}

      substmt = cp_parser_omp_structured_block (parser);
      substmt = build1 (OMP_SECTION, void_type_node, substmt);
      add_stmt (substmt);
    }
  cp_parser_require (parser, CPP_CLOSE_BRACE, RT_CLOSE_BRACE);

  substmt = pop_stmt_list (stmt);

  stmt = make_node (OMP_SECTIONS);
  TREE_TYPE (stmt) = void_type_node;
  OMP_SECTIONS_BODY (stmt) = substmt;

  add_stmt (stmt);
  return stmt;
}
/* OpenMP 2.5:
   # pragma omp sections sections-clause[optseq] newline
     sections-scope  */

#define OMP_SECTIONS_CLAUSE_MASK			\
	( (1u << PRAGMA_OMP_CLAUSE_PRIVATE)		\
	| (1u << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE)	\
	| (1u << PRAGMA_OMP_CLAUSE_LASTPRIVATE)		\
	| (1u << PRAGMA_OMP_CLAUSE_REDUCTION)		\
	| (1u << PRAGMA_OMP_CLAUSE_NOWAIT))

/* Parse a #pragma omp sections directive: the clauses, then the
   braced section-scope, attaching the clauses to the result.  */
static tree
cp_parser_omp_sections (cp_parser *parser, cp_token *pragma_tok)
{
  tree cl, stmt;

  cl = cp_parser_omp_all_clauses (parser, OMP_SECTIONS_CLAUSE_MASK,
				  "#pragma omp sections", pragma_tok);

  stmt = cp_parser_omp_sections_scope (parser);
  if (stmt != NULL_TREE)
    OMP_SECTIONS_CLAUSES (stmt) = cl;

  return stmt;
}
/* OpenMP 2.5:
   # pragma omp parallel parallel-clause new-line
   # pragma omp parallel for parallel-for-clause new-line
   # pragma omp parallel sections parallel-sections-clause new-line  */

#define OMP_PARALLEL_CLAUSE_MASK			\
	( (1u << PRAGMA_OMP_CLAUSE_IF)			\
	| (1u << PRAGMA_OMP_CLAUSE_PRIVATE)		\
	| (1u << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE)	\
	| (1u << PRAGMA_OMP_CLAUSE_DEFAULT)		\
	| (1u << PRAGMA_OMP_CLAUSE_SHARED)		\
	| (1u << PRAGMA_OMP_CLAUSE_COPYIN)		\
	| (1u << PRAGMA_OMP_CLAUSE_REDUCTION)		\
	| (1u << PRAGMA_OMP_CLAUSE_NUM_THREADS))

/* Parse a parallel construct, including the combined
   "parallel for" and "parallel sections" forms.  */
static tree
cp_parser_omp_parallel (cp_parser *parser, cp_token *pragma_tok)
{
  enum pragma_kind p_kind = PRAGMA_OMP_PARALLEL;
  const char *p_name = "#pragma omp parallel";
  tree stmt, clauses, par_clause, ws_clause, block;
  unsigned int mask = OMP_PARALLEL_CLAUSE_MASK;
  unsigned int save;
  location_t loc = cp_lexer_peek_token (parser->lexer)->location;

  /* Detect the combined forms and widen the clause mask with the
     worksharing construct's clauses; nowait is not valid on a
     combined construct.  */
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_FOR))
    {
      cp_lexer_consume_token (parser->lexer);
      p_kind = PRAGMA_OMP_PARALLEL_FOR;
      p_name = "#pragma omp parallel for";
      mask |= OMP_FOR_CLAUSE_MASK;
      mask &= ~(1u << PRAGMA_OMP_CLAUSE_NOWAIT);
    }
  else if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
    {
      /* "sections" is not a keyword, so compare the identifier's
	 spelling.  */
      tree id = cp_lexer_peek_token (parser->lexer)->u.value;
      const char *p = IDENTIFIER_POINTER (id);
      if (strcmp (p, "sections") == 0)
	{
	  cp_lexer_consume_token (parser->lexer);
	  p_kind = PRAGMA_OMP_PARALLEL_SECTIONS;
	  p_name = "#pragma omp parallel sections";
	  mask |= OMP_SECTIONS_CLAUSE_MASK;
	  mask &= ~(1u << PRAGMA_OMP_CLAUSE_NOWAIT);
	}
    }

  clauses = cp_parser_omp_all_clauses (parser, mask, p_name, pragma_tok);
  block = begin_omp_parallel ();
  save = cp_parser_begin_omp_structured_block (parser);

  switch (p_kind)
    {
    case PRAGMA_OMP_PARALLEL:
      cp_parser_statement (parser, NULL_TREE, false, NULL);
      par_clause = clauses;
      break;

    case PRAGMA_OMP_PARALLEL_FOR:
      /* Split the clauses between the parallel and the worksharing
	 construct, then parse the loop nest.  */
      c_split_parallel_clauses (loc, clauses, &par_clause, &ws_clause);
      cp_parser_omp_for_loop (parser, ws_clause, &par_clause);
      break;

    case PRAGMA_OMP_PARALLEL_SECTIONS:
      c_split_parallel_clauses (loc, clauses, &par_clause, &ws_clause);
      stmt = cp_parser_omp_sections_scope (parser);
      if (stmt)
	OMP_SECTIONS_CLAUSES (stmt) = ws_clause;
      break;

    default:
      gcc_unreachable ();
    }

  cp_parser_end_omp_structured_block (parser, save);
  stmt = finish_omp_parallel (par_clause, block);
  if (p_kind != PRAGMA_OMP_PARALLEL)
    OMP_PARALLEL_COMBINED (stmt) = 1;
  return stmt;
}
/* OpenMP 2.5:
   # pragma omp single single-clause[optseq] new-line
     structured-block  */

#define OMP_SINGLE_CLAUSE_MASK				\
	( (1u << PRAGMA_OMP_CLAUSE_PRIVATE)		\
	| (1u << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE)	\
	| (1u << PRAGMA_OMP_CLAUSE_COPYPRIVATE)		\
	| (1u << PRAGMA_OMP_CLAUSE_NOWAIT))

/* Parse a #pragma omp single directive and its structured block,
   producing an OMP_SINGLE statement.  */
static tree
cp_parser_omp_single (cp_parser *parser, cp_token *pragma_tok)
{
  tree single_stmt, cl, body;

  single_stmt = make_node (OMP_SINGLE);
  TREE_TYPE (single_stmt) = void_type_node;

  /* Clauses first, then the block the directive applies to.  */
  cl = cp_parser_omp_all_clauses (parser, OMP_SINGLE_CLAUSE_MASK,
				  "#pragma omp single", pragma_tok);
  OMP_SINGLE_CLAUSES (single_stmt) = cl;

  body = cp_parser_omp_structured_block (parser);
  OMP_SINGLE_BODY (single_stmt) = body;

  return add_stmt (single_stmt);
}
/* OpenMP 3.0:
   # pragma omp task task-clause[optseq] new-line
     structured-block  */

#define OMP_TASK_CLAUSE_MASK				\
	( (1u << PRAGMA_OMP_CLAUSE_IF)			\
	| (1u << PRAGMA_OMP_CLAUSE_UNTIED)		\
	| (1u << PRAGMA_OMP_CLAUSE_DEFAULT)		\
	| (1u << PRAGMA_OMP_CLAUSE_PRIVATE)		\
	| (1u << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE)	\
	| (1u << PRAGMA_OMP_CLAUSE_SHARED)		\
	| (1u << PRAGMA_OMP_CLAUSE_FINAL)		\
	| (1u << PRAGMA_OMP_CLAUSE_MERGEABLE))

/* Parse a #pragma omp task directive and its body, producing an
   OMP_TASK statement via finish_omp_task.  */
static tree
cp_parser_omp_task (cp_parser *parser, cp_token *pragma_tok)
{
  tree cl, task_block;
  unsigned int saved;

  cl = cp_parser_omp_all_clauses (parser, OMP_TASK_CLAUSE_MASK,
				  "#pragma omp task", pragma_tok);

  /* The task body is a single statement parsed in its own OMP
     structured block.  */
  task_block = begin_omp_task ();
  saved = cp_parser_begin_omp_structured_block (parser);
  cp_parser_statement (parser, NULL_TREE, false, NULL);
  cp_parser_end_omp_structured_block (parser, saved);

  return finish_omp_task (cl, task_block);
}
/* OpenMP 3.0:
   # pragma omp taskwait new-line

   A stand-alone directive: no clauses, no body.  */
static void
cp_parser_omp_taskwait (cp_parser *parser, cp_token *pragma_tok)
{
  cp_parser_require_pragma_eol (parser, pragma_tok);
  /* Let the semantics layer emit the taskwait statement.  */
  finish_omp_taskwait ();
}
/* OpenMP 3.1:
   # pragma omp taskyield new-line

   A stand-alone directive: no clauses, no body.  */
static void
cp_parser_omp_taskyield (cp_parser *parser, cp_token *pragma_tok)
{
  cp_parser_require_pragma_eol (parser, pragma_tok);
  /* Let the semantics layer emit the taskyield statement.  */
  finish_omp_taskyield ();
}
/* OpenMP 2.5:
   # pragma omp threadprivate (variable-list)

   Parse the parenthesized variable list and hand it to
   finish_omp_threadprivate.  */
static void
cp_parser_omp_threadprivate (cp_parser *parser, cp_token *pragma_tok)
{
  tree var_list;

  var_list = cp_parser_omp_var_list (parser, OMP_CLAUSE_ERROR, NULL);
  cp_parser_require_pragma_eol (parser, pragma_tok);

  finish_omp_threadprivate (var_list);
}
/* Main entry point to OpenMP statement pragmas.  Dispatch on the
   pragma kind and, when the sub-parser yields a statement, stamp it
   with the pragma's location.  */
static void
cp_parser_omp_construct (cp_parser *parser, cp_token *pragma_tok)
{
  tree result = NULL_TREE;

  switch (pragma_tok->pragma_kind)
    {
    case PRAGMA_OMP_ATOMIC:
      /* Atomic handles its own statement; nothing to annotate.  */
      cp_parser_omp_atomic (parser, pragma_tok);
      return;
    case PRAGMA_OMP_CRITICAL:
      result = cp_parser_omp_critical (parser, pragma_tok);
      break;
    case PRAGMA_OMP_FOR:
      result = cp_parser_omp_for (parser, pragma_tok);
      break;
    case PRAGMA_OMP_MASTER:
      result = cp_parser_omp_master (parser, pragma_tok);
      break;
    case PRAGMA_OMP_ORDERED:
      result = cp_parser_omp_ordered (parser, pragma_tok);
      break;
    case PRAGMA_OMP_PARALLEL:
      result = cp_parser_omp_parallel (parser, pragma_tok);
      break;
    case PRAGMA_OMP_SECTIONS:
      result = cp_parser_omp_sections (parser, pragma_tok);
      break;
    case PRAGMA_OMP_SINGLE:
      result = cp_parser_omp_single (parser, pragma_tok);
      break;
    case PRAGMA_OMP_TASK:
      result = cp_parser_omp_task (parser, pragma_tok);
      break;
    default:
      gcc_unreachable ();
    }

  if (result)
    SET_EXPR_LOCATION (result, pragma_tok->location);
}
/* Transactional Memory parsing routines.  */

/* Parse a transaction attribute.

   txn-attribute:
	attribute
	[ [ identifier ] ]

   Returns the attribute as a TREE_LIST, or NULL/NULL_TREE when absent
   or malformed.

   ??? Simplify this when C++0x bracket attributes are
   implemented properly.  */
static tree
cp_parser_txn_attribute_opt (cp_parser *parser)
{
  cp_token *token;
  tree attr_name, attr = NULL;

  /* A GNU __attribute__ spelling is accepted as-is.  */
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_ATTRIBUTE))
    return cp_parser_attributes_opt (parser);

  if (cp_lexer_next_token_is_not (parser->lexer, CPP_OPEN_SQUARE))
    return NULL_TREE;
  cp_lexer_consume_token (parser->lexer);
  if (!cp_parser_require (parser, CPP_OPEN_SQUARE, RT_OPEN_SQUARE))
    goto error1;

  token = cp_lexer_peek_token (parser->lexer);
  if (token->type == CPP_NAME || token->type == CPP_KEYWORD)
    {
      token = cp_lexer_consume_token (parser->lexer);

      attr_name = (token->type == CPP_KEYWORD
		   /* For keywords, use the canonical spelling,
		      not the parsed identifier.  */
		   ? ridpointers[(int) token->keyword]
		   : token->u.value);
      attr = build_tree_list (attr_name, NULL_TREE);
    }
  else
    cp_parser_error (parser, "expected identifier");

  /* Close the inner bracket; the success path then falls through to
     ERROR1 and closes the outer bracket too, consuming both of "]]".
     The goto path requires only the single outer bracket.  */
  cp_parser_require (parser, CPP_CLOSE_SQUARE, RT_CLOSE_SQUARE);
 error1:
  cp_parser_require (parser, CPP_CLOSE_SQUARE, RT_CLOSE_SQUARE);
  return attr;
}
/* Parse a __transaction_atomic or __transaction_relaxed statement.

   transaction-statement:
     __transaction_atomic txn-attribute[opt] txn-noexcept-spec[opt]
       compound-statement
     __transaction_relaxed txn-noexcept-spec[opt] compound-statement

   KEYWORD selects which of the two forms is expected.  Returns the
   statement built by begin/finish_transaction_stmt.  */
static tree
cp_parser_transaction (cp_parser *parser, enum rid keyword)
{
  /* Save the enclosing transaction state so it can be restored after
     the compound statement is parsed.  */
  unsigned char old_in = parser->in_transaction;
  unsigned char this_in = 1, new_in;
  cp_token *token;
  tree stmt, attrs, noex;

  gcc_assert (keyword == RID_TRANSACTION_ATOMIC
      || keyword == RID_TRANSACTION_RELAXED);
  token = cp_parser_require_keyword (parser, keyword,
      (keyword == RID_TRANSACTION_ATOMIC ? RT_TRANSACTION_ATOMIC
	  : RT_TRANSACTION_RELAXED));
  gcc_assert (token != NULL);

  if (keyword == RID_TRANSACTION_RELAXED)
    this_in |= TM_STMT_ATTR_RELAXED;
  else
    {
      /* Only the atomic form may carry a txn-attribute.  */
      attrs = cp_parser_txn_attribute_opt (parser);
      if (attrs)
	this_in |= parse_tm_stmt_attr (attrs, TM_STMT_ATTR_OUTER);
    }

  /* Parse a noexcept specification.  */
  noex = cp_parser_noexcept_specification_opt (parser, true, NULL, true);

  /* Keep track if we're in the lexical scope of an outer transaction.  */
  new_in = this_in | (old_in & TM_STMT_ATTR_OUTER);

  stmt = begin_transaction_stmt (token->location, NULL, this_in);

  parser->in_transaction = new_in;

  cp_parser_compound_statement (parser, NULL, false, false);

  parser->in_transaction = old_in;

  finish_transaction_stmt (stmt, NULL, this_in, noex);

  return stmt;
}
/* Parse a __transaction_atomic or __transaction_relaxed expression.

   transaction-expression:
     __transaction_atomic txn-noexcept-spec[opt] ( expression )
     __transaction_relaxed txn-noexcept-spec[opt] ( expression )

   Returns the expression built by build_transaction_expr, or
   error_mark_node when TM support is disabled or the expression
   appears where an integral constant expression is required.  */
static tree
cp_parser_transaction_expression (cp_parser *parser, enum rid keyword)
{
  unsigned char old_in = parser->in_transaction;
  unsigned char this_in = 1;
  cp_token *token;
  tree expr, noex;
  bool noex_expr;

  gcc_assert (keyword == RID_TRANSACTION_ATOMIC
      || keyword == RID_TRANSACTION_RELAXED);

  /* Diagnose now, but keep parsing for better error recovery; the
     final return still yields error_mark_node when !flag_tm.  */
  if (!flag_tm)
    error (keyword == RID_TRANSACTION_RELAXED
	   ? G_("%<__transaction_relaxed%> without transactional memory "
		"support enabled")
	   : G_("%<__transaction_atomic%> without transactional memory "
		"support enabled"));

  token = cp_parser_require_keyword (parser, keyword,
      (keyword == RID_TRANSACTION_ATOMIC ? RT_TRANSACTION_ATOMIC
	  : RT_TRANSACTION_RELAXED));
  gcc_assert (token != NULL);

  if (keyword == RID_TRANSACTION_RELAXED)
    this_in |= TM_STMT_ATTR_RELAXED;

  /* Set this early.  This might mean that we allow transaction_cancel in
     an expression that we find out later actually has to be a constexpr.
     However, we expect that cxx_constant_value will be able to deal with
     this; also, if the noexcept has no constexpr, then what we parse next
     really is a transaction's body.  */
  parser->in_transaction = this_in;

  /* Parse a noexcept specification.  */
  noex = cp_parser_noexcept_specification_opt (parser, false, &noex_expr,
					       true);

  if (!noex || !noex_expr
      || cp_lexer_peek_token (parser->lexer)->type == CPP_OPEN_PAREN)
    {
      /* Normal form: a parenthesized expression follows.  */
      cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);

      expr = cp_parser_expression (parser, /*cast_p=*/false, NULL);
      finish_parenthesized_expr (expr);

      cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
    }
  else
    {
      /* The only expression that is available got parsed for the noexcept
	 already.  noexcept is true then.  */
      expr = noex;
      noex = boolean_true_node;
    }

  expr = build_transaction_expr (token->location, expr, this_in, noex);
  parser->in_transaction = old_in;

  if (cp_parser_non_integral_constant_expression (parser, NIC_TRANSACTION))
    return error_mark_node;

  return (flag_tm ? expr : error_mark_node);
}
/* Parse a function-transaction-block.

   function-transaction-block:
     __transaction_atomic txn-attribute[opt] ctor-initializer[opt]
	 function-body
     __transaction_atomic txn-attribute[opt] function-try-block
     __transaction_relaxed ctor-initializer[opt] function-body
     __transaction_relaxed function-try-block

   Returns the ctor-initializer flag reported by the function-body
   parser.  */
static bool
cp_parser_function_transaction (cp_parser *parser, enum rid keyword)
{
  /* Save the enclosing transaction state; it is restored once the
     function body has been parsed.  */
  unsigned char old_in = parser->in_transaction;
  unsigned char new_in = 1;
  tree compound_stmt, stmt, attrs;
  bool ctor_initializer_p;
  cp_token *token;

  gcc_assert (keyword == RID_TRANSACTION_ATOMIC
      || keyword == RID_TRANSACTION_RELAXED);
  token = cp_parser_require_keyword (parser, keyword,
      (keyword == RID_TRANSACTION_ATOMIC ? RT_TRANSACTION_ATOMIC
	  : RT_TRANSACTION_RELAXED));
  gcc_assert (token != NULL);

  if (keyword == RID_TRANSACTION_RELAXED)
    new_in |= TM_STMT_ATTR_RELAXED;
  else
    {
      /* Only the atomic form may carry a txn-attribute.  */
      attrs = cp_parser_txn_attribute_opt (parser);
      if (attrs)
	new_in |= parse_tm_stmt_attr (attrs, TM_STMT_ATTR_OUTER);
    }

  stmt = begin_transaction_stmt (token->location, &compound_stmt, new_in);

  parser->in_transaction = new_in;

  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_TRY))
    ctor_initializer_p = cp_parser_function_try_block (parser);
  else
    ctor_initializer_p
      = cp_parser_ctor_initializer_opt_and_function_body (parser);

  parser->in_transaction = old_in;

  finish_transaction_stmt (stmt, compound_stmt, new_in, NULL_TREE);

  return ctor_initializer_p;
}
/* Parse a __transaction_cancel statement.

   cancel-statement:
     __transaction_cancel txn-attribute[opt] ;
     __transaction_cancel txn-attribute[opt] throw-expression ;

   Returns the built statement, or error_mark_node when the cancel is
   not allowed in the current context.

   ??? Cancel and throw is not yet implemented.  */
static tree
cp_parser_transaction_cancel (cp_parser *parser)
{
  cp_token *token;
  bool is_outer = false;
  tree stmt, attrs;

  token = cp_parser_require_keyword (parser, RID_TRANSACTION_CANCEL,
				     RT_TRANSACTION_CANCEL);
  gcc_assert (token != NULL);

  attrs = cp_parser_txn_attribute_opt (parser);
  if (attrs)
    is_outer = (parse_tm_stmt_attr (attrs, TM_STMT_ATTR_OUTER) != 0);

  /* ??? Parse cancel-and-throw here.  */

  cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);

  /* Diagnose the contexts in which a cancel is invalid: no TM support,
     inside a relaxed transaction, an "outer" cancel with no outer
     atomic transaction in scope, or outside any transaction.  */
  if (!flag_tm)
    {
      error_at (token->location, "%<__transaction_cancel%> without "
		"transactional memory support enabled");
      return error_mark_node;
    }
  else if (parser->in_transaction & TM_STMT_ATTR_RELAXED)
    {
      error_at (token->location, "%<__transaction_cancel%> within a "
		"%<__transaction_relaxed%>");
      return error_mark_node;
    }
  else if (is_outer)
    {
      if ((parser->in_transaction & TM_STMT_ATTR_OUTER) == 0
	  && !is_tm_may_cancel_outer (current_function_decl))
	{
	  error_at (token->location, "outer %<__transaction_cancel%> not "
		    "within outer %<__transaction_atomic%>");
	  error_at (token->location,
		    " or a %<transaction_may_cancel_outer%> function");
	  return error_mark_node;
	}
    }
  else if (parser->in_transaction == 0)
    {
      error_at (token->location, "%<__transaction_cancel%> not within "
		"%<__transaction_atomic%>");
      return error_mark_node;
    }

  stmt = build_tm_abort_call (token->location, is_outer);
  add_stmt (stmt);
  finish_stmt ();

  return stmt;
}
/* The parser.  Created by c_parse_file for the duration of the
   translation unit and cleared again afterwards; GTY-marked so the
   garbage collector scans it.  */
static GTY (()) cp_parser *the_parser;
/* Special handling for the first token or line in the file.  The first
   thing in the file might be #pragma GCC pch_preprocess, which loads a
   PCH file, which is a GC collection point.  So we need to handle this
   first pragma without benefit of an existing lexer structure.

   Always returns one token to the caller in *FIRST_TOKEN.  This is
   either the true first token of the file, or the first token after
   the initial pragma.  */
static void
cp_parser_initial_pragma (cp_token *first_token)
{
  tree name = NULL;

  cp_lexer_get_preprocessor_token (NULL, first_token);
  /* Anything other than the pch_preprocess pragma is handed straight
     back to normal parsing.  */
  if (first_token->pragma_kind != PRAGMA_GCC_PCH_PREPROCESS)
    return;

  cp_lexer_get_preprocessor_token (NULL, first_token);
  if (first_token->type == CPP_STRING)
    {
      name = first_token->u.value;

      cp_lexer_get_preprocessor_token (NULL, first_token);
      if (first_token->type != CPP_PRAGMA_EOL)
	error_at (first_token->location,
		  "junk at end of %<#pragma GCC pch_preprocess%>");
    }
  else
    error_at (first_token->location, "expected string literal");

  /* Skip to the end of the pragma.  */
  while (first_token->type != CPP_PRAGMA_EOL && first_token->type != CPP_EOF)
    cp_lexer_get_preprocessor_token (NULL, first_token);

  /* Now actually load the PCH file.  */
  if (name)
    c_common_pch_pragma (parse_in, TREE_STRING_POINTER (name));

  /* Read one more token to return to our caller.  We have to do this
     after reading the PCH file in, since its pointers have to be
     live.  */
  cp_lexer_get_preprocessor_token (NULL, first_token);
}
/* Normal parsing of a pragma token.  Here we can (and must) use the
   regular lexer.  CONTEXT says where the pragma occurs (external
   scope, statement, compound statement, ...); several OpenMP pragmas
   are only valid in some of those contexts.  Returns true when the
   pragma produced a statement the caller must account for, false
   otherwise.  */
static bool
cp_parser_pragma (cp_parser *parser, enum pragma_context context)
{
  cp_token *pragma_tok;
  unsigned int id;

  pragma_tok = cp_lexer_consume_token (parser->lexer);
  gcc_assert (pragma_tok->type == CPP_PRAGMA);
  parser->lexer->in_pragma = true;

  id = pragma_tok->pragma_kind;
  switch (id)
    {
    case PRAGMA_GCC_PCH_PREPROCESS:
      /* Valid only as the very first line; see cp_parser_initial_pragma.  */
      error_at (pragma_tok->location,
		"%<#pragma GCC pch_preprocess%> must be first");
      break;

    case PRAGMA_OMP_BARRIER:
      /* Stand-alone directives are only valid directly inside a
	 compound statement.  */
      switch (context)
	{
	case pragma_compound:
	  cp_parser_omp_barrier (parser, pragma_tok);
	  return false;
	case pragma_stmt:
	  error_at (pragma_tok->location, "%<#pragma omp barrier%> may only be "
		    "used in compound statements");
	  break;
	default:
	  goto bad_stmt;
	}
      break;

    case PRAGMA_OMP_FLUSH:
      switch (context)
	{
	case pragma_compound:
	  cp_parser_omp_flush (parser, pragma_tok);
	  return false;
	case pragma_stmt:
	  error_at (pragma_tok->location, "%<#pragma omp flush%> may only be "
		    "used in compound statements");
	  break;
	default:
	  goto bad_stmt;
	}
      break;

    case PRAGMA_OMP_TASKWAIT:
      switch (context)
	{
	case pragma_compound:
	  cp_parser_omp_taskwait (parser, pragma_tok);
	  return false;
	case pragma_stmt:
	  error_at (pragma_tok->location,
		    "%<#pragma omp taskwait%> may only be "
		    "used in compound statements");
	  break;
	default:
	  goto bad_stmt;
	}
      break;

    case PRAGMA_OMP_TASKYIELD:
      switch (context)
	{
	case pragma_compound:
	  cp_parser_omp_taskyield (parser, pragma_tok);
	  return false;
	case pragma_stmt:
	  error_at (pragma_tok->location,
		    "%<#pragma omp taskyield%> may only be "
		    "used in compound statements");
	  break;
	default:
	  goto bad_stmt;
	}
      break;

    case PRAGMA_OMP_THREADPRIVATE:
      cp_parser_omp_threadprivate (parser, pragma_tok);
      return false;

    /* The statement-generating OpenMP constructs share one dispatch
       routine and are valid anywhere but at external scope.  */
    case PRAGMA_OMP_ATOMIC:
    case PRAGMA_OMP_CRITICAL:
    case PRAGMA_OMP_FOR:
    case PRAGMA_OMP_MASTER:
    case PRAGMA_OMP_ORDERED:
    case PRAGMA_OMP_PARALLEL:
    case PRAGMA_OMP_SECTIONS:
    case PRAGMA_OMP_SINGLE:
    case PRAGMA_OMP_TASK:
      if (context == pragma_external)
	goto bad_stmt;
      cp_parser_omp_construct (parser, pragma_tok);
      return true;

    case PRAGMA_OMP_SECTION:
      /* Handled inside cp_parser_omp_sections_scope; anywhere else it
	 is an error.  */
      error_at (pragma_tok->location,
		"%<#pragma omp section%> may only be used in "
		"%<#pragma omp sections%> construct");
      break;

    default:
      /* Non-built-in pragmas registered through the c-family machinery.  */
      gcc_assert (id >= PRAGMA_FIRST_EXTERNAL);
      c_invoke_pragma_handler (id);
      break;

    bad_stmt:
      cp_parser_error (parser, "expected declaration specifiers");
      break;
    }

  cp_parser_skip_to_pragma_eol (parser, pragma_tok);
  return false;
}
/* The interface the pragma parsers have to the lexer.  Returns the
   kind of the next token (with pragma-EOL folded into CPP_EOF and
   keywords reported as CPP_NAME) and stores its value in *VALUE.  */
enum cpp_ttype
pragma_lex (tree *value)
{
  cp_token *token = cp_lexer_peek_token (the_parser->lexer);
  enum cpp_ttype kind = token->type;

  *value = token->u.value;
  switch (kind)
    {
    case CPP_PRAGMA_EOL:
    case CPP_EOF:
      /* Do not consume past the end of the pragma line.  */
      kind = CPP_EOF;
      break;

    case CPP_STRING:
      /* Let the string-literal parser handle concatenation and
	 consumption.  */
      *value = cp_parser_string_literal (the_parser, false, false);
      break;

    default:
      cp_lexer_consume_token (the_parser->lexer);
      if (kind == CPP_KEYWORD)
	kind = CPP_NAME;
      break;
    }

  return kind;
}
/* External interface.  */

/* Parse one entire translation unit.  Only a single invocation per
   compilation is supported.  */
void
c_parse_file (void)
{
  static bool entered = false;

  /* Guard against a second call; multi-TU compilation is unsupported.  */
  if (entered)
    {
      sorry ("inter-module optimizations not implemented for C++");
      return;
    }
  entered = true;

  the_parser = cp_parser_new ();
  push_deferring_access_checks (flag_access_control
				? dk_no_deferred : dk_no_check);
  cp_parser_translation_unit (the_parser);
  the_parser = NULL;
}
#include "gt-cp-parser.h"
|
cgemm.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/zgemm.c, normal z -> c, Fri Sep 28 17:38:01 2018
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
/***************************************************************************//**
*
* @ingroup plasma_gemm
*
* Performs one of the matrix-matrix operations
*
* \f[ C = \alpha [op( A )\times op( B )] + \beta C, \f]
*
* where op( X ) is one of:
* \f[ op( X ) = X, \f]
* \f[ op( X ) = X^T, \f]
* \f[ op( X ) = X^H, \f]
*
* alpha and beta are scalars, and A, B and C are matrices, with op( A )
* an m-by-k matrix, op( B ) a k-by-n matrix and C an m-by-n matrix.
*
*******************************************************************************
*
* @param[in] transa
* - PlasmaNoTrans: A is not transposed,
* - PlasmaTrans: A is transposed,
* - PlasmaConjTrans: A is conjugate transposed.
*
* @param[in] transb
* - PlasmaNoTrans: B is not transposed,
* - PlasmaTrans: B is transposed,
* - PlasmaConjTrans: B is conjugate transposed.
*
* @param[in] m
* The number of rows of the matrix op( A ) and of the matrix C.
* m >= 0.
*
* @param[in] n
* The number of columns of the matrix op( B ) and of the matrix C.
* n >= 0.
*
* @param[in] k
* The number of columns of the matrix op( A ) and the number of rows
* of the matrix op( B ). k >= 0.
*
* @param[in] alpha
* The scalar alpha.
*
* @param[in] pA
* An lda-by-ka matrix, where ka is k when transa = PlasmaNoTrans,
* and is m otherwise.
*
* @param[in] lda
* The leading dimension of the array A.
* When transa = PlasmaNoTrans, lda >= max(1,m),
* otherwise, lda >= max(1,k).
*
* @param[in] pB
* An ldb-by-kb matrix, where kb is n when transb = PlasmaNoTrans,
* and is k otherwise.
*
* @param[in] ldb
* The leading dimension of the array B.
* When transb = PlasmaNoTrans, ldb >= max(1,k),
* otherwise, ldb >= max(1,n).
*
* @param[in] beta
* The scalar beta.
*
* @param[in,out] pC
* An ldc-by-n matrix. On exit, the array is overwritten by the m-by-n
* matrix ( alpha*op( A )*op( B ) + beta*C ).
*
* @param[in] ldc
* The leading dimension of the array C. ldc >= max(1,m).
*
*******************************************************************************
*
* @retval PlasmaSuccess successful exit
*
*******************************************************************************
*
* @sa plasma_omp_cgemm
* @sa plasma_cgemm
* @sa plasma_dgemm
* @sa plasma_sgemm
*
******************************************************************************/
// Blocking single-precision-complex GEMM: C = alpha*op(A)*op(B) + beta*C.
// Validates arguments, converts the LAPACK-layout inputs to tile layout,
// runs the asynchronous tile algorithm, and converts C back.
// Returns PlasmaSuccess or a negative argument index / error code.
int plasma_cgemm(plasma_enum_t transa, plasma_enum_t transb,
                 int m, int n, int k,
                 plasma_complex32_t alpha, plasma_complex32_t *pA, int lda,
                                           plasma_complex32_t *pB, int ldb,
                 plasma_complex32_t beta,  plasma_complex32_t *pC, int ldc)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }
    // Check input arguments.
    if ((transa != PlasmaNoTrans) &&
        (transa != PlasmaTrans) &&
        (transa != PlasmaConjTrans)) {
        plasma_error("illegal value of transa");
        return -1;
    }
    if ((transb != PlasmaNoTrans) &&
        (transb != PlasmaTrans) &&
        (transb != PlasmaConjTrans)) {
        plasma_error("illegal value of transb");
        return -2;
    }
    if (m < 0) {
        plasma_error("illegal value of m");
        return -3;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -4;
    }
    if (k < 0) {
        plasma_error("illegal value of k");
        return -5;
    }
    // Dimensions of A and B as stored (i.e., before transposition).
    int am, an;
    int bm, bn;
    if (transa == PlasmaNoTrans) {
        am = m;
        an = k;
    }
    else {
        am = k;
        an = m;
    }
    if (transb == PlasmaNoTrans) {
        bm = k;
        bn = n;
    }
    else {
        bm = n;
        bn = k;
    }
    if (lda < imax(1, am)) {
        plasma_error("illegal value of lda");
        return -8;
    }
    if (ldb < imax(1, bm)) {
        plasma_error("illegal value of ldb");
        return -10;
    }
    if (ldc < imax(1, m)) {
        plasma_error("illegal value of ldc");
        return -13;
    }
    // quick return
    if (m == 0 || n == 0 || ((alpha == 0.0 || k == 0) && beta == 1.0))
        return PlasmaSuccess;
    // Tune parameters.
    if (plasma->tuning)
        plasma_tune_gemm(plasma, PlasmaComplexFloat, m, n, k);
    // Set tiling parameters.
    int nb = plasma->nb;
    // Create tile matrices.
    plasma_desc_t A;
    plasma_desc_t B;
    plasma_desc_t C;
    int retval;
    retval = plasma_desc_general_create(PlasmaComplexFloat, nb, nb,
                                        am, an, 0, 0, am, an, &A);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        return retval;
    }
    retval = plasma_desc_general_create(PlasmaComplexFloat, nb, nb,
                                        bm, bn, 0, 0, bm, bn, &B);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        plasma_desc_destroy(&A);
        return retval;
    }
    retval = plasma_desc_general_create(PlasmaComplexFloat, nb, nb,
                                        m, n, 0, 0, m, n, &C);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        plasma_desc_destroy(&A);
        plasma_desc_destroy(&B);
        return retval;
    }
    // Initialize sequence.
    // BUG FIX: the results of plasma_sequence_init() and
    // plasma_request_init() were previously ignored; a failed
    // initialization must release the tile descriptors and report.
    plasma_sequence_t sequence;
    retval = plasma_sequence_init(&sequence);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_sequence_init() failed");
        plasma_desc_destroy(&A);
        plasma_desc_destroy(&B);
        plasma_desc_destroy(&C);
        return retval;
    }
    // Initialize request.
    plasma_request_t request;
    retval = plasma_request_init(&request);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_request_init() failed");
        plasma_desc_destroy(&A);
        plasma_desc_destroy(&B);
        plasma_desc_destroy(&C);
        return retval;
    }
    // asynchronous block
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_cge2desc(pA, lda, A, &sequence, &request);
        plasma_omp_cge2desc(pB, ldb, B, &sequence, &request);
        plasma_omp_cge2desc(pC, ldc, C, &sequence, &request);
        // Call the tile async function.
        plasma_omp_cgemm(transa, transb,
                         alpha, A,
                                B,
                         beta,  C,
                         &sequence, &request);
        // Translate back to LAPACK layout.
        plasma_omp_cdesc2ge(C, pC, ldc, &sequence, &request);
    }
    // implicit synchronization
    // Free matrices in tile layout.
    plasma_desc_destroy(&A);
    plasma_desc_destroy(&B);
    plasma_desc_destroy(&C);
    // Return status.
    int status = sequence.status;
    return status;
}
/***************************************************************************//**
*
* @ingroup plasma_gemm
*
* Performs matrix multiplication.
* Non-blocking tile version of plasma_cgemm().
* May return before the computation is finished.
* Operates on matrices stored by tiles.
* All matrices are passed through descriptors.
* All dimensions are taken from the descriptors.
* Allows for pipelining of operations at runtime.
*
*******************************************************************************
*
* @param[in] transa
* - PlasmaNoTrans: A is not transposed,
* - PlasmaTrans: A is transposed,
* - PlasmaConjTrans: A is conjugate transposed.
*
* @param[in] transb
* - PlasmaNoTrans: B is not transposed,
* - PlasmaTrans: B is transposed,
* - PlasmaConjTrans: B is conjugate transposed.
*
* @param[in] alpha
* The scalar alpha.
*
* @param[in] A
* Descriptor of matrix A.
*
* @param[in] B
* Descriptor of matrix B.
*
* @param[in] beta
* The scalar beta.
*
* @param[in,out] C
* Descriptor of matrix C.
*
* @param[in] sequence
* Identifies the sequence of function calls that this call belongs to
* (for completion checks and exception handling purposes). Check
* the sequence->status for errors.
*
* @param[out] request
* Identifies this function call (for exception handling purposes).
*
* @retval void
* Errors are returned by setting sequence->status and
* request->status to error values. The sequence->status and
* request->status should never be set to PlasmaSuccess (the
* initial values) since another async call may be setting a
* failure value at the same time.
*
*******************************************************************************
*
* @sa plasma_cgemm
* @sa plasma_omp_cgemm
* @sa plasma_omp_dgemm
* @sa plasma_omp_sgemm
*
******************************************************************************/
// Non-blocking tile GEMM; errors are reported through
// plasma_request_fail(sequence, request, ...).
void plasma_omp_cgemm(plasma_enum_t transa, plasma_enum_t transb,
                      plasma_complex32_t alpha, plasma_desc_t A,
                                                plasma_desc_t B,
                      plasma_complex32_t beta,  plasma_desc_t C,
                      plasma_sequence_t *sequence, plasma_request_t *request)
{
    // BUG FIX: validate sequence and request before anything else. The
    // NULL checks used to come last, after several
    // plasma_request_fail(sequence, request, ...) calls that would have
    // dereferenced the NULL pointers they were meant to detect.
    if (sequence == NULL) {
        plasma_error("NULL sequence");
        return;
    }
    if (request == NULL) {
        plasma_error("NULL request");
        return;
    }
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    // Check input arguments.
    if ((transa != PlasmaNoTrans) &&
        (transa != PlasmaTrans) &&
        (transa != PlasmaConjTrans)) {
        plasma_error("illegal value of transa");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if ((transb != PlasmaNoTrans) &&
        (transb != PlasmaTrans) &&
        (transb != PlasmaConjTrans)) {
        plasma_error("illegal value of transb");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(B) != PlasmaSuccess) {
        plasma_error("invalid B");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(C) != PlasmaSuccess) {
        plasma_error("invalid C");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    // quick return
    int k = transa == PlasmaNoTrans ? A.n : A.m;
    if (C.m == 0 || C.n == 0 || ((alpha == 0.0 || k == 0) && beta == 1.0))
        return;
    // Call the parallel function.
    plasma_pcgemm(transa, transb,
                  alpha, A,
                         B,
                  beta,  C,
                  sequence, request);
}
|
rose_break2.c | #include "omp.h"
int i;
int j;
int a[100][100];

/* Increment every element of a[][] once, stopping a row scan early if an
   element reaches 100.  The outer loop is parallelized; breaking out of
   the inner (sequential) loop is legal under OpenMP.  */
void foo()
{
/* BUG FIX: both loop counters are file-scope globals.  "i" was privatized
   but "j" was not, so every thread raced on the shared inner counter,
   producing wrong iteration counts.  Privatize both.  */
#pragma omp parallel for private (i,j)
  for (i = 0; i <= 99; i += 1) {
    for (j = 0; j <= 99; j += 1) {
      a[i][j] = a[i][j] + 1;
      if (a[i][j] == 100)
        break;
    }
  }
}
|
exemplo_section.c | #include "exemplos.h"
// Exemplo Section
// Print the n-times table, one line per second, each line tagged with the
// id of the executing OpenMP thread.
void times_table(int n)
{
    int tid = omp_get_thread_num();
    for (int row = 1; row <= n; ++row)
    {
        int product = row * n;
        printf("Thread %d says %d times %d equals %d.\n",
               tid, row, n, product);
        sleep(1);
    }
}
// Count down from 10 to 1, one number per second, then announce lift off.
// Each line is tagged with the id of the executing OpenMP thread.
void countdown()
{
    int tid = omp_get_thread_num();
    for (int t = 10; t >= 1; --t)
    {
        printf("Thread %d says %d...\n", tid, t);
        sleep(1);
    }
    printf("Thread %d says \"Lift off!\"\n", tid);
}
// Accumulate the sum of squares 1..10, sleeping a second per step to
// simulate a long-running job, then report the total.
void long_loop()
{
    int tid = omp_get_thread_num();
    double total = 0;
    for (int step = 1; step <= 10; ++step)
    {
        total += (step * step);
        sleep(1);
    }
    printf("Thread %d says the sum of the long loop is %f\n",
           tid, total);
}
// Run three independent jobs concurrently: each SECTION is executed by
// one thread of the parallel team.
// Fix: removed the locals `nthreads` and `thread_id`, which were declared
// but never used.
int main(int argc, char **argv)
{
    printf("I am the main thread.\n");
    #pragma omp parallel
    {
        #pragma omp sections
        {
            #pragma omp section
            {
                times_table(12);
            }
            #pragma omp section
            {
                countdown();
            }
            #pragma omp section
            {
                long_loop();
            }
        }
    }
    printf("Here I am, back to the main thread.\n");
    return 0;
}
omp_workshare2.c | /******************************************************************************
* FILE: omp_workshare2.c
* DESCRIPTION:
* OpenMP Example - Sections Work-sharing - C Version
* In this example, the OpenMP SECTION directive is used to assign
* different array operations to each thread that executes a SECTION.
* AUTHOR: Blaise Barney 5/99
* LAST REVISED: 07/16/07
******************************************************************************/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#define N 50
/* Demonstrates the SECTIONS work-sharing construct: two independent array
   operations (vector add, vector multiply) are handed to different threads
   of the team.  Output interleaving depends on the schedule.  */
int main (int argc, char *argv[])
{
int i, nthreads, tid;
float a[N], b[N], c[N], d[N];
/* Some initializations */
for (i=0; i<N; i++) {
a[i] = i * 1.5;
b[i] = i + 22.35;
c[i] = d[i] = 0.0;
}
/* The arrays and nthreads are shared across the team; each thread keeps a
   private loop index and thread id.  */
#pragma omp parallel shared(a,b,c,d,nthreads) private(i,tid)
{
tid = omp_get_thread_num();
/* Only thread 0 reports the team size (writes shared nthreads once).  */
if (tid == 0)
{
nthreads = omp_get_num_threads();
printf("Number of threads = %d\n", nthreads);
}
printf("Thread %d starting...\n",tid);
/* nowait: a thread leaving the sections construct does not wait for the
   others before printing its "done" message below.  */
#pragma omp sections nowait
{
#pragma omp section
{
printf("Thread %d doing section 1\n",tid);
for (i=0; i<N; i++)
{
c[i] = a[i] + b[i];
printf("Thread %d: c[%d]= %f\n",tid,i,c[i]);
}
}
#pragma omp section
{
printf("Thread %d doing section 2\n",tid);
for (i=0; i<N; i++)
{
d[i] = a[i] * b[i];
printf("Thread %d: d[%d]= %f\n",tid,i,d[i]);
}
}
} /* end of sections */
printf("Thread %d done.\n",tid);
} /* end of parallel section */
/* main returns 0 implicitly (C99).  */
}
|
kde_lsh.h | //
// Copyright (c) 2020 xinyan. All rights reserved.
// Created on 2020/3/31.
//
#ifndef SSKDE_INCLUDE_LSH_KDE_H_
#define SSKDE_INCLUDE_LSH_KDE_H_
#include "utils.h"
#include <algorithm>
#include <cmath>
#include <cstring>
#include <vector>
#include <unordered_map>
using std::vector;
using std::unordered_map;
const T CONST_PI = 3.14159265358979323846264338327950288f;
const T SQRT_2PI = std::sqrt(2.0f / CONST_PI);
/* A bucket of an LSH table: counts how many points hashed into it and
   (via update) keeps one uniformly-random representative point, or (via
   insert) keeps every point.  */
class HashBucket {
public:
/**
 * \brief Construct an empty bucket: no points seen, no stored sample.
 */
HashBucket() : count(0), x_() {}
/**
 * \brief Build a new hash bucket seeded with data point p.
 * \param p the first data point that falls into the bucket
 * \param d dimensionality of the point
 */
HashBucket(const T* p, int d) : count(1), x_(p, p+d) {}
/**
 * \brief Record one more point and keep a uniform random sample of the
 * bucket via reservoir sampling: p replaces the stored point with
 * probability 1/count.
 * \param p candidate data point
 * \param d dimensionality of the point
 * NOTE(review): `rng` is not declared in this header; presumably a global
 * random engine provided by utils.h -- confirm.
 */
void update(const T* p, int d) {
count += 1;
auto distribution = std::uniform_real_distribution<T>(0, 1);
T r = distribution(rng);
if (r <= 1.0 / count) {
// Lazily allocate sample storage for buckets created default-constructed.
if (x_.empty()) {
x_.resize(1ull * d);
}
std::memcpy(x_.data(), p, sizeof(T) * d);
}
}
/**
 * \brief Record one more point and append it to the bucket storage
 * (unlike update(), keeps every inserted point).
 * \param p data point to append
 * \param d dimensionality of the point
 */
void insert(T* p, int d) {
count += 1;
x_.resize(x_.size() + d);
std::memcpy(x_.data() + x_.size() - d, p, sizeof(T) * d);
}
// Number of points ever placed in this bucket.
int size() const {
return count;
}
// Stored sample (update) or concatenated points (insert).
const T* data() const {
return x_.data();
}
private:
int count; // how many points hashed into this bucket
vector<T> x_; // flat storage, d values per point
};
class E2LSH {
public:
/**
* \brief Construct an LSH table
* \param xs dataset
* \param n size of dataset
* \param d dimension of dataset
* \param k number of hash functions
* \param w bin width
*/
E2LSH(int d, int k, T w) : k(k), d(d), w(w) {
a_ = random_normal(k * d);
b_ = random_uniform(k, .0, w);
}
void init(const T* a, const T* b) {
std::memcpy(a_, a, sizeof(T) * k * d);
std::memcpy(b_, b, sizeof(T) * k);
}
~E2LSH() {
delete [] a_;
delete [] b_;
}
vector<int > hash(const T* x) const {
vector<int > v(k, 0);
T* a = a_;
T* b = b_;
for (int i = 0; i < k; ++i, a+=d, b++) {
T t = inner_product(x, a, d) + *b;
v[i] = static_cast<int>(std::ceil(t / w));
}
return v;
}
T probability(T c) const {
if (c <= 1e-10)
return 1.f;
c = c / w;
T base = std::erf(1.0f / c) - SQRT_2PI * c *
(1.0f - std::exp(-1.0f / (2 * c * c)));
return static_cast<T >(std::pow(base, k));
}
public:
const int d;
const int k;
const T w;
private:
T* a_;
T* b_;
};
/* One LSH table: maps bucket keys produced by an E2LSH function to
   HashBucket summaries.  The E2LSH object is shared, not owned, and must
   outlive this table. */
class HashTable {
public:
/**
 * \brief Construct an empty table over the given hash function family.
 * \param lsh shared hash function (held by reference; must outlive *this)
 */
explicit HashTable(const E2LSH& lsh) : count_(0), lsh_(lsh) {}
/// Insert point x: create its bucket on first hit, otherwise update the
/// existing bucket's count and reservoir sample.
void insert(const T* x) {
vector<int > key = lsh_.hash(x);
auto it = table_.find(key);
if (it == table_.end()) {
table_[key] = HashBucket(x, lsh_.d);
count_++;
} else {
it->second.update(x, lsh_.d);
}
}
/**
 * \brief Importance-weighted kernel estimate from the bucket q falls in:
 * K(q, sample) * bucket_size / collision_probability(distance).
 * \param q query point
 * \return this table's contribution, or 0 if q hits an empty bucket
 */
T query(const T* q) const {
// Fix: removed the unused local `HashBucket bucket;` that was
// default-constructed and never read.
vector<int > key = lsh_.hash(q);
const auto it = table_.find(key);
if (it == table_.end()) {
return 0.f;
} else {
T c_sqr = l2dist_sqr(q, it->second.data(), lsh_.d);
return gaussian_kernel(c_sqr, 1.f) * it->second.size()
/ lsh_.probability(std::sqrt(c_sqr));
}
}
/// Number of non-empty buckets created so far.
int size() {
return count_;
}
private:
int count_;          // number of distinct buckets
const E2LSH& lsh_;   // shared hash family (not owned)
unordered_map<vector<int >, HashBucket,
VectorHash<vector<int > > > table_;
};
/* Hashing-Based Estimator for kernel density: builds l*m LSH tables over
   the dataset and answers queries with a median-of-means over tables.  */
class HBE {
public:
/**
 * \brief Build l*m hash tables over the n x d dataset xs.
 * \param xs dataset, row-major, n points of dimension d
 * \param n  number of points
 * \param d  dimension
 * \param l  number of estimate groups (outer, for the median)
 * \param m  tables averaged within each group
 * \param k  hash functions per table
 * \param w  bin width
 * NOTE: reserve() before the emplace loop is essential -- each HashTable
 * holds a reference to its E2LSH in hash_, so neither vector may
 * reallocate after construction starts.
 */
HBE(const T* xs, int n, int d, int l, int m, int k, T w)
: l_(l), m_(m), n_(n), d_(d) {
hash_.reserve(1ull * l * m);
tables_.reserve(1ull * l * m);
for (int i = 0; i < l * m; ++i) {
hash_.emplace_back(d, k, w);
tables_.emplace_back(hash_.back());
}
// Tables are independent, so they can be populated in parallel; each
// iteration writes only tables_[i].
#pragma omp parallel for
for (int i = 0; i < l * m; ++i) {
const T* x = xs;
for (int j = 0; j < n; ++j, x+=d) {
tables_[i].insert(x);
}
}
std::cout << "table size : ";
for (int i = 0; i < l * m; ++i) {
std::cout << tables_[i].size() << " ";
}
std::cout << std::endl;
}
/**
 * \brief Density estimate at q: mean of m table estimates per group,
 * median over l groups, normalized by the dataset size.
 * \param l number of groups to use (clamped to the l given at build time)
 * \param m tables per group to use (clamped likewise)
 * NOTE(review): median() is presumably a helper from utils.h taking a
 * pointer to the vector -- confirm its contract (may reorder z).
 */
T query(const T* q, int l, int m) const {
l = std::min(l, l_);
m = std::min(m, m_);
std::vector<T > z = std::vector<T >(l, 0);
int table_idx = 0;
for (int i = 0; i < l; i ++) {
for (int j = 0; j < m; j ++) {
z[i] += tables_[table_idx++].query(q);
}
}
return median(&z) / n_ / m;
}
private:
int l_; // groups
int m_; // tables per group
int n_; // dataset size
int d_; // dimension
vector<E2LSH > hash_;      // hash families, referenced by tables_
vector<HashTable > tables_;
};
#endif // SSKDE_INCLUDE_LSH_KDE_H_
|
GB_ewise_slice.c | //------------------------------------------------------------------------------
// GB_ewise_slice: slice the entries and vectors for an ewise operation
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// Constructs a set of tasks to compute C, for an element-wise operation that
// operates on two input matrices, C=op(A,B). These include:
// GB_add, GB_emult, and GB_masker, and many GB_subassign_* methods
// (02, 04, 06s_and_14, 08n, 08s_and_16, 09, 10_and_18, 11, 12_and_20).
// The mask is ignored for computing where to slice the work, but it is sliced
// once the location has been found.
// M, A, B: any sparsity structure (hypersparse, sparse, bitmap, or full).
// C: constructed as sparse or hypersparse in the caller.
#define GB_FREE_WORK \
{ \
GB_WERK_POP (Coarse, int64_t) ; \
GB_FREE_WERK (&Cwork, Cwork_size) ; \
}
#define GB_FREE_ALL \
{ \
GB_FREE_WORK ; \
GB_FREE_WERK (&TaskList, TaskList_size) ; \
}
#include "GB.h"
//------------------------------------------------------------------------------
// GB_ewise_slice
//------------------------------------------------------------------------------
// Overview: (1) estimate the work for each vector of C, (2) replace the
// estimate with its cumulative sum, (3) slice the cumulative work into
// ntasks1 roughly equal coarse tasks, and (4) turn any coarse task whose
// slice is a single heavy vector into several fine tasks, using
// GB_slice_vector to find matching split points in M, A, and B.
GrB_Info GB_ewise_slice
(
// output:
GB_task_struct **p_TaskList, // array of structs
size_t *p_TaskList_size, // size of TaskList
int *p_ntasks, // # of tasks constructed
int *p_nthreads, // # of threads for eWise operation
// input:
const int64_t Cnvec, // # of vectors of C
const int64_t *restrict Ch, // vectors of C, if hypersparse
const int64_t *restrict C_to_M, // mapping of C to M
const int64_t *restrict C_to_A, // mapping of C to A
const int64_t *restrict C_to_B, // mapping of C to B
bool Ch_is_Mh, // if true, then Ch == Mh; GB_add only
const GrB_Matrix M, // mask matrix to slice (optional)
const GrB_Matrix A, // matrix to slice
const GrB_Matrix B, // matrix to slice
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
ASSERT (p_TaskList != NULL) ;
ASSERT (p_TaskList_size != NULL) ;
ASSERT (p_ntasks != NULL) ;
ASSERT (p_nthreads != NULL) ;
ASSERT_MATRIX_OK (A, "A for ewise_slice", GB0) ;
ASSERT (!GB_ZOMBIES (A)) ;
ASSERT (!GB_JUMBLED (A)) ;
ASSERT (!GB_PENDING (A)) ;
ASSERT_MATRIX_OK (B, "B for ewise_slice", GB0) ;
ASSERT (!GB_ZOMBIES (B)) ;
ASSERT (!GB_JUMBLED (B)) ;
ASSERT (!GB_PENDING (B)) ;
ASSERT_MATRIX_OK_OR_NULL (M, "M for ewise_slice", GB0) ;
ASSERT (!GB_ZOMBIES (M)) ;
ASSERT (!GB_JUMBLED (M)) ;
ASSERT (!GB_PENDING (M)) ;
// Initialize outputs so every error path leaves them well-defined.
(*p_TaskList ) = NULL ;
(*p_TaskList_size) = 0 ;
(*p_ntasks ) = 0 ;
(*p_nthreads ) = 1 ;
int64_t *restrict Cwork = NULL ; size_t Cwork_size = 0 ;
GB_WERK_DECLARE (Coarse, int64_t) ; // size ntasks1+1
int ntasks1 = 0 ;
//--------------------------------------------------------------------------
// determine # of threads to use
//--------------------------------------------------------------------------
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
//--------------------------------------------------------------------------
// allocate the initial TaskList
//--------------------------------------------------------------------------
// Allocate the TaskList to hold at least 2*ntask0 tasks. It will grow
// later, if needed. Usually, 64*nthreads_max is enough, but in a few cases
// fine tasks can cause this number to be exceeded. If that occurs,
// TaskList is reallocated.
// When the mask is present, it is often fastest to break the work up
// into tasks, even when nthreads_max is 1.
GB_task_struct *restrict TaskList = NULL ; size_t TaskList_size = 0 ;
int max_ntasks = 0 ;
int ntasks0 = (M == NULL && nthreads_max == 1) ? 1 : (32 * nthreads_max) ;
GB_REALLOC_TASK_WERK (TaskList, ntasks0, max_ntasks) ;
//--------------------------------------------------------------------------
// check for quick return for a single task
//--------------------------------------------------------------------------
if (Cnvec == 0 || ntasks0 == 1)
{
// construct a single coarse task that computes all of C
TaskList [0].kfirst = 0 ;
TaskList [0].klast = Cnvec-1 ;
(*p_TaskList ) = TaskList ;
(*p_TaskList_size) = TaskList_size ;
(*p_ntasks ) = (Cnvec == 0) ? 0 : 1 ;
(*p_nthreads ) = 1 ;
return (GrB_SUCCESS) ;
}
//--------------------------------------------------------------------------
// get A, B, and M
//--------------------------------------------------------------------------
const int64_t vlen = A->vlen ;
const int64_t *restrict Ap = A->p ;
const int64_t *restrict Ai = A->i ;
const int64_t *restrict Bp = B->p ;
const int64_t *restrict Bi = B->i ;
bool Ch_is_Ah = (Ch != NULL && A->h != NULL && Ch == A->h) ;
bool Ch_is_Bh = (Ch != NULL && B->h != NULL && Ch == B->h) ;
const int64_t *restrict Mp = NULL ;
const int64_t *restrict Mi = NULL ;
bool M_is_hyper = GB_IS_HYPERSPARSE (M) ;
if (M != NULL)
{
Mp = M->p ;
Mi = M->i ;
// Ch_is_Mh is true if either true on input (for GB_add, which denotes
// that Ch is a deep copy of M->h), or if Ch is a shallow copy of M->h.
Ch_is_Mh = Ch_is_Mh || (Ch != NULL && M_is_hyper && Ch == M->h) ;
}
//--------------------------------------------------------------------------
// allocate workspace
//--------------------------------------------------------------------------
Cwork = GB_MALLOC_WERK (Cnvec+1, int64_t, &Cwork_size) ;
if (Cwork == NULL)
{
// out of memory
GB_FREE_ALL ;
return (GrB_OUT_OF_MEMORY) ;
}
//--------------------------------------------------------------------------
// compute an estimate of the work for each vector of C
//--------------------------------------------------------------------------
// Cwork [k] = (# entries in A(:,j)) + (# entries in B(:,j)) + 1, where
// j is the kth vector of C.  The mask is deliberately ignored here.
int nthreads_for_Cwork = GB_nthreads (Cnvec, chunk, nthreads_max) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads_for_Cwork) schedule(static)
for (k = 0 ; k < Cnvec ; k++)
{
//----------------------------------------------------------------------
// get the C(:,j) vector
//----------------------------------------------------------------------
int64_t j = GBH (Ch, k) ;
//----------------------------------------------------------------------
// get the corresponding vector of A
//----------------------------------------------------------------------
int64_t kA ;
if (C_to_A != NULL)
{
// A is hypersparse and the C_to_A mapping has been created
ASSERT (GB_IS_HYPERSPARSE (A)) ;
kA = C_to_A [k] ;
ASSERT (kA >= -1 && kA < A->nvec) ;
if (kA >= 0)
{
ASSERT (j == GBH (A->h, kA)) ;
}
}
else if (Ch_is_Ah)
{
// A is hypersparse, but Ch is a shallow copy of A->h
ASSERT (GB_IS_HYPERSPARSE (A)) ;
kA = k ;
ASSERT (j == A->h [kA]) ;
}
else
{
// A is sparse, bitmap, or full
ASSERT (!GB_IS_HYPERSPARSE (A)) ;
kA = j ;
}
//----------------------------------------------------------------------
// get the corresponding vector of B
//----------------------------------------------------------------------
int64_t kB ;
if (C_to_B != NULL)
{
// B is hypersparse and the C_to_B mapping has been created
ASSERT (GB_IS_HYPERSPARSE (B)) ;
kB = C_to_B [k] ;
ASSERT (kB >= -1 && kB < B->nvec) ;
if (kB >= 0)
{
ASSERT (j == GBH (B->h, kB)) ;
}
}
else if (Ch_is_Bh)
{
// B is hypersparse, but Ch is a shallow copy of B->h
ASSERT (GB_IS_HYPERSPARSE (B)) ;
kB = k ;
ASSERT (j == B->h [kB]) ;
}
else
{
// B is sparse, bitmap, or full
ASSERT (!GB_IS_HYPERSPARSE (B)) ;
kB = j ;
}
//----------------------------------------------------------------------
// estimate the work for C(:,j)
//----------------------------------------------------------------------
ASSERT (kA >= -1 && kA < A->nvec) ;
ASSERT (kB >= -1 && kB < B->nvec) ;
// For bitmap/full matrices (Ap/Bp NULL) every vector has vlen entries.
const int64_t aknz = (kA < 0) ? 0 :
((Ap == NULL) ? vlen : (Ap [kA+1] - Ap [kA])) ;
const int64_t bknz = (kB < 0) ? 0 :
((Bp == NULL) ? vlen : (Bp [kB+1] - Bp [kB])) ;
Cwork [k] = aknz + bknz + 1 ;
}
//--------------------------------------------------------------------------
// replace Cwork with its cumulative sum
//--------------------------------------------------------------------------
GB_cumsum (Cwork, Cnvec, NULL, nthreads_for_Cwork, Context) ;
double cwork = (double) Cwork [Cnvec] ;
//--------------------------------------------------------------------------
// determine # of threads and tasks for the eWise operation
//--------------------------------------------------------------------------
int nthreads = GB_nthreads (cwork, chunk, nthreads_max) ;
ntasks0 = (M == NULL && nthreads == 1) ? 1 : (32 * nthreads) ;
double target_task_size = cwork / (double) (ntasks0) ;
target_task_size = GB_IMAX (target_task_size, chunk) ;
ntasks1 = cwork / target_task_size ;
ntasks1 = GB_IMAX (ntasks1, 1) ;
//--------------------------------------------------------------------------
// slice the work into coarse tasks
//--------------------------------------------------------------------------
GB_WERK_PUSH (Coarse, ntasks1 + 1, int64_t) ;
if (Coarse == NULL)
{
// out of memory
GB_FREE_ALL ;
return (GrB_OUT_OF_MEMORY) ;
}
GB_pslice (Coarse, Cwork, Cnvec, ntasks1, false) ;
//--------------------------------------------------------------------------
// construct all tasks, both coarse and fine
//--------------------------------------------------------------------------
int ntasks = 0 ;
for (int t = 0 ; t < ntasks1 ; t++)
{
//----------------------------------------------------------------------
// coarse task computes C (:,k:klast)
//----------------------------------------------------------------------
int64_t k = Coarse [t] ;
int64_t klast = Coarse [t+1] - 1 ;
if (k >= Cnvec)
{
//------------------------------------------------------------------
// all tasks have been constructed
//------------------------------------------------------------------
break ;
}
else if (k < klast)
{
//------------------------------------------------------------------
// coarse task has 2 or more vectors
//------------------------------------------------------------------
// This is a non-empty coarse-grain task that does two or more
// entire vectors of C, vectors k:klast, inclusive.
GB_REALLOC_TASK_WERK (TaskList, ntasks + 1, max_ntasks) ;
TaskList [ntasks].kfirst = k ;
TaskList [ntasks].klast = klast ;
ntasks++ ;
}
else
{
//------------------------------------------------------------------
// coarse task has 0 or 1 vectors
//------------------------------------------------------------------
// As a coarse-grain task, this task is empty or does a single
// vector, k. Vector k must be removed from the work done by this
// and any other coarse-grain task, and split into one or more
// fine-grain tasks.
for (int tt = t ; tt < ntasks1 ; tt++)
{
// remove k from the initial slice tt
if (Coarse [tt] == k)
{
// remove k from task tt
Coarse [tt] = k+1 ;
}
else
{
// break, k not in task tt
break ;
}
}
//------------------------------------------------------------------
// get the vector of C
//------------------------------------------------------------------
int64_t j = GBH (Ch, k) ;
//------------------------------------------------------------------
// get the corresponding vector of A
//------------------------------------------------------------------
int64_t kA ;
if (C_to_A != NULL)
{
// A is hypersparse and the C_to_A mapping has been created
ASSERT (GB_IS_HYPERSPARSE (A)) ;
kA = C_to_A [k] ;
}
else if (Ch_is_Ah)
{
// A is hypersparse, but Ch is a shallow copy of A->h
ASSERT (GB_IS_HYPERSPARSE (A)) ;
kA = k ;
}
else
{
// A is sparse, bitmap, or full
ASSERT (!GB_IS_HYPERSPARSE (A)) ;
kA = j ;
}
int64_t pA_start = (kA < 0) ? (-1) : GBP (Ap, kA, vlen) ;
int64_t pA_end = (kA < 0) ? (-1) : GBP (Ap, kA+1, vlen) ;
bool a_empty = (pA_end == pA_start) ;
//------------------------------------------------------------------
// get the corresponding vector of B
//------------------------------------------------------------------
int64_t kB ;
if (C_to_B != NULL)
{
// B is hypersparse and the C_to_B mapping has been created
ASSERT (GB_IS_HYPERSPARSE (B)) ;
kB = C_to_B [k] ;
}
else if (Ch_is_Bh)
{
// B is hypersparse, but Ch is a shallow copy of B->h
ASSERT (GB_IS_HYPERSPARSE (B)) ;
kB = k ;
}
else
{
// B is sparse, bitmap, or full
ASSERT (!GB_IS_HYPERSPARSE (B)) ;
kB = j ;
}
int64_t pB_start = (kB < 0) ? (-1) : GBP (Bp, kB, vlen) ;
int64_t pB_end = (kB < 0) ? (-1) : GBP (Bp, kB+1, vlen) ;
bool b_empty = (pB_end == pB_start) ;
//------------------------------------------------------------------
// get the corresponding vector of M, if present
//------------------------------------------------------------------
// M can have any sparsity structure (hyper, sparse, bitmap, full)
int64_t pM_start = -1 ;
int64_t pM_end = -1 ;
if (M != NULL)
{
int64_t kM ;
if (C_to_M != NULL)
{
// M is hypersparse and the C_to_M mapping has been created
ASSERT (GB_IS_HYPERSPARSE (M)) ;
kM = C_to_M [k] ;
}
else if (Ch_is_Mh)
{
// M is hypersparse, but Ch is a copy of Mh
ASSERT (GB_IS_HYPERSPARSE (M)) ;
// Ch is a deep or shallow copy of Mh
kM = k ;
}
else
{
// M is sparse, bitmap, or full
ASSERT (!GB_IS_HYPERSPARSE (M)) ;
kM = j ;
}
pM_start = (kM < 0) ? -1 : GBP (Mp, kM, vlen) ;
pM_end = (kM < 0) ? -1 : GBP (Mp, kM+1, vlen) ;
}
bool m_empty = (pM_end == pM_start) ;
//------------------------------------------------------------------
// determine the # of fine-grain tasks to create for vector k
//------------------------------------------------------------------
double ckwork = Cwork [k+1] - Cwork [k] ;
int nfine = ckwork / target_task_size ;
nfine = GB_IMAX (nfine, 1) ;
// make the TaskList bigger, if needed
GB_REALLOC_TASK_WERK (TaskList, ntasks + nfine, max_ntasks) ;
//------------------------------------------------------------------
// create the fine-grain tasks
//------------------------------------------------------------------
if (nfine == 1)
{
//--------------------------------------------------------------
// this is a single coarse task for all of vector k
//--------------------------------------------------------------
TaskList [ntasks].kfirst = k ;
TaskList [ntasks].klast = k ;
ntasks++ ;
}
else
{
//--------------------------------------------------------------
// slice vector k into nfine fine tasks
//--------------------------------------------------------------
// first fine task starts at the top of vector k
ASSERT (ntasks < max_ntasks) ;
TaskList [ntasks].kfirst = k ;
TaskList [ntasks].klast = -1 ; // this is a fine task
TaskList [ntasks].pM = (m_empty) ? -1 : pM_start ;
TaskList [ntasks].pA = (a_empty) ? -1 : pA_start ;
TaskList [ntasks].pB = (b_empty) ? -1 : pB_start ;
TaskList [ntasks].len = 0 ; // to be determined below
ntasks++ ;
int64_t ilast = 0, i = 0 ;
for (int tfine = 1 ; tfine < nfine ; tfine++)
{
// GB_slice_vector finds a row index i and pointers pM/pA/pB
// so the remaining work is split evenly among the fine tasks.
double target_work = ((nfine-tfine) * ckwork) / nfine ;
int64_t pM, pA, pB ;
GB_slice_vector (&i, &pM, &pA, &pB,
pM_start, pM_end, Mi,
pA_start, pA_end, Ai,
pB_start, pB_end, Bi,
vlen, target_work) ;
// prior task ends at pM-1, pA-1, and pB-1
TaskList [ntasks-1].pM_end = pM ;
TaskList [ntasks-1].pA_end = pA ;
TaskList [ntasks-1].pB_end = pB ;
// prior task handles indices ilast:i-1
TaskList [ntasks-1].len = i - ilast ;
// this task starts at pM, pA, and pB
ASSERT (ntasks < max_ntasks) ;
TaskList [ntasks].kfirst = k ;
TaskList [ntasks].klast = -1 ; // this is a fine task
TaskList [ntasks].pM = pM ;
TaskList [ntasks].pA = pA ;
TaskList [ntasks].pB = pB ;
// advance to the next task
ntasks++ ;
ilast = i ;
}
// Terminate the last fine task.
ASSERT (ntasks <= max_ntasks) ;
TaskList [ntasks-1].pM_end = (m_empty) ? -1 : pM_end ;
TaskList [ntasks-1].pA_end = (a_empty) ? -1 : pA_end ;
TaskList [ntasks-1].pB_end = (b_empty) ? -1 : pB_end ;
TaskList [ntasks-1].len = vlen - i ;
}
}
}
ASSERT (ntasks <= max_ntasks) ;
//--------------------------------------------------------------------------
// free workspace and return result
//--------------------------------------------------------------------------
GB_FREE_WORK ;
(*p_TaskList ) = TaskList ;
(*p_TaskList_size) = TaskList_size ;
(*p_ntasks ) = ntasks ;
(*p_nthreads ) = nthreads ;
return (GrB_SUCCESS) ;
}
|
GB_unop__identity_fp64_int16.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_fp64_int16)
// op(A') function: GB (_unop_tran__identity_fp64_int16)
// C type: double
// A type: int16_t
// cast: double cij = (double) aij
// unaryop: cij = aij
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
double z = (double) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int16_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = (double) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FP64 || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = (double) Ax [p] for every entry of A (dense case, Ab == NULL),
// or for every live entry (bitmap case).  Cx and Ax may be aliased: each
// position p is read and written independently, so in-place is safe.
GrB_Info GB (_unop_apply__identity_fp64_int16)
(
    double *Cx,                 // Cx and Ax may be aliased
    const int16_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,                // number of entries (or bitmap size)
    int nthreads                // # of OpenMP threads to use
)
{
    #if GB_DISABLE
    // this kernel was compiled out (see GB_DISABLE above); caller must
    // fall back to the generic apply
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // A is not bitmap: all anz entries are present and processed
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            int16_t aij = Ax [p] ;
            double z = (double) aij ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip positions not present in the bitmap
            if (!Ab [p]) continue ;
            int16_t aij = Ax [p] ;
            double z = (double) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply the unary operator.
// The entire kernel body comes from the shared template GB_unop_transpose.c,
// specialized through the GB_* macros defined above in this file.
GrB_Info GB (_unop_tran__identity_fp64_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,      // workspaces used by the template
    const int64_t *restrict A_slice,    // partition of A across threads
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    // kernel compiled out; caller falls back to the generic transpose
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
laplace_acc-omp.c | #include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <sys/time.h>
#ifndef _JUSTOMP_
#include "functions_acc.h"
#endif
#ifndef _JUSTACC_
#include "functions_omp.h"
#endif
// grid size
#define GRIDY 2048
#define GRIDX 2048
// FIX: use defined() on _ALL_INTERNAL_ as well.  A bare macro name in an
// #if expression expands first: it is a syntax error when the macro is
// defined empty (e.g. "#define _ALL_INTERNAL_"), and it tests the macro's
// value rather than the intended "is it defined".
#if defined (_UPDATE_INTERNAL_) || defined (_ALL_INTERNAL_)
#ifndef _PGI_
// PGI provides fmax(); other compilers use this classic max macro
#define MAX(X,Y) ((X) > (Y) ? (X) : (Y))
#endif
#endif
// smallest permitted change in temperature
#define MAX_TEMP_ERROR 0.02
double T_new[GRIDX+2][GRIDY+2]; // temperature grid
double T[GRIDX+2][GRIDY+2];     // temperature grid from last iteration
// initialisation routine
void init();
// Jacobi solver driver: repeatedly replace each interior cell with the
// average of its four neighbours until the largest per-cell change (dt)
// drops below MAX_TEMP_ERROR or the iteration budget is exhausted.
// Usage: program <number_of_iterations>
int main(int argc, char *argv[]) {

    int i, j;                                            // grid indexes
    int max_iterations;                                  // maximal number of iterations
    int iteration=1;                                     // iteration counter
    double dt=100;                                       // largest change in temperature
    struct timeval start_time, stop_time, elapsed_time;  // timers

    if(argc!=2) {
        printf("Usage: %s number_of_iterations\n",argv[0]);
        exit(1);
    } else {
        max_iterations=atoi(argv[1]);
    }

    gettimeofday(&start_time,NULL);

    init();

    // simulation iterations
    while ( dt > MAX_TEMP_ERROR && iteration <= max_iterations ) {

        // main computational kernel, average over neighbours in the grid
        // FIX: "(_ALL_INTERNAL_)" -> "defined (_ALL_INTERNAL_)".  A bare
        // macro name in an #if breaks when the macro is defined empty and
        // tests its value instead of its existence.
#if defined (_AVERAGE_INTERNAL_) || defined (_ALL_INTERNAL_)
#ifndef _JUSTOMP_
//#pragma acc kernels
// #pragma acc loop independent //together with kernels above
//:pgi:justacc:(internal):works (slow:because copies every loop)
//:pgi:justacc:(internal)-managed:works (slow:because copies every loop)
//#pragma acc parallel loop collapse(2)
//:pgi:justacc:(internal):works (slow:because copies every loop)
//:pgi:justacc:(internal)-managed:works (slow:because copies every loop)
#pragma acc parallel loop copyin(T) copyout(T_new) collapse(2)
//:pgi:justacc:(internal):works (slow:because copies every loop)
//:pgi:justacc:(internal)-managed:works (slow:because copies every loop)
#else
//#pragma omp target
//:gcc11:justomp:(internal):works (slow:because copies every loop)
#pragma omp target map(to:T) map(from:T_new)
//:gcc11:justomp:(internal):works (slow:because copies every loop)
#pragma omp teams distribute parallel for collapse(2) private(i,j)
#endif
        for(i = 1; i <= GRIDX; i++)
#ifndef _JUSTOMP_
//#pragma acc loop independent //together with kernels above
#endif
            for(j = 1; j <= GRIDY; j++)
                T_new[i][j] = 0.25 * (T[i+1][j] + T[i-1][j] +
                                      T[i][j+1] + T[i][j-1]);
#else
#ifndef _JUSTOMP_
        getAverage_acc(GRIDX,GRIDY,T,T_new);
#else
        getAverage_omp(GRIDX,GRIDY,T,T_new);
#endif
#endif

        // reset dt
        dt = 0.0;

        // compute the largest change and copy T_new to T
#if defined (_UPDATE_INTERNAL_) || defined (_ALL_INTERNAL_)
#ifndef _JUSTACC_
//#pragma omp target map(dt)
#pragma omp target map(tofrom:T,dt) map(to:T_new)
#pragma omp teams distribute parallel for collapse(2) reduction(max:dt) private(i,j)
#else
//#pragma acc kernels
// #pragma acc loop independent //together with kernels above
//#pragma acc parallel loop reduction(max:dt) collapse(2)
#pragma acc parallel loop copy(T) copyin(T_new) reduction(max:dt) collapse(2)
#endif
        for(i = 1; i <= GRIDX; i++){
#ifndef _JUSTACC_
        // (removed unused leftover "#define papa 0")
#else
//#pragma acc loop independent //together with kernels above
#endif
            for(j = 1; j <= GRIDY; j++){
#ifdef _PGI_
                dt = fmax( fabs(T_new[i][j]-T[i][j]), dt);
#else
                dt = MAX( fabs(T_new[i][j]-T[i][j]), dt);
#endif
                T[i][j] = T_new[i][j];
            }
        }
#else
#ifndef _JUSTACC_
        dt = updateT_omp(GRIDX,GRIDY,T,T_new,dt);
#else
        dt = updateT_acc(GRIDX,GRIDY,T,T_new,dt);
#endif
#endif

        // periodically print largest change
        if((iteration % 100) == 0)
            printf("Iteration %4.0d, dt %f\n",iteration,dt);

        iteration++;
    }

    gettimeofday(&stop_time,NULL);
    timersub(&stop_time, &start_time, &elapsed_time); // measure time

    printf("Total time was %f seconds.\n", elapsed_time.tv_sec+elapsed_time.tv_usec/1000000.0);

    return 0;
}
// Initialize the temperature grid: zero everywhere, then impose the fixed
// boundary conditions (which never change during the run).  The right and
// bottom edges ramp linearly; the bottom edge is written last so it owns
// the shared corner, exactly as before.
void init(){
    int row, col;

    // start from an all-zero grid, ghost cells included
    for(row = 0; row <= GRIDX+1; row++)
        for(col = 0; col <= GRIDY+1; col++)
            T[row][col] = 0.0;

    // left side fixed at 0, right side a linear ramp in the row index
    for(row = 0; row <= GRIDX+1; row++) {
        T[row][0]       = 0.0;
        T[row][GRIDY+1] = (128.0/GRIDX)*row;
    }

    // top fixed at 0, bottom a linear ramp in the column index
    for(col = 0; col <= GRIDY+1; col++) {
        T[0][col]       = 0.0;
        T[GRIDX+1][col] = (128.0/GRIDY)*col;
    }
}
|
For2_Paralelo.c | #include <stdio.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#define TRUE 1
#define FALSE 0
#else
#define omp_get_thread_num() 0
#define omp_get_num_threads() 1
#endif
// Demonstrates two consecutive worksharing loops inside one parallel
// region: the first fills a[], the second reads it to fill b[].  The
// implicit barrier after the first "omp for" guarantees a[] is complete
// before the second loop starts.
int main()
{
#ifdef _OPENMP
    // pin the team size: no dynamic adjustment, exactly 4 threads requested
    (void) omp_set_dynamic(FALSE);
    if (omp_get_dynamic()) {printf("Warning: dynamic adjustment of threads has been set\n");}
    (void) omp_set_num_threads(4);
#endif

    int idx, count = 9;
    int a[9], b[9];

#pragma omp parallel default(none) shared(count,a,b, idx)
    {
        // one thread reports the team size
#pragma omp single
        printf("Primer ciclo for: el numero de hilos es %d\n",
               omp_get_num_threads());

#pragma omp for
        for (idx = 0; idx < count; idx++)
        {
            printf("El hilo %d ejecuta en el ciclo la iteracion %d\n",
                   omp_get_thread_num(),idx);
            a[idx] = idx;
        }

#pragma omp single
        printf("Segundo ciclo for: el numero de hilos %d\n",
               omp_get_num_threads());

#pragma omp for
        for (idx = 0; idx < count; idx++)
        {
            printf("El hilo %d ejecuta en el ciclo la iteracion %d\n",
                   omp_get_thread_num(),idx);
            b[idx] = 2 * a[idx];
        }
    } // end of the parallel region

    return(0);
}
|
GB_unop__minv_fc32_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__minv_fc32_fc32)
// op(A') function: GB (_unop_tran__minv_fc32_fc32)
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// cast: GxB_FC32_t cij = aij
// unaryop: cij = GB_FC32_minv (aij)
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
GxB_FC32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_FC32_minv (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC32_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = aij ; \
Cx [pC] = GB_FC32_minv (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = GB_FC32_minv (Ax [p]): single-precision complex multiplicative
// inverse applied to every entry (dense case, Ab == NULL) or to each live
// entry (bitmap case).  Cx and Ax may be aliased: each position p is read
// and written independently, so in-place is safe.
GrB_Info GB (_unop_apply__minv_fc32_fc32)
(
    GxB_FC32_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,                // number of entries (or bitmap size)
    int nthreads                // # of OpenMP threads to use
)
{
    #if GB_DISABLE
    // this kernel was compiled out (see GB_DISABLE above); caller must
    // fall back to the generic apply
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // A is not bitmap: all anz entries are present and processed
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = aij ;
            Cx [p] = GB_FC32_minv (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip positions not present in the bitmap
            if (!Ab [p]) continue ;
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = aij ;
            Cx [p] = GB_FC32_minv (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply the unary operator.
// The entire kernel body comes from the shared template GB_unop_transpose.c,
// specialized through the GB_* macros defined above in this file.
GrB_Info GB (_unop_tran__minv_fc32_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,      // workspaces used by the template
    const int64_t *restrict A_slice,    // partition of A across threads
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    // kernel compiled out; caller falls back to the generic transpose
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
problem.sine.c | //------------------------------------------------------------------------------------------------------------------------------
// Samuel Williams
// SWWilliams@lbl.gov
// Lawrence Berkeley National Lab
//------------------------------------------------------------------------------------------------------------------------------
void evaluateBeta(double x, double y, double z, double *B, double *Bx, double *By, double *Bz){
double Bmin = 1.0;
double Bmax = 10.0;
double c2 = (Bmax-Bmin)/2; // coefficients to affect this transition
double c1 = (Bmax+Bmin)/2;
double c3 = 10.0; // how sharply (B)eta transitions
double xcenter = 0.50;
double ycenter = 0.50;
double zcenter = 0.50;
// calculate distance from center of the domain (0.5,0.5,0.5)
double r2 = pow((x-xcenter),2) + pow((y-ycenter),2) + pow((z-zcenter),2);
double r2x = 2.0*(x-xcenter);
double r2y = 2.0*(y-ycenter);
double r2z = 2.0*(z-zcenter);
//double r2xx = 2.0;
//double r2yy = 2.0;
//double r2zz = 2.0;
double r = pow(r2,0.5);
double rx = 0.5*r2x*pow(r2,-0.5);
double ry = 0.5*r2y*pow(r2,-0.5);
double rz = 0.5*r2z*pow(r2,-0.5);
//double rxx = 0.5*r2xx*pow(r2,-0.5) - 0.25*r2x*r2x*pow(r2,-1.5);
//double ryy = 0.5*r2yy*pow(r2,-0.5) - 0.25*r2y*r2y*pow(r2,-1.5);
//double rzz = 0.5*r2zz*pow(r2,-0.5) - 0.25*r2z*r2z*pow(r2,-1.5);
//- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
*B = c1+c2*tanh( c3*(r-0.25) );
*Bx = c2*c3*rx*(1-pow(tanh( c3*(r-0.25) ),2));
*By = c2*c3*ry*(1-pow(tanh( c3*(r-0.25) ),2));
*Bz = c2*c3*rz*(1-pow(tanh( c3*(r-0.25) ),2));
}
//------------------------------------------------------------------------------------------------------------------------------
void evaluateU(double x, double y, double z, double *U, double *Ux, double *Uy, double *Uz, double *Uxx, double *Uyy, double *Uzz, int isPeriodic){
double c1 = 2.0*M_PI;
double c2 = 6.0*M_PI;
double p = 13; // must be odd(?) and allows up to p-2 order MG
*U = pow(sin(c1*x),p )*pow(sin(c1*y),p)*pow(sin(c1*z),p);
*Ux = c1*p*cos(c1*x)*pow(sin(c1*x),p-1)*pow(sin(c1*y),p)*pow(sin(c1*z),p);
*Uy = c1*p*cos(c1*y)*pow(sin(c1*y),p-1)*pow(sin(c1*x),p)*pow(sin(c1*z),p);
*Uz = c1*p*cos(c1*z)*pow(sin(c1*z),p-1)*pow(sin(c1*x),p)*pow(sin(c1*y),p);
*Uxx = c1*c1*p*( (p-1)*pow(sin(c1*x),p-2)*pow(cos(c1*x),2) - pow(sin(c1*x),p) )*pow(sin(c1*y),p)*pow(sin(c1*z),p);
*Uyy = c1*c1*p*( (p-1)*pow(sin(c1*y),p-2)*pow(cos(c1*y),2) - pow(sin(c1*y),p) )*pow(sin(c1*x),p)*pow(sin(c1*z),p);
*Uzz = c1*c1*p*( (p-1)*pow(sin(c1*z),p-2)*pow(cos(c1*z),2) - pow(sin(c1*z),p) )*pow(sin(c1*x),p)*pow(sin(c1*y),p);
*U += pow(sin(c2*x),p )*pow(sin(c2*y),p)*pow(sin(c2*z),p);
*Ux += c2*p*cos(c2*x)*pow(sin(c2*x),p-1)*pow(sin(c2*y),p)*pow(sin(c2*z),p);
*Uy += c2*p*cos(c2*y)*pow(sin(c2*y),p-1)*pow(sin(c2*x),p)*pow(sin(c2*z),p);
*Uz += c2*p*cos(c2*z)*pow(sin(c2*z),p-1)*pow(sin(c2*x),p)*pow(sin(c2*y),p);
*Uxx += c2*c2*p*( (p-1)*pow(sin(c2*x),p-2)*pow(cos(c2*x),2) - pow(sin(c2*x),p) )*pow(sin(c2*y),p)*pow(sin(c2*z),p);
*Uyy += c2*c2*p*( (p-1)*pow(sin(c2*y),p-2)*pow(cos(c2*y),2) - pow(sin(c2*y),p) )*pow(sin(c2*x),p)*pow(sin(c2*z),p);
*Uzz += c2*c2*p*( (p-1)*pow(sin(c2*z),p-2)*pow(cos(c2*z),2) - pow(sin(c2*z),p) )*pow(sin(c2*x),p)*pow(sin(c2*y),p);
}
//------------------------------------------------------------------------------------------------------------------------------
// Fill every box owned by this rank: alpha, the face-centered betas, the
// exact solution UTRUE, and the right-hand side
//   F = a*alpha*U - b*( grad(beta).grad(U) + beta*laplacian(U) )
// all sampled at cell centers for grid spacing hLevel.
// BUG FIX: both lambdas previously had empty capture lists ([]) while
// referencing the surrounding locals (dim_*, ghosts, strides, level, box,
// hLevel, a, b), which does not compile.  The outer lambda captures by
// reference; the per-iteration task captures by value so each task owns
// copies of the loop-carried locals.
// NOTE(review): assumes hclib::finish runs the lambda and waits for the
// forasync3D_nb tasks it spawns before returning -- confirm with HClib docs.
void initialize_problem(level_type * level, double hLevel, double a, double b){
  level->h = hLevel;
  int box;
  for(box=0;box<level->num_my_boxes;box++){
    // clear every target vector (whole allocation, ghost zones included)
    memset(level->my_boxes[box].vectors[VECTOR_ALPHA ],0,level->my_boxes[box].volume*sizeof(double));
    memset(level->my_boxes[box].vectors[VECTOR_BETA_I],0,level->my_boxes[box].volume*sizeof(double));
    memset(level->my_boxes[box].vectors[VECTOR_BETA_J],0,level->my_boxes[box].volume*sizeof(double));
    memset(level->my_boxes[box].vectors[VECTOR_BETA_K],0,level->my_boxes[box].volume*sizeof(double));
    memset(level->my_boxes[box].vectors[VECTOR_UTRUE ],0,level->my_boxes[box].volume*sizeof(double));
    memset(level->my_boxes[box].vectors[VECTOR_F     ],0,level->my_boxes[box].volume*sizeof(double));
    const int jStride = level->my_boxes[box].jStride;
    const int kStride = level->my_boxes[box].kStride;
    const int  ghosts = level->my_boxes[box].ghosts;
    const int   dim_i = level->my_boxes[box].dim;
    const int   dim_j = level->my_boxes[box].dim;
    const int   dim_k = level->my_boxes[box].dim;
    // #pragma omp parallel for private(k,j,i) collapse(3)
    hclib::finish([&] {
    hclib::loop_domain_3d loop(dim_k, dim_j, dim_i);
    hclib::forasync3D_nb(&loop, [=] (int k, int j, int i) {
      //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
      // FIX... move to quadrature version to initialize the problem.
      // i.e. the value of an array element is the average value of the function over the cell (finite volume)
      //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
      int ijk = (i+ghosts) + (j+ghosts)*jStride + (k+ghosts)*kStride;
      double x = hLevel*( (double)(i+level->my_boxes[box].low.i) + 0.5 ); // +0.5 to get to the center of cell
      double y = hLevel*( (double)(j+level->my_boxes[box].low.j) + 0.5 );
      double z = hLevel*( (double)(k+level->my_boxes[box].low.k) + 0.5 );
      double A,B,Bx,By,Bz,Bi,Bj,Bk;
      double U,Ux,Uy,Uz,Uxx,Uyy,Uzz;
      //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
      // constant-coefficient defaults, overridden below if variable coefficients are enabled
      A  = 1.0;
      B  = 1.0;
      Bx = 0.0;
      By = 0.0;
      Bz = 0.0;
      Bi = 1.0;
      Bj = 1.0;
      Bk = 1.0;
      #ifdef STENCIL_VARIABLE_COEFFICIENT // variable coefficient problem...
      evaluateBeta(x-hLevel*0.5,y           ,z           ,&Bi,&Bx,&By,&Bz); // face-centered value of Beta for beta_i
      evaluateBeta(x           ,y-hLevel*0.5,z           ,&Bj,&Bx,&By,&Bz); // face-centered value of Beta for beta_j
      evaluateBeta(x           ,y           ,z-hLevel*0.5,&Bk,&Bx,&By,&Bz); // face-centered value of Beta for beta_k
      evaluateBeta(x           ,y           ,z           ,&B ,&Bx,&By,&Bz); // cell-centered value of Beta
      #endif
      //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
      evaluateU(x,y,z,&U,&Ux,&Uy,&Uz,&Uxx,&Uyy,&Uzz, (level->boundary_condition.type == BC_PERIODIC) );
      double F = a*A*U - b*( (Bx*Ux + By*Uy + Bz*Uz) + B*(Uxx + Uyy + Uzz) );
      //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
      level->my_boxes[box].vectors[VECTOR_BETA_I][ijk] = Bi;
      level->my_boxes[box].vectors[VECTOR_BETA_J][ijk] = Bj;
      level->my_boxes[box].vectors[VECTOR_BETA_K][ijk] = Bk;
      level->my_boxes[box].vectors[VECTOR_ALPHA ][ijk] = A;
      level->my_boxes[box].vectors[VECTOR_UTRUE ][ijk] = U;
      level->my_boxes[box].vectors[VECTOR_F     ][ijk] = F;
      //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    });
    });
  }

  // quick test for Poisson...
  if(level->alpha_is_zero==-1)level->alpha_is_zero = (dot(level,VECTOR_ALPHA,VECTOR_ALPHA) == 0.0);
}
//------------------------------------------------------------------------------------------------------------------------------
|
graph_generator.c | /* Copyright (C) 2009-2010 The Trustees of Indiana University. */
/* */
/* Use, modification and distribution is subject to the Boost Software */
/* License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at */
/* http://www.boost.org/LICENSE_1_0.txt) */
/* */
/* Authors: Jeremiah Willcock */
/* Andrew Lumsdaine */
#include <stdlib.h>
#include <stdint.h>
#include <assert.h>
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
#endif
#include <inttypes.h>
#include "user_settings.h"
#include "splittable_mrg.h"
#include "graph_generator.h"
/* Initiator settings: for faster random number generation, the initiator
* probabilities are defined as fractions (a = INITIATOR_A_NUMERATOR /
* INITIATOR_DENOMINATOR, b = c = INITIATOR_BC_NUMERATOR /
* INITIATOR_DENOMINATOR, d = 1 - a - b - c. */
#define INITIATOR_A_NUMERATOR 5700
#define INITIATOR_BC_NUMERATOR 1900
#define INITIATOR_DENOMINATOR 10000
/* If this macro is defined to a non-zero value, use SPK_NOISE_LEVEL /
* INITIATOR_DENOMINATOR as the noise parameter to use in introducing noise
* into the graph parameters. The approach used is from "A Hitchhiker's Guide
* to Choosing Parameters of Stochastic Kronecker Graphs" by C. Seshadhri, Ali
* Pinar, and Tamara G. Kolda (http://arxiv.org/abs/1102.5046v1), except that
* the adjustment here is chosen based on the current level being processed
* rather than being chosen randomly. */
#define SPK_NOISE_LEVEL 1000
/* #define SPK_NOISE_LEVEL 1000 -- in INITIATOR_DENOMINATOR units */
/* Draw which quadrant (0..3) of the 2x2 Kronecker initiator an edge descends
 * into at one recursion level: returns 1 or 2 for the b/c quadrants, 0 for a,
 * 3 for d, with the b/c probability optionally perturbed per level. */
static int generate_4way_bernoulli(mrg_state* st, int level, int nlevels) {
  /* Generate a pseudorandom number in the range [0, INITIATOR_DENOMINATOR)
   * without modulo bias. */
  /* NOTE(review): the exact unbiased rejection threshold is
   * 2^32 % INITIATOR_DENOMINATOR (7296), but 0xFFFFFFFF % INITIATOR_DENOMINATOR
   * yields 7295; residual bias is ~1 in 2^32.  Do not change without checking
   * the reference generator -- any change alters every seeded graph. */
  static const uint32_t limit = (UINT32_C(0xFFFFFFFF) % INITIATOR_DENOMINATOR);
  uint32_t val = mrg_get_uint_orig(st);
  if (/* Unlikely */ val < limit) {
    do {
      val = mrg_get_uint_orig(st);
    } while (val < limit);
  }
#if SPK_NOISE_LEVEL == 0
  int spk_noise_factor = 0;
#else
  /* noise ramps linearly with the recursion level, from -SPK_NOISE_LEVEL at
   * level 0 towards +SPK_NOISE_LEVEL (see the header comment's reference) */
  int spk_noise_factor = 2 * SPK_NOISE_LEVEL * level / nlevels - SPK_NOISE_LEVEL;
#endif
  int adjusted_bc_numerator = INITIATOR_BC_NUMERATOR + spk_noise_factor;
  val %= INITIATOR_DENOMINATOR;
  /* successive range tests against the (possibly adjusted) b and c shares */
  if (val < adjusted_bc_numerator) return 1;
  val -= adjusted_bc_numerator;
  if (val < adjusted_bc_numerator) return 2;
  val -= adjusted_bc_numerator;
#if SPK_NOISE_LEVEL == 0
  if (val < INITIATOR_A_NUMERATOR) return 0;
#else
  /* rescale a's share so the a:d ratio is preserved after b,c were adjusted */
  if (val < INITIATOR_A_NUMERATOR * (INITIATOR_DENOMINATOR - 2 * INITIATOR_BC_NUMERATOR) / (INITIATOR_DENOMINATOR - 2 * adjusted_bc_numerator)) return 0;
#endif
  return 3;
}
/* Reverse bits in a number; this should be optimized for performance
 * (including using bit- or byte-reverse intrinsics if your platform has them).
 * */
static inline uint64_t bitreverse(uint64_t x) {
#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)
#define USE_GCC_BYTESWAP /* __builtin_bswap* are in 4.3 but not 4.2 */
#endif

#ifdef FAST_64BIT_ARITHMETIC

  /* 64-bit code */
#ifdef USE_GCC_BYTESWAP
  /* reverse the byte order in a single intrinsic */
  x = __builtin_bswap64(x);
#else
  /* reverse the byte order by swapping halves, quarters, then bytes */
  x = (x >> 32) | (x << 32);
  x = ((x >> 16) & UINT64_C(0x0000FFFF0000FFFF)) | ((x & UINT64_C(0x0000FFFF0000FFFF)) << 16);
  x = ((x >> 8) & UINT64_C(0x00FF00FF00FF00FF)) | ((x & UINT64_C(0x00FF00FF00FF00FF)) << 8);
#endif
  /* bytes are now reversed; finish by reversing the bits within each byte
   * (nibbles, bit pairs, then single bits) */
  x = ((x >> 4) & UINT64_C(0x0F0F0F0F0F0F0F0F)) | ((x & UINT64_C(0x0F0F0F0F0F0F0F0F)) << 4);
  x = ((x >> 2) & UINT64_C(0x3333333333333333)) | ((x & UINT64_C(0x3333333333333333)) << 2);
  x = ((x >> 1) & UINT64_C(0x5555555555555555)) | ((x & UINT64_C(0x5555555555555555)) << 1);
  return x;

#else

  /* 32-bit code: reverse each half independently, then swap the halves */
  uint32_t h = (uint32_t)(x >> 32);
  uint32_t l = (uint32_t)(x & UINT32_MAX);
#ifdef USE_GCC_BYTESWAP
  h = __builtin_bswap32(h);
  l = __builtin_bswap32(l);
#else
  h = (h >> 16) | (h << 16);
  l = (l >> 16) | (l << 16);
  h = ((h >> 8) & UINT32_C(0x00FF00FF)) | ((h & UINT32_C(0x00FF00FF)) << 8);
  l = ((l >> 8) & UINT32_C(0x00FF00FF)) | ((l & UINT32_C(0x00FF00FF)) << 8);
#endif
  /* bit reversal within each byte of both halves */
  h = ((h >> 4) & UINT32_C(0x0F0F0F0F)) | ((h & UINT32_C(0x0F0F0F0F)) << 4);
  l = ((l >> 4) & UINT32_C(0x0F0F0F0F)) | ((l & UINT32_C(0x0F0F0F0F)) << 4);
  h = ((h >> 2) & UINT32_C(0x33333333)) | ((h & UINT32_C(0x33333333)) << 2);
  l = ((l >> 2) & UINT32_C(0x33333333)) | ((l & UINT32_C(0x33333333)) << 2);
  h = ((h >> 1) & UINT32_C(0x55555555)) | ((h & UINT32_C(0x55555555)) << 1);
  l = ((l >> 1) & UINT32_C(0x55555555)) | ((l & UINT32_C(0x55555555)) << 1);
  return ((uint64_t)l << 32) | h; /* Swap halves */

#endif
}
/* Apply a permutation to scramble vertex numbers; a randomly generated
 * permutation is not used because applying it at scale is too expensive.
 * The result always fits in lgN bits (asserted after each fold). */
static inline int64_t scramble(int64_t v0, int lgN, uint64_t val0, uint64_t val1) {
  uint64_t x = (uint64_t)v0 + val0 + val1;
  /* two rounds of multiply-then-bit-reverse, keyed by val0 and val1 */
  x *= (val0 | UINT64_C(0x4519840211493211));
  x = bitreverse(x) >> (64 - lgN);
  assert ((x >> lgN) == 0);
  x *= (val1 | UINT64_C(0x3050852102C843A5));
  x = bitreverse(x) >> (64 - lgN);
  assert ((x >> lgN) == 0);
  return (int64_t)x;
}
/* Make a single graph edge using a pre-set MRG state.  Descends the
 * Kronecker quadrant tree: each step halves the remaining vertex range and
 * the sampled quadrant picks which half the source/target fall into.  The
 * final vertex ids are scrambled before being written. */
static
void make_one_edge(int64_t nverts, int level, int lgN, mrg_state* st, packed_edge* result, uint64_t val0, uint64_t val1) {
  int64_t base_src = 0, base_tgt = 0;
  while (nverts > 1) {
    int square = generate_4way_bernoulli(st, level, lgN);
    int src_offset = square / 2;  /* high bit of the quadrant -> source half */
    int tgt_offset = square % 2;  /* low bit of the quadrant -> target half */
    assert (base_src <= base_tgt);
    if (base_src == base_tgt) {
      /* Clip-and-flip for undirected graph: while the two ranges still
       * coincide, only sample the upper triangle (src <= tgt) */
      if (src_offset > tgt_offset) {
        int temp = src_offset;
        src_offset = tgt_offset;
        tgt_offset = temp;
      }
    }
    /* halve the range first, then advance each base into its chosen half */
    nverts /= 2;
    ++level;
    base_src += nverts * src_offset;
    base_tgt += nverts * tgt_offset;
  }
  write_edge(result,
             scramble(base_src, lgN, val0, val1),
             scramble(base_tgt, lgN, val0, val1));
}
/* Generate a range of edges (from start_edge to end_edge of the total graph),
 * writing into elements [0, end_edge - start_edge) of the edges array. This
 * code is parallel on OpenMP and XMT; it must be used with
 * separately-implemented SPMD parallelism for MPI. */
void generate_kronecker_range(
       const uint_fast32_t seed[5] /* All values in [0, 2^31 - 1), not all zero */,
       int logN /* In base 2 */,
       int64_t start_edge, int64_t end_edge,
       packed_edge* edges) {
  mrg_state state;
  int64_t nverts = (int64_t)1 << logN;
  int64_t ei;
  mrg_seed(&state, seed);
  uint64_t val0, val1; /* Values for scrambling */
  {
    /* Derive the two scrambling constants from a skipped-ahead copy of the
     * seeded state, so they are independent of the per-edge streams below. */
    mrg_state new_state = state;
    mrg_skip(&new_state, 50, 7, 0);
    val0 = mrg_get_uint_orig(&new_state);
    val0 *= UINT64_C(0xFFFFFFFF);
    val0 += mrg_get_uint_orig(&new_state);
    val1 = mrg_get_uint_orig(&new_state);
    val1 *= UINT64_C(0xFFFFFFFF);
    val1 += mrg_get_uint_orig(&new_state);
  }
#ifdef _OPENMP
  /* FIX: was "parallel for num_threads(1)", which serialized the loop and
   * contradicted the "parallel on OpenMP" contract above (likely leftover
   * debugging).  Iterations are independent -- each works on a private copy
   * of state skipped to its own subsequence and writes a distinct edge --
   * so the output is identical for any thread count. */
#pragma omp parallel for
#endif
#ifdef __MTA__
#pragma mta assert parallel
#pragma mta block schedule
#endif
  for (ei = start_edge; ei < end_edge; ++ei) {
    /* jump this iteration's generator to edge ei's private subsequence */
    mrg_state new_state = state;
    mrg_skip(&new_state, 0, ei, 0);
    make_one_edge(nverts, 0, logN, &new_state, edges + (ei - start_edge), val0, val1);
  }
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.